filename | content | environment | variablearg | constarg | variableargjson | constargjson | lang | constargcount | variableargcount | sentence
---|---|---|---|---|---|---|---|---|---|---|
dmriprep/utils/bids.py | # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""Utilities to handle BIDS inputs."""
import os
import sys
import json
from pathlib import Path
from bids import BIDSLayout
def collect_data(bids_dir, participant_label, bids_validate=True):
"""Replacement for niworkflows' version."""
if isinstance(bids_dir, BIDSLayout):
layout = bids_dir
else:
layout = BIDSLayout(str(bids_dir), validate=bids_validate)
queries = {
"fmap": {"datatype": "fmap"},
"dwi": {"datatype": "dwi", "suffix": "dwi"},
"flair": {"datatype": "anat", "suffix": "FLAIR"},
"t2w": {"datatype": "anat", "suffix": "T2w"},
"t1w": {"datatype": "anat", "suffix": "T1w"},
"roi": {"datatype": "anat", "suffix": "roi"},
}
subj_data = {
dtype: sorted(
layout.get(
return_type="file",
subject=participant_label,
extension=["nii", "nii.gz"],
**query
)
)
for dtype, query in queries.items()
}
return subj_data, layout
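# A minimal usage sketch of collect_data; the BIDS root and participant label
# below are illustrative values, not taken from any real dataset:
#
#   subj_data, layout = collect_data("/data/bids_root", "01", bids_validate=False)
#   dwi_files = subj_data["dwi"]  # sorted DWI NIfTI paths for sub-01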
def write_derivative_description(bids_dir, deriv_dir):
from ..__about__ import __version__, __url__, DOWNLOAD_URL
bids_dir = Path(bids_dir)
deriv_dir = Path(deriv_dir)
desc = {
"Name": "dMRIPrep - dMRI PREProcessing workflow",
"BIDSVersion": "1.1.1",
"PipelineDescription": {
"Name": "dMRIPrep",
"Version": __version__,
"CodeURL": DOWNLOAD_URL,
},
"CodeURL": __url__,
"HowToAcknowledge": "Please cite https://doi.org/10.5281/zenodo.3392201.",
}
# Keys that can only be set by environment
if "DMRIPREP_DOCKER_TAG" in os.environ:
desc["DockerHubContainerTag"] = os.environ["DMRIPREP_DOCKER_TAG"]
if "DMRIPREP_SINGULARITY_URL" in os.environ:
singularity_url = os.environ["DMRIPREP_SINGULARITY_URL"]
desc["SingularityContainerURL"] = singularity_url
singularity_md5 = _get_shub_version(singularity_url)
if singularity_md5 and singularity_md5 is not NotImplemented:
desc["SingularityContainerMD5"] = _get_shub_version(singularity_url)
# Keys deriving from source dataset
orig_desc = {}
fname = bids_dir / "dataset_description.json"
if fname.exists():
with fname.open() as fobj:
orig_desc = json.load(fobj)
if "DatasetDOI" in orig_desc:
desc["SourceDatasetsURLs"] = [
"https://doi.org/{}".format(orig_desc["DatasetDOI"])
]
if "License" in orig_desc:
desc["License"] = orig_desc["License"]
with (deriv_dir / "dataset_description.json").open("w") as fobj:
json.dump(desc, fobj, indent=4)
def validate_input_dir(exec_env, bids_dir, participant_label):
# Ignore issues and warnings that should not influence dMRIPrep
import tempfile
import subprocess
validator_config_dict = {
"ignore": [
"EVENTS_COLUMN_ONSET",
"EVENTS_COLUMN_DURATION",
"TSV_EQUAL_ROWS",
"TSV_EMPTY_CELL",
"TSV_IMPROPER_NA",
"INCONSISTENT_SUBJECTS",
"INCONSISTENT_PARAMETERS",
"PARTICIPANT_ID_COLUMN",
"PARTICIPANT_ID_MISMATCH",
"TASK_NAME_MUST_DEFINE",
"PHENOTYPE_SUBJECTS_MISSING",
"STIMULUS_FILE_MISSING",
"BOLD_NOT_4D",
"EVENTS_TSV_MISSING",
"TSV_IMPROPER_NA",
"ACQTIME_FMT",
"Participants age 89 or higher",
"DATASET_DESCRIPTION_JSON_MISSING",
"TASK_NAME_CONTAIN_ILLEGAL_CHARACTER",
"FILENAME_COLUMN",
"WRONG_NEW_LINE",
"MISSING_TSV_COLUMN_CHANNELS",
"MISSING_TSV_COLUMN_IEEG_CHANNELS",
"MISSING_TSV_COLUMN_IEEG_ELECTRODES",
"UNUSED_STIMULUS",
"CHANNELS_COLUMN_SFREQ",
"CHANNELS_COLUMN_LOWCUT",
"CHANNELS_COLUMN_HIGHCUT",
"CHANNELS_COLUMN_NOTCH",
"CUSTOM_COLUMN_WITHOUT_DESCRIPTION",
"ACQTIME_FMT",
"SUSPICIOUSLY_LONG_EVENT_DESIGN",
"SUSPICIOUSLY_SHORT_EVENT_DESIGN",
"MISSING_TSV_COLUMN_EEG_ELECTRODES",
"MISSING_SESSION",
],
"error": ["NO_T1W"],
"ignoredFiles": ["/dataset_description.json", "/participants.tsv"],
}
# Limit validation only to data from requested participants
if participant_label:
all_subs = set([s.name[4:] for s in bids_dir.glob("sub-*")])
selected_subs = set(
[s[4:] if s.startswith("sub-") else s for s in participant_label]
)
bad_labels = selected_subs.difference(all_subs)
if bad_labels:
error_msg = (
"Data for requested participant(s) label(s) not found. Could "
"not find data for participant(s): %s. Please verify the requested "
"participant labels."
)
if exec_env == "docker":
error_msg += (
" This error can be caused by the input data not being "
"accessible inside the docker container. Please make sure all "
"volumes are mounted properly (see https://docs.docker.com/"
"engine/reference/commandline/run/#mount-volume--v---read-only)"
)
if exec_env == "singularity":
error_msg += (
" This error can be caused by the input data not being "
"accessible inside the singularity container. Please make sure "
"all paths are mapped properly (see https://www.sylabs.io/"
"guides/3.0/user-guide/bind_paths_and_mounts.html)"
)
raise RuntimeError(error_msg % ",".join(bad_labels))
ignored_subs = all_subs.difference(selected_subs)
if ignored_subs:
for sub in ignored_subs:
validator_config_dict["ignoredFiles"].append("/sub-%s/**" % sub)
with tempfile.NamedTemporaryFile("w+") as temp:
temp.write(json.dumps(validator_config_dict))
temp.flush()
try:
subprocess.check_call(["bids-validator", bids_dir, "-c", temp.name])
except FileNotFoundError:
print("bids-validator does not appear to be installed", file=sys.stderr)
def _get_shub_version(singularity_url):
return NotImplemented
| [] | [] | ["DMRIPREP_DOCKER_TAG", "DMRIPREP_SINGULARITY_URL"] | [] | ["DMRIPREP_DOCKER_TAG", "DMRIPREP_SINGULARITY_URL"] | python | 2 | 0 | |
berangkat.py | import time
import configparser
import os
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import Select
def click_xpath (str):
while True:
try:
vo_button = browser.find_element_by_xpath (str)
break
except:
pass
time.sleep (0.2)
browser.execute_script("arguments[0].click();", vo_button)
def get_focus_xpath (str):
v_timeout = time.time() + 20
vo_result = 0
while True:
try:
vo_result = browser.find_element_by_xpath (str)
break
except:
if time.time() > v_timeout:
break
return vo_result
def depart_all():
print("Starting depart All")
click_xpath ('//*[@id="flightStatusLanded"]')
click_xpath ('//*[@id="listDepartAll"]/div/button[2]')
print("finish depart all")
def full_tank():
# full tank fuel:
click_xpath('//*[@id="mapMaint"]')
state_string=WebDriverWait(browser, 10).until(EC.presence_of_element_located((By.XPATH, '//*[@id="remCapacity"]')))
v_capacity = browser.find_element_by_xpath ('//*[@id="remCapacity"]')
v_purchase = browser.find_element_by_xpath ('//*[@id="amountInput"]')
v_purchase.clear()
int_capacity = int(v_capacity.text.replace(',',''))
# only purchase fuel when the remaining capacity shown on the page exceeds 64,000,000 (i.e. fuel is low)
if(int_capacity > 64000000):
print('low capacity')
v_purchase.send_keys ('6000000')
time.sleep(0.5)
click_xpath('//*[@id="fuelMain"]/div/div[7]/div/button[2]')
time.sleep(3)
# full tank co:
click_xpath('//*[@id="popBtn2"]')
time.sleep(0.5)
v_capacity = browser.find_element_by_xpath ('//*[@id="holding"]')
v_purchase = browser.find_element_by_xpath ('//*[@id="amountInput"]')
v_purchase.clear()
print('co holding ' + v_capacity.text)
# only fill if co negative
if(v_capacity.text.startswith("-")):
int_holding = int(v_capacity.text.replace(",","").replace("-",""))
max_capacity = int_holding + 13000000
v_purchase.send_keys (str(max_capacity))
print('after filling co')
time.sleep(0.5)
click_xpath('//*[@id="co2Main"]/div/div[8]/div/button[2]')
click_xpath ('//*[@id="popup"]/div/div/div[1]/div/span')
time.sleep(2)
def fill_tanks_and_depart_all():
while True:
print('inside fill_tanks_and_depart_all')
curr_time = time.localtime()
curr_clock = time.strftime("%H:%M:%S", curr_time)
print(curr_clock)
v_found = 0
v_timeout = time.time() + 10
while True:
try:
time.sleep(0.2)
v_departall = browser.find_element_by_xpath ('//*[@id="listDepartAll"]/div/button[2]')
print('inside second while')
if v_departall.is_displayed():
print('depart is displayed')
v_found = 1
break
except:
print('pass from second while')
pass
if time.time() > v_timeout:
print('time out')
break
if v_found == 1:
print('found == 1 on first while')
full_tank()
print('after full tank()')
time.sleep(2)
depart_all()
print('after depart all')
time.sleep(2)
else:
break
if __name__ == '__main__':
chrome_options = webdriver.ChromeOptions()
chrome_options.add_argument('--no-sandbox')
chrome_options.add_argument('--headless')
chrome_options.add_argument('--disable-gpu')
chrome_options.add_argument('--disable-dev-shm-usage')
chrome_options.add_argument("--window-size=1920,1080")
browser = webdriver.Chrome(chrome_options=chrome_options)
browser.get(os.environ.get("AM4_URL"))
browser.find_element_by_xpath ('//*[@id="flightStatusInflight"]')
browser.implicitly_wait(2)
fill_tanks_and_depart_all()
| [] | [] | ["AM4_URL"] | [] | ["AM4_URL"] | python | 1 | 0 | |
homepage/wsgi.py | """
WSGI config for homepage project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
# We defer to a DJANGO_SETTINGS_MODULE already in the environment. This breaks
# if running multiple sites in the same mod_wsgi process. To fix this, use
# mod_wsgi daemon mode with each site in its own daemon process, or use
# os.environ["DJANGO_SETTINGS_MODULE"] = "homepage.settings"
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "homepage.settings")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
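# A minimal sketch of the middleware delegation described in the module
# docstring; the class below is illustrative and not part of Django or this
# project:
#
# class LoggingMiddleware:
#     def __init__(self, wrapped):
#         self.wrapped = wrapped
#
#     def __call__(self, environ, start_response):
#         print("WSGI request for", environ.get("PATH_INFO"))
#         return self.wrapped(environ, start_response)
#
# application = LoggingMiddleware(application)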
| [] | [] | ["DJANGO_SETTINGS_MODULE"] | [] | ["DJANGO_SETTINGS_MODULE"] | python | 1 | 0 | |
molecule/default/tests/test_intermediate.py | import os
import testinfra.utils.ansible_runner
import pytest
testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('ssl-certificate-u1604')
@pytest.mark.parametrize("cert", [
'bundled.crt',
'combined.pem',
])
def test_intermediate(host, cert):
with host.sudo():
out = host.check_output(
'openssl crl2pkcs7 -nocrl -certfile /etc/ssl/localcerts/%s | '
'openssl pkcs7 -print_certs -text -noout |grep Subject:'
% cert)
lines = out.splitlines()
assert lines[0].strip().startswith(
'Subject: C=UK, ST=Scotland, L=Dundee, O=OME, CN=')
assert lines[1].strip().startswith(
'Subject: C=UK, ST=Dummy, L=Intermediate, O=Certificate')
@pytest.mark.parametrize("cert", [
'bundled.crt',
'combined.pem',
])
def test_combined(host, cert):
with host.sudo():
out = host.check_output('grep BEGIN /etc/ssl/localcerts/%s' % cert)
lines = out.splitlines()
assert lines[0] == '-----BEGIN CERTIFICATE-----'
assert lines[1] == '-----BEGIN CERTIFICATE-----'
if cert == 'bundled.crt':
assert len(lines) == 2
else:
assert len(lines) == 3
assert lines[2] == '-----BEGIN PRIVATE KEY-----'
| [] | [] | ["MOLECULE_INVENTORY_FILE"] | [] | ["MOLECULE_INVENTORY_FILE"] | python | 1 | 0 | |
tools/publish_tool.py | import argparse
import os
import subprocess
import sys
def log(message, command=False):
prefix = "$" if command else "#"
print(f"{prefix} {message}", file=sys.stderr)
def run_command(description, args, capture_output=True, shell=True):
if description:
log(description)
printed_args = " ".join(args) if type(args) == list else args
log(printed_args, command=True)
stdout = subprocess.PIPE if capture_output else None
completed_process = subprocess.run(args, stdout=stdout, shell=shell, check=True, encoding="utf-8")
return completed_process.stdout.rstrip() if capture_output else None
def rust(args):
log(f"*** PUBLISHING RUST LIBRARY ***")
os.environ["CARGO_REGISTRY_TOKEN"] = os.environ["CRATES_IO_API_TOKEN"]
run_command("Logging into crates.io", f"cargo login")
dry_run_arg = " --dry-run" if args.dry_run else ""
run_command("Publishing opendp crate", f"cargo publish{dry_run_arg} --verbose --manifest-path=rust/opendp/Cargo.toml")
run_command("Letting crates.io index settle", f"sleep {args.settle_time}")
run_command("Publishing opendp-ffi crate", f"cargo publish{dry_run_arg} --verbose --manifest-path=rust/opendp-ffi/Cargo.toml")
def python(args):
log(f"*** PUBLISHING PYTHON LIBRARY ***")
# https://pypi.org/help/#apitoken
os.environ["TWINE_USERNAME"] = "__token__"
os.environ["TWINE_PASSWORD"] = os.environ["PYPI_API_TOKEN"]
dry_run_arg = " --repository testpypi" if args.dry_run else ""
run_command("Publishing opendp package", f"python3 -m twine upload{dry_run_arg} --verbose --skip-existing python/wheelhouse/*")
def meta(args):
meta_args = [
f"rust -r {args.rust_token}",
f"python -p {args.python_token}",
]
for args in meta_args:
_main(f"meta {args}".split())
def _main(argv):
parser = argparse.ArgumentParser(description="OpenDP build tool")
subparsers = parser.add_subparsers(dest="COMMAND", help="Command to run")
subparsers.required = True
subparser = subparsers.add_parser("rust", help="Publish Rust library")
subparser.set_defaults(func=rust)
subparser.add_argument("-n", "--dry-run", dest="dry_run", action="store_true", default=False)
subparser.add_argument("-nn", "--no-dry-run", dest="dry_run", action="store_false")
subparser.add_argument("-s", "--settle-time", default=60)
subparser = subparsers.add_parser("python", help="Publish Python library")
subparser.set_defaults(func=python)
subparser.add_argument("-n", "--dry-run", dest="dry_run", action="store_true", default=False)
subparser.add_argument("-nn", "--no-dry-run", dest="dry_run", action="store_false")
subparser = subparsers.add_parser("all", help="Publish everything")
subparser.set_defaults(func=meta, command="all")
args = parser.parse_args(argv[1:])
args.func(args)
def main():
_main(sys.argv)
if __name__ == "__main__":
main()
| [] | [] | ["PYPI_API_TOKEN", "CARGO_REGISTRY_TOKEN", "TWINE_USERNAME", "TWINE_PASSWORD", "CRATES_IO_API_TOKEN"] | [] | ["PYPI_API_TOKEN", "CARGO_REGISTRY_TOKEN", "TWINE_USERNAME", "TWINE_PASSWORD", "CRATES_IO_API_TOKEN"] | python | 5 | 0 | |
cmd/mdl/gen.go | package main
import (
"fmt"
"io/ioutil"
"os"
"os/exec"
"path"
"goa.design/goa/v3/codegen"
"golang.org/x/tools/go/packages"
)
const tmpDirPrefix = "mdl--"
func gen(pkg string, debug bool) ([]byte, error) {
// Validate package import path
if _, err := packages.Load(&packages.Config{Mode: packages.NeedName}, pkg); err != nil {
return nil, err
}
// Write program that generates JSON
cwd, err := os.Getwd()
if err != nil {
cwd = "."
}
tmpDir, err := ioutil.TempDir(cwd, tmpDirPrefix)
if err != nil {
return nil, err
}
defer func() { os.RemoveAll(tmpDir) }()
var sections []*codegen.SectionTemplate
{
imports := []*codegen.ImportSpec{
codegen.SimpleImport("fmt"),
codegen.SimpleImport("io/ioutil"),
codegen.SimpleImport("encoding/json"),
codegen.SimpleImport("os"),
codegen.SimpleImport("goa.design/model/mdl"),
codegen.NewImport("_", pkg),
}
sections = []*codegen.SectionTemplate{
codegen.Header("Code Generator", "main", imports),
{Name: "main", Source: mainT},
}
}
cf := &codegen.File{Path: "main.go", SectionTemplates: sections}
if _, err := cf.Render(tmpDir); err != nil {
return nil, err
}
// Compile program
gobin, err := exec.LookPath("go")
if err != nil {
return nil, fmt.Errorf(`failed to find a go compiler, looked in "%s"`, os.Getenv("PATH"))
}
if _, err := runCmd(gobin, tmpDir, "build", "-o", "mdl"); err != nil {
return nil, err
}
// Run program
o, err := runCmd(path.Join(tmpDir, "mdl"), tmpDir, "model.json")
if debug {
fmt.Fprintln(os.Stderr, o)
}
if err != nil {
return nil, err
}
return ioutil.ReadFile(path.Join(tmpDir, "model.json"))
}
func runCmd(path, dir string, args ...string) (string, error) {
_ = os.Setenv("GO111MODULE", "on")
args = append([]string{path}, args...) // args[0] becomes exec path
c := exec.Cmd{Path: path, Args: args, Dir: dir}
b, err := c.CombinedOutput()
if err != nil {
if len(b) > 0 {
return "", fmt.Errorf(string(b))
}
return "", fmt.Errorf("failed to run command %q in directory %q: %s", path, dir, err)
}
return string(b), nil
}
// mainT is the template for the generator main.
const mainT = `func main() {
// Retrieve output path
out := os.Args[1]
// Run the model DSL
w, err := mdl.RunDSL()
if err != nil {
fmt.Fprint(os.Stderr, err.Error())
os.Exit(1)
}
b, err := json.MarshalIndent(w, "", " ")
if err != nil {
fmt.Fprintf(os.Stderr, "failed to encode into JSON: %s", err.Error())
os.Exit(1)
}
if err := ioutil.WriteFile(out, b, 0644); err != nil {
fmt.Fprintf(os.Stderr, "failed to write file: %s", err.Error())
os.Exit(1)
}
}
`
| ["\"PATH\""] | [] | ["PATH"] | [] | ["PATH"] | go | 1 | 0 | |
pkg/server/controller.go | package server
import (
"encoding/json"
"fmt"
"io/ioutil"
golog "log"
"net/http"
"os"
"runtime"
"sync"
"text/template"
"time"
_ "net/http/pprof"
"github.com/markbates/pkger"
"github.com/pyroscope-io/pyroscope/pkg/build"
"github.com/pyroscope-io/pyroscope/pkg/config"
"github.com/pyroscope-io/pyroscope/pkg/storage"
"github.com/pyroscope-io/pyroscope/pkg/util/hyperloglog"
"github.com/sirupsen/logrus"
)
type Controller struct {
cfg *config.Config
s *storage.Storage
httpServer *http.Server
statsMutex sync.Mutex
stats map[string]int
appStats *hyperloglog.HyperLogLogPlus
}
func New(cfg *config.Config, s *storage.Storage) *Controller {
appStats, _ := hyperloglog.NewPlus(uint8(18))
return &Controller{
cfg: cfg,
s: s,
stats: make(map[string]int),
appStats: appStats,
}
}
func (ctrl *Controller) Stop() error {
if ctrl.httpServer != nil {
return ctrl.httpServer.Close()
}
return nil
}
// TODO: split the cli initialization from HTTP controller logic
func (ctrl *Controller) Start() {
mux := http.NewServeMux()
mux.HandleFunc("/ingest", ctrl.ingestHandler)
mux.HandleFunc("/render", ctrl.renderHandler)
mux.HandleFunc("/labels", ctrl.labelsHandler)
mux.HandleFunc("/label-values", ctrl.labelValuesHandler)
var dir http.FileSystem
if build.UseEmbeddedAssets {
// for this to work you need to run `pkger` first. See Makefile for more information
dir = pkger.Dir("/webapp/public")
} else {
dir = http.Dir("./webapp/public")
}
fs := http.FileServer(dir)
mux.HandleFunc("/", func(rw http.ResponseWriter, r *http.Request) {
if r.URL.Path == "/" {
ctrl.statsInc("index")
ctrl.renderIndexPage(dir, rw, r)
} else if r.URL.Path == "/comparison" {
ctrl.statsInc("index")
ctrl.renderIndexPage(dir, rw, r)
} else {
fs.ServeHTTP(rw, r)
}
})
logger := logrus.New()
w := logger.Writer()
defer w.Close()
ctrl.httpServer = &http.Server{
Addr: ctrl.cfg.Server.APIBindAddr,
Handler: mux,
ReadTimeout: 10 * time.Second,
WriteTimeout: 10 * time.Second,
IdleTimeout: 30 * time.Second,
MaxHeaderBytes: 1 << 20,
ErrorLog: golog.New(w, "", 0),
}
err := ctrl.httpServer.ListenAndServe()
if err != nil {
if err == http.ErrServerClosed {
return
}
logrus.Error(err)
}
}
func renderServerError(rw http.ResponseWriter, text string) {
rw.WriteHeader(500)
rw.Write([]byte(text))
rw.Write([]byte("\n"))
}
type indexPageJSON struct {
AppNames []string `json:"appNames"`
}
type buildInfoJSON struct {
GOOS string `json:"goos"`
GOARCH string `json:"goarch"`
Version string `json:"version"`
ID string `json:"id"`
Time string `json:"time"`
GitSHA string `json:"gitSHA"`
GitDirty int `json:"gitDirty"`
UseEmbeddedAssets bool `json:"useEmbeddedAssets"`
}
type indexPage struct {
InitialState string
BuildInfo string
ExtraMetadata string
BaseURL string
}
func (ctrl *Controller) renderIndexPage(dir http.FileSystem, rw http.ResponseWriter, _ *http.Request) {
f, err := dir.Open("/index.html")
if err != nil {
renderServerError(rw, fmt.Sprintf("could not find file index.html: %q", err))
return
}
b, err := ioutil.ReadAll(f)
if err != nil {
renderServerError(rw, fmt.Sprintf("could not read file index.html: %q", err))
return
}
tmpl, err := template.New("index.html").Parse(string(b))
if err != nil {
renderServerError(rw, fmt.Sprintf("could not parse index.html template: %q", err))
return
}
initialStateObj := indexPageJSON{}
ctrl.s.GetValues("__name__", func(v string) bool {
initialStateObj.AppNames = append(initialStateObj.AppNames, v)
return true
})
b, err = json.Marshal(initialStateObj)
if err != nil {
renderServerError(rw, fmt.Sprintf("could not marshal initialStateObj json: %q", err))
return
}
initialStateStr := string(b)
buildInfoObj := buildInfoJSON{
GOOS: runtime.GOOS,
GOARCH: runtime.GOARCH,
Version: build.Version,
ID: build.ID,
Time: build.Time,
GitSHA: build.GitSHA,
GitDirty: build.GitDirty,
UseEmbeddedAssets: build.UseEmbeddedAssets,
}
b, err = json.Marshal(buildInfoObj)
if err != nil {
renderServerError(rw, fmt.Sprintf("could not marshal buildInfoObj json: %q", err))
return
}
buildInfoStr := string(b)
var extraMetadataStr string
extraMetadataPath := os.Getenv("PYROSCOPE_EXTRA_METADATA")
if extraMetadataPath != "" {
b, err = ioutil.ReadFile(extraMetadataPath)
if err != nil {
logrus.Errorf("failed to read file at %s", extraMetadataPath)
}
extraMetadataStr = string(b)
}
rw.Header().Add("Content-Type", "text/html")
rw.WriteHeader(200)
err = tmpl.Execute(rw, indexPage{
InitialState: initialStateStr,
BuildInfo: buildInfoStr,
ExtraMetadata: extraMetadataStr,
BaseURL: ctrl.cfg.Server.BaseURL,
})
if err != nil {
renderServerError(rw, fmt.Sprintf("could not marshal json: %q", err))
return
}
}
| ["\"PYROSCOPE_EXTRA_METADATA\""] | [] | ["PYROSCOPE_EXTRA_METADATA"] | [] | ["PYROSCOPE_EXTRA_METADATA"] | go | 1 | 0 | |
src/autoscaler/sync/sync_suite_test.go | package sync_test
import (
"database/sql"
"fmt"
"os"
"autoscaler/db"
_ "github.com/go-sql-driver/mysql"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"testing"
)
func TestSync(t *testing.T) {
RegisterFailHandler(Fail)
RunSpecs(t, "Sync Suite")
}
var (
dbHelper *sql.DB
lockTableName = "test_lock"
)
var _ = BeforeSuite(func() {
var e error
dbUrl := os.Getenv("DBURL")
if dbUrl == "" {
Fail("environment variable $DBURL is not set")
}
database, e := db.GetConnection(dbUrl)
if e != nil {
Fail("failed to get database URL and drivername: " + e.Error())
}
dbHelper, e = sql.Open(database.DriverName, database.DSN)
if e != nil {
Fail("can not connect database: " + e.Error())
}
e = createLockTable()
if e != nil {
Fail("can not create test lock table: " + e.Error())
}
})
var _ = AfterSuite(func() {
if dbHelper != nil {
e := dropLockTable()
if e != nil {
Fail("can not drop test lock table: " + e.Error())
}
dbHelper.Close()
}
})
func getLockOwner() string {
var owner string
query := fmt.Sprintf("SELECT owner FROM %s", lockTableName)
// #nosec G201
row := dbHelper.QueryRow(query)
err := row.Scan(&owner)
if err == sql.ErrNoRows {
owner = ""
}
return owner
}
func cleanLockTable() error {
_, err := dbHelper.Exec(fmt.Sprintf("DELETE FROM %s", lockTableName))
if err != nil {
return err
}
return nil
}
func dropLockTable() error {
_, err := dbHelper.Exec(fmt.Sprintf("DROP TABLE %s", lockTableName))
if err != nil {
return err
}
return nil
}
func createLockTable() error {
_, err := dbHelper.Exec(fmt.Sprintf(`
CREATE TABLE IF NOT EXISTS %s (
owner VARCHAR(255) PRIMARY KEY,
lock_timestamp TIMESTAMP NOT NULL,
ttl BIGINT DEFAULT 0
);
`, lockTableName))
if err != nil {
return err
}
return nil
}
| ["\"DBURL\""] | [] | ["DBURL"] | [] | ["DBURL"] | go | 1 | 0 | |
oracle/pkg/database/dbdaemon/dbdaemon_server.go | // Copyright 2021 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package dbdaemon implements a gRPC service for
// running privileged database ops, e.g. sqlplus, rman.
package dbdaemon
import (
"bufio"
"context"
"database/sql"
"encoding/json"
"fmt"
"io/ioutil"
"net/url"
"os"
"os/exec"
"os/user"
"path"
"path/filepath"
"strconv"
"strings"
"sync"
"time"
"cloud.google.com/go/storage"
"google.golang.org/api/iterator"
"github.com/godror/godror" // Register database/sql driver
"github.com/golang/protobuf/ptypes"
"github.com/golang/protobuf/ptypes/empty"
"github.com/pkg/errors"
lropb "google.golang.org/genproto/googleapis/longrunning"
"google.golang.org/grpc"
"google.golang.org/protobuf/proto"
"k8s.io/klog/v2"
"github.com/GoogleCloudPlatform/elcarro-oracle-operator/oracle/pkg/agents/common"
"github.com/GoogleCloudPlatform/elcarro-oracle-operator/oracle/pkg/agents/consts"
dbdpb "github.com/GoogleCloudPlatform/elcarro-oracle-operator/oracle/pkg/agents/oracle"
"github.com/GoogleCloudPlatform/elcarro-oracle-operator/oracle/pkg/agents/security"
"github.com/GoogleCloudPlatform/elcarro-oracle-operator/oracle/pkg/database/lib/lro"
"github.com/GoogleCloudPlatform/elcarro-oracle-operator/oracle/pkg/database/provision"
)
const (
listenerDir = "/u02/app/oracle/oraconfig/network"
)
var (
oraDataDir = "/u02/app/oracle/oradata"
maxWalkFiles = 10000
)
// oracleDatabase defines the sql.DB APIs, which will be used in this package
type oracleDatabase interface {
ExecContext(ctx context.Context, query string, args ...interface{}) (sql.Result, error)
QueryContext(ctx context.Context, query string, args ...interface{}) (*sql.Rows, error)
Ping() error
Close() error
}
type dbdaemon interface {
shutdownDatabase(context.Context, godror.ShutdownMode) error
startupDatabase(context.Context, godror.StartupMode, string) error
setDatabaseUpgradeMode(ctx context.Context) error
openPDBs(ctx context.Context) error
runSQL(context.Context, []string, bool, bool, oracleDatabase) ([]string, error)
runQuery(context.Context, []string, oracleDatabase) ([]string, error)
}
// DB is a wrapper around database/sql.DB database handle.
// In unit tests it gets mocked with the FakeDB.
type DB struct {
}
// Server holds a database config.
type Server struct {
*dbdpb.UnimplementedDatabaseDaemonServer
hostName string
database dbdaemon
databaseSid *syncState
databaseHome string
pdbConnStr string
osUtil osUtil
dbdClient dbdpb.DatabaseDaemonProxyClient
dbdClientClose func() error
lroServer *lro.Server
syncJobs *syncJobs
gcsUtil GCSUtil
}
// Remove pdbConnStr from String(), as that may contain the pdb user/password
// Remove UnimplementedDatabaseDaemonServer field to improve logs for better readability
func (s Server) String() string {
pdbConnStr := s.pdbConnStr
if pdbConnStr != "" {
pdbConnStr = "<REDACTED>"
}
return fmt.Sprintf("{hostName=%q, database=%+v, databaseSid=%+v, databaseHome=%q, pdbConnStr=%q}", s.hostName, s.database, s.databaseSid, s.databaseHome, pdbConnStr)
}
type syncState struct {
sync.RWMutex
val string
}
type syncJobs struct {
// pdbLoadMutex is a mutex for operations running
// under consts.PDBLoaderUser user, currently those are DataPump import/export.
// pdbLoadMutex is used to ensure only one of such operations is running at a time.
pdbLoadMutex sync.Mutex
// Mutex used for maintenance operations (currently for patching)
maintenanceMutex sync.RWMutex
}
// Call this function to get any buffered DBMS_OUTPUT. sqlplus* calls this
// after every command issued. Typically any output you expect to see from
// sqlplus* will be returned via DBMS_OUTPUT.
func dbmsOutputGetLines(ctx context.Context, db oracleDatabase) ([]string, error) {
lines := make([]string, 0, 1024)
status := 0
// 0 is success, until it fails there may be more lines buffered.
for status == 0 {
var line string
if _, err := db.ExecContext(ctx, "BEGIN DBMS_OUTPUT.GET_LINE(:line, :status); END;",
sql.Named("line", sql.Out{Dest: &line}),
sql.Named("status", sql.Out{Dest: &status, In: true})); err != nil {
return nil, err
}
if status == 0 {
lines = append(lines, line)
}
}
return lines, nil
}
// shutdownDatabase performs a database shutdown in a requested <mode>.
// It always connects to the local database.
// Set ORACLE_HOME and ORACLE_SID in the env to control the target database.
// A caller may decide to ignore ORA-1034 and just log a warning
// if the database is already down (or raise an error if appropriate).
func (d *DB) shutdownDatabase(ctx context.Context, mode godror.ShutdownMode) error {
// Consider allowing PRELIM mode connections for SHUTDOWN ABORT mode.
// This is useful when the server has maxed out on connections.
db, err := sql.Open("godror", "oracle://?sysdba=1")
if err != nil {
klog.ErrorS(err, "dbdaemon/shutdownDatabase: failed to connect to a database")
return err
}
defer db.Close()
oraDB, err := godror.DriverConn(ctx, db)
if err != nil {
return err
}
if err := oraDB.Shutdown(mode); err != nil {
return err
}
// The shutdown process is over after the first Shutdown call in ABORT
// mode.
if mode == godror.ShutdownAbort {
return err
}
_, err = db.Exec("alter database close normal")
if err != nil && strings.Contains(err.Error(), "ORA-01507:") {
klog.InfoS("dbdaemon/shutdownDatabase: database is already closed", "err", err)
err = nil
}
if err != nil {
return err
}
_, err = db.Exec("alter database dismount")
if err != nil && strings.Contains(err.Error(), "ORA-01507:") {
klog.InfoS("dbdaemon/shutdownDatabase: database is already dismounted", "err", err)
err = nil
}
if err != nil {
return err
}
return oraDB.Shutdown(godror.ShutdownFinal)
}
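// A minimal caller-side sketch of the ORA-1034 handling suggested above:
// treat "database already down" as non-fatal and log it rather than failing.
// The helper name is illustrative; it only uses identifiers already defined
// or imported in this file.
func shutdownIgnoringAlreadyDown(ctx context.Context, d *DB, mode godror.ShutdownMode) error {
	err := d.shutdownDatabase(ctx, mode)
	if err != nil && strings.Contains(err.Error(), "ORA-01034") {
		klog.InfoS("dbdaemon/shutdownIgnoringAlreadyDown: database already down, ignoring", "err", err)
		return nil
	}
	return err
}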
// startupDatabase performs a database startup in a requested mode.
// godror.StartupMode controls FORCE/RESTRICT options.
// databaseState string controls NOMOUNT/MOUNT/OPEN options.
// Setting a pfile to use on startup is currently unsupported.
// It always connects to the local database.
// Set ORACLE_HOME and ORACLE_SID in the env to control the target database.
func (d *DB) startupDatabase(ctx context.Context, mode godror.StartupMode, state string) error {
// To startup a shutdown database, open a prelim connection.
db, err := sql.Open("godror", "oracle://?sysdba=1&prelim=1")
if err != nil {
return err
}
defer db.Close()
oraDB, err := godror.DriverConn(ctx, db)
if err != nil {
return err
}
if err := oraDB.Startup(mode); err != nil {
return err
}
if strings.ToLower(state) == "nomount" {
return nil
}
// To finish mounting/opening, open a normal connection.
db2, err := sql.Open("godror", "oracle://?sysdba=1")
if err != nil {
return err
}
defer db2.Close()
if _, err := db2.Exec("alter database mount"); err != nil {
return err
}
if strings.ToLower(state) == "mount" {
return nil
}
_, err = db2.Exec("alter database open")
return err
}
// Turn a freshly started NOMOUNT database to a migrate mode
// Opens CDB in upgrade mode
// Opens all PDBs in upgrade mode
// Executes the following steps:
// SQL> alter database mount
// SQL> alter database open upgrade
// SQL> alter pluggable database all open upgrade
func (d *DB) setDatabaseUpgradeMode(ctx context.Context) error {
db, err := sql.Open("godror", "oracle://?sysdba=1")
if err != nil {
return fmt.Errorf("dbdaemon/setDatabaseUpgradeMode failed to open DB connection: %w", err)
}
defer db.Close()
// SQL> alter database mount -- this will turn CDB$ROOT, PDB$SEED and all PDBs into 'MOUNTED' state
if _, err := db.Exec("alter database mount"); err != nil {
return err
}
// SQL> alter database open upgrade -- this will turn CDB$ROOT, PDB$SEED into 'MIGRATE' state
if _, err := db.Exec("alter database open upgrade"); err != nil {
return err
}
// SQL> alter pluggable database all open upgrade
if _, err := db.Exec("alter pluggable database all open upgrade"); err != nil {
return err
}
// At this point CDB$ROOT, PDB$SEED and all PDBs should be in 'MIGRATE' state
// Check that all container states = 'MIGRATE'
rows, err := db.Query("SELECT name,open_mode FROM v$containers")
if err != nil {
return err
}
defer rows.Close()
for rows.Next() {
var name, openMode string
if err := rows.Scan(&name, &openMode); err != nil {
return err
}
klog.InfoS("dbdaemon/setDatabaseUpgradeMode CONTAINER MODE: ", name, openMode)
if openMode != "MIGRATE" {
return fmt.Errorf("failed to turn container %v into MIGRATE mode: %w", name, err)
}
}
return nil
}
// Open all PDBs
func (d *DB) openPDBs(ctx context.Context) error {
db, err := sql.Open("godror", "oracle://?sysdba=1")
if err != nil {
return fmt.Errorf("dbdaemon/openPDBs: failed to open DB connection: %w", err)
}
defer db.Close()
// SQL> alter pluggable database all open
if _, err := db.Exec("alter pluggable database all open"); err != nil {
return err
}
return nil
}
// CreatePasswordFile is a Database Daemon method to create password file.
func (s *Server) CreatePasswordFile(ctx context.Context, req *dbdpb.CreatePasswordFileRequest) (*dbdpb.CreatePasswordFileResponse, error) {
if req.GetDatabaseName() == "" {
return nil, fmt.Errorf("missing database name for req: %v", req)
}
if req.GetSysPassword() == "" {
return nil, fmt.Errorf("missing password for req: %v", req)
}
passwordFile := fmt.Sprintf("%s/orapw%s", req.Dir, strings.ToUpper(req.DatabaseName))
params := []string{fmt.Sprintf("file=%s", passwordFile)}
params = append(params, fmt.Sprintf("password=%s", req.SysPassword))
params = append(params, "ignorecase=n")
if err := os.Remove(passwordFile); err != nil {
klog.Warningf("failed to remove %v: %v", passwordFile, err)
}
if err := s.osUtil.runCommand(orapwd(s.databaseHome), params); err != nil {
return nil, fmt.Errorf("orapwd cmd failed: %v", err)
}
return &dbdpb.CreatePasswordFileResponse{}, nil
}
// SetListenerRegistration is a Database Daemon method to create a static listener registration.
func (s *Server) SetListenerRegistration(ctx context.Context, req *dbdpb.SetListenerRegistrationRequest) (*dbdpb.BounceListenerResponse, error) {
return nil, fmt.Errorf("not implemented")
}
// physicalRestore runs
// 1. RMAN restore command
// 2. SQL to get latest SCN
// 3. RMAN recover command, created by applying SCN value
// to the recover statement template passed as a parameter.
func (s *Server) physicalRestore(ctx context.Context, req *dbdpb.PhysicalRestoreRequest) (*empty.Empty, error) {
errorPrefix := "dbdaemon/physicalRestore: "
if _, err := s.RunRMAN(ctx, &dbdpb.RunRMANRequest{Scripts: []string{req.GetRestoreStatement()}}); err != nil {
return nil, fmt.Errorf(errorPrefix+"failed to restore a database: %v", err)
}
scnResp, err := s.RunSQLPlusFormatted(ctx, &dbdpb.RunSQLPlusCMDRequest{Commands: []string{req.GetLatestRecoverableScnQuery()}})
if err != nil || len(scnResp.GetMsg()) < 1 {
return nil, fmt.Errorf(errorPrefix+"failed to query archive log SCNs, results: %v, err: %v", scnResp, err)
}
row := make(map[string]string)
if err := json.Unmarshal([]byte(scnResp.GetMsg()[0]), &row); err != nil {
return nil, err
}
scn, ok := row["SCN"]
if !ok {
return nil, fmt.Errorf(errorPrefix + "failed to find column SCN in the archive log query")
}
latestSCN, err := strconv.ParseInt(scn, 10, 64)
if err != nil {
return nil, fmt.Errorf(errorPrefix+"failed to parse the SCN query (%v) to find int64: %v", scn, err)
}
recoverStmt := fmt.Sprintf(req.GetRecoverStatementTemplate(), latestSCN)
klog.InfoS(errorPrefix+"final recovery request", "recoverStmt", recoverStmt)
recoverReq := &dbdpb.RunRMANRequest{Scripts: []string{recoverStmt}}
if _, err := s.RunRMAN(ctx, recoverReq); err != nil {
return nil, fmt.Errorf(errorPrefix+"failed to recover a database: %v", err)
}
// always remove rman staging dir for restore from GCS
if err := os.RemoveAll(consts.RMANStagingDir); err != nil {
klog.Warningf("physicalRestore: can't cleanup staging dir from local disk.")
}
return &empty.Empty{}, nil
}
// PhysicalRestoreAsync turns physicalRestore into an async call.
func (s *Server) PhysicalRestoreAsync(ctx context.Context, req *dbdpb.PhysicalRestoreAsyncRequest) (*lropb.Operation, error) {
job, err := lro.CreateAndRunLROJobWithID(ctx, req.GetLroInput().GetOperationId(), "PhysicalRestore", s.lroServer,
func(ctx context.Context) (proto.Message, error) {
return s.physicalRestore(ctx, req.SyncRequest)
})
if err != nil {
klog.ErrorS(err, "dbdaemon/PhysicalRestoreAsync failed to create an LRO job", "request", req)
return nil, err
}
return &lropb.Operation{Name: job.ID(), Done: false}, nil
}
// dataPumpImport runs the impdp Oracle tool against an existing PDB,
// importing data from a data pump .dmp file.
func (s *Server) dataPumpImport(ctx context.Context, req *dbdpb.DataPumpImportRequest) (*dbdpb.DataPumpImportResponse, error) {
s.syncJobs.pdbLoadMutex.Lock()
defer s.syncJobs.pdbLoadMutex.Unlock()
importFilename := "import.dmp"
logFilename := "import.log"
pdbPath := fmt.Sprintf(consts.PDBPathPrefix, consts.DataMount, s.databaseSid.val, strings.ToUpper(req.PdbName))
dumpDir := filepath.Join(pdbPath, consts.DpdumpDir.Linux)
klog.InfoS("dbdaemon/dataPumpImport", "dumpDir", dumpDir)
dmpReader, err := s.gcsUtil.Download(ctx, req.GcsPath)
if err != nil {
return nil, fmt.Errorf("dbdaemon/dataPumpImport: initiating GCS download failed: %v", err)
}
defer dmpReader.Close()
importFileFullPath := filepath.Join(dumpDir, importFilename)
if err := s.osUtil.createFile(importFileFullPath, dmpReader); err != nil {
return nil, fmt.Errorf("dbdaemon/dataPumpImport: download from GCS failed: %v", err)
}
klog.Infof("dbdaemon/dataPumpImport: downloaded import dmp file from %s to %s", req.GcsPath, importFileFullPath)
defer func() {
if err := s.osUtil.removeFile(importFileFullPath); err != nil {
klog.Warning(fmt.Sprintf("dbdaemon/dataPumpImport: failed to remove import dmp file after import: %v", err))
}
}()
impdpTarget, err := security.SetupUserPwConnStringOnServer(ctx, s, consts.PDBLoaderUser, req.PdbName, req.DbDomain)
if err != nil {
return nil, fmt.Errorf("dbdaemon/dataPumpImport: failed to alter user %s", consts.PDBLoaderUser)
}
params := []string{impdpTarget}
params = append(params, req.CommandParams...)
params = append(params, fmt.Sprintf("directory=%s", consts.DpdumpDir.Oracle))
params = append(params, "dumpfile="+importFilename)
params = append(params, "logfile="+logFilename)
if err := s.runCommand(impdp(s.databaseHome), params); err != nil {
// On error code 5 (EX_SUCC_ERR), process completed reached the
// end but data in the DMP might have been skipped (foreign
// schemas, already imported tables, even failed schema imports
// because the DMP didn't include CREATE USER statements.)
if !s.osUtil.isReturnCodeEqual(err, 5) {
return nil, fmt.Errorf("data pump import failed, err = %v", err)
}
klog.Warning("dbdaemon/dataPumpImport: completed with EX_SUCC_ERR")
}
if len(req.GcsLogPath) > 0 {
logFullPath := filepath.Join(dumpDir, logFilename)
if err := s.gcsUtil.UploadFile(ctx, req.GcsLogPath, logFullPath, contentTypePlainText); err != nil {
return nil, fmt.Errorf("dbdaemon/dataPumpImport: import completed successfully, failed to upload import log to GCS: %v", err)
}
klog.Infof("dbdaemon/dataPumpImport: uploaded import log to %s", req.GcsLogPath)
}
return &dbdpb.DataPumpImportResponse{}, nil
}
// DataPumpImportAsync turns dataPumpImport into an async call.
func (s *Server) DataPumpImportAsync(ctx context.Context, req *dbdpb.DataPumpImportAsyncRequest) (*lropb.Operation, error) {
job, err := lro.CreateAndRunLROJobWithID(ctx, req.GetLroInput().GetOperationId(), "DataPumpImport", s.lroServer,
func(ctx context.Context) (proto.Message, error) {
return s.dataPumpImport(ctx, req.SyncRequest)
})
if err != nil {
klog.ErrorS(err, "dbdaemon/DataPumpImportAsync failed to create an LRO job", "request", req)
return nil, err
}
return &lropb.Operation{Name: job.ID(), Done: false}, nil
}
// dataPumpExport runs the expdp Oracle tool to export data to a data pump .dmp file.
func (s *Server) dataPumpExport(ctx context.Context, req *dbdpb.DataPumpExportRequest) (*dbdpb.DataPumpExportResponse, error) {
s.syncJobs.pdbLoadMutex.Lock()
defer s.syncJobs.pdbLoadMutex.Unlock()
dmpObjectType := "SCHEMAS"
exportName := fmt.Sprintf("export_%s", time.Now().Format("20060102150405"))
dmpFile := exportName + ".dmp"
dmpLogFile := exportName + ".log"
parFile := exportName + ".par"
if len(req.ObjectType) != 0 {
dmpObjectType = req.ObjectType
}
pdbPath := fmt.Sprintf(consts.PDBPathPrefix, consts.DataMount, s.databaseSid.val, strings.ToUpper(req.PdbName))
dmpPath := filepath.Join(pdbPath, consts.DpdumpDir.Linux, dmpFile) // full path
parPath := filepath.Join(pdbPath, consts.DpdumpDir.Linux, parFile)
klog.InfoS("dbdaemon/dataPumpExport", "dmpPath", dmpPath)
// Remove the dmp file from os if it already exists because oracle will not dump to existing files.
// expdp will log below errors:
// ORA-39000: bad dump file specification
// ORA-31641: unable to create dump file "/u02/app/oracle/oradata/TEST/PDB1/dmp/exportTable.dmp"
// ORA-27038: created file already exists
if err := os.Remove(dmpPath); err != nil && !os.IsNotExist(err) {
return nil, fmt.Errorf("dataPumpExport failed: can't remove existing dmp file %s", dmpPath)
}
expdpTarget, err := security.SetupUserPwConnStringOnServer(ctx, s, consts.PDBLoaderUser, req.PdbName, req.DbDomain)
if err != nil {
return nil, fmt.Errorf("dbdaemon/dataPumpExport: failed to alter user %s", consts.PDBLoaderUser)
}
var params []string
params = append(params, fmt.Sprintf("%s=%s", dmpObjectType, req.Objects))
params = append(params, fmt.Sprintf("DIRECTORY=%s", consts.DpdumpDir.Oracle))
params = append(params, fmt.Sprintf("DUMPFILE=%s", dmpFile))
params = append(params, fmt.Sprintf("LOGFILE=%s", dmpLogFile))
params = append(params, req.CommandParams...)
if len(req.FlashbackTime) != 0 {
params = append(params, fmt.Sprintf("FLASHBACK_TIME=%q", req.FlashbackTime))
}
// To avoid having to supply additional quotation marks on the command line, Oracle recommends the use of parameter files.
if err = writeParFile(parPath, params); err != nil {
return nil, fmt.Errorf("data pump export failed, err = %v", err)
}
cmdParams := []string{expdpTarget}
cmdParams = append(cmdParams, fmt.Sprintf("parfile=%s", parPath))
if err := s.runCommand(expdp(s.databaseHome), cmdParams); err != nil {
if !s.osUtil.isReturnCodeEqual(err, 5) { // see dataPumpImport for an explanation of error code 5
return nil, fmt.Errorf("data pump export failed, err = %v", err)
}
klog.Warning("dbdaemon/dataPumpExport: completed with EX_SUCC_ERR")
}
klog.Infof("dbdaemon/dataPumpExport: export to %s completed successfully", dmpPath)
if err := s.gcsUtil.UploadFile(ctx, req.GcsPath, dmpPath, contentTypePlainText); err != nil {
return nil, fmt.Errorf("dbdaemon/dataPumpExport: failed to upload dmp file to %s: %v", req.GcsPath, err)
}
klog.Infof("dbdaemon/dataPumpExport: uploaded dmp file to %s", req.GcsPath)
if len(req.GcsLogPath) > 0 {
logPath := filepath.Join(pdbPath, consts.DpdumpDir.Linux, dmpLogFile)
if err := s.gcsUtil.UploadFile(ctx, req.GcsLogPath, logPath, contentTypePlainText); err != nil {
return nil, fmt.Errorf("dbdaemon/dataPumpExport: failed to upload log file to %s: %v", req.GcsLogPath, err)
}
klog.Infof("dbdaemon/dataPumpExport: uploaded log file to %s", req.GcsLogPath)
}
return &dbdpb.DataPumpExportResponse{}, nil
}
// writeParFile writes data pump export parameter file in parPath.
func writeParFile(parPath string, params []string) error {
f, err := os.Create(parPath)
if err != nil {
return err
}
defer func() {
if err := f.Close(); err != nil {
klog.Warningf("failed to close %v: %v", f, err)
}
}()
for _, param := range params {
if _, err := f.WriteString(param + "\n"); err != nil {
return err
}
}
return nil
}
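// For illustration, with the parameters assembled in dataPumpExport above the
// generated .par file holds one option per line; the values here are made up:
//
//	SCHEMAS=SCOTT
//	DIRECTORY=DATA_PUMP_DIR
//	DUMPFILE=export_20060102150405.dmp
//	LOGFILE=export_20060102150405.log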
// DataPumpExportAsync turns dataPumpExport into an async call.
func (s *Server) DataPumpExportAsync(ctx context.Context, req *dbdpb.DataPumpExportAsyncRequest) (*lropb.Operation, error) {
job, err := lro.CreateAndRunLROJobWithID(ctx, req.GetLroInput().GetOperationId(), "DataPumpExport", s.lroServer,
func(ctx context.Context) (proto.Message, error) {
return s.dataPumpExport(ctx, req.SyncRequest)
})
if err != nil {
klog.ErrorS(err, "dbdaemon/DataPumpExportAsync failed to create an LRO job", "request", req)
return nil, err
}
return &lropb.Operation{Name: job.ID(), Done: false}, nil
}
// ListOperations returns a paged list of currently managed long running operations.
func (s *Server) ListOperations(ctx context.Context, req *lropb.ListOperationsRequest) (*lropb.ListOperationsResponse, error) {
return s.lroServer.ListOperations(ctx, req)
}
// GetOperation returns details of a requested long running operation.
func (s *Server) GetOperation(ctx context.Context, req *lropb.GetOperationRequest) (*lropb.Operation, error) {
return s.lroServer.GetOperation(ctx, req)
}
// DeleteOperation deletes a long running operation by its id.
func (s *Server) DeleteOperation(ctx context.Context, req *lropb.DeleteOperationRequest) (*empty.Empty, error) {
return s.lroServer.DeleteOperation(ctx, req)
}
func (s *Server) runCommand(bin string, params []string) error {
// Sets env to bounce a database|listener.
if err := os.Setenv("ORACLE_SID", s.databaseSid.val); err != nil {
return fmt.Errorf("failed to set env variable: %v", err)
}
return s.osUtil.runCommand(bin, params)
}
var newDB = func(driverName, dataSourceName string) (oracleDatabase, error) {
return sql.Open(driverName, dataSourceName)
}
// open returns a connection to the given database URL,
// When `prelim` is true, open will make a second connection attempt
// if the first connection fails.
//
// The caller is responsible for closing the returned connection.
//
// open method is created to break down runSQLPlusHelper and make the code
// testable, thus it returns interface oracleDatabase.
func open(ctx context.Context, dbURL string, prelim bool) (oracleDatabase, error) {
// "/ as sysdba"
db, err := newDB("godror", dbURL)
if err == nil {
// Force a connection with Ping.
err = db.Ping()
if err != nil {
// Connection pool opened but ping failed, close this pool.
if err := db.Close(); err != nil {
klog.Warningf("failed to close db connection: %v", err)
}
}
}
if err != nil {
klog.ErrorS(err, "dbdaemon/open: newDB failed", "prelim", prelim)
if prelim {
// If a prelim connection is requested (e.g. for creating
// an spfile, also enable DBMS_OUTPUT.
db, err = newDB("godror", dbURL+"&prelim=1")
}
}
if err != nil {
klog.ErrorS(err, "dbdaemon/open: newDB failed", "prelim", prelim)
return nil, err
}
return db, nil
}
func (d *DB) runSQL(ctx context.Context, sqls []string, prelim, suppress bool, db oracleDatabase) ([]string, error) {
sqlForLogging := strings.Join(sqls, ";")
if suppress {
sqlForLogging = "suppressed"
}
// This will fail on prelim connections, so ignore errors in that case
if _, err := db.ExecContext(ctx, "BEGIN DBMS_OUTPUT.ENABLE(); END;"); err != nil && !prelim {
klog.ErrorS(err, "dbdaemon/runSQL: failed to enable dbms_output", "sql", sqlForLogging)
return nil, err
}
klog.InfoS("dbdaemon/runSQL: running SQL statements", "sql", sqlForLogging)
output := []string{}
for _, sql := range sqls {
if _, err := db.ExecContext(ctx, sql); err != nil {
klog.ErrorS(err, "dbdaemon/runSQL: failed to execute", "sql", sqlForLogging)
return nil, err
}
out, err := dbmsOutputGetLines(ctx, db)
if err != nil && !prelim {
klog.ErrorS(err, "dbdaemon/runSQL: failed to get DBMS_OUTPUT", "sql", sqlForLogging)
return nil, err
}
output = append(output, out...)
}
return output, nil
}
func (d *DB) runQuery(ctx context.Context, sqls []string, db oracleDatabase) ([]string, error) {
klog.InfoS("dbdaemon/runQuery: running sql", "sql", sqls)
sqlLen := len(sqls)
for i := 0; i < sqlLen-1; i++ {
if _, err := db.ExecContext(ctx, sqls[i]); err != nil {
return nil, err
}
}
rows, err := db.QueryContext(ctx, sqls[sqlLen-1])
if err != nil {
klog.ErrorS(err, "dbdaemon/runQuery: failed to query a database", "sql", sqls[sqlLen-1])
return nil, err
}
defer rows.Close()
colNames, err := rows.Columns()
if err != nil {
klog.ErrorS(err, "dbdaemon/runQuery: failed to get column names for query", "sql", sqls[sqlLen-1])
return nil, err
}
var output []string
for rows.Next() {
// Store as strings, database/sql will handle conversion to
// string type for us in Rows.Scan.
data := make([]string, len(colNames))
dataPtr := make([]interface{}, len(colNames))
for i := range colNames {
dataPtr[i] = &data[i]
}
if err := rows.Scan(dataPtr...); err != nil {
klog.ErrorS(err, "dbdaemon/runQuery: failed to read a row")
return nil, err
}
// Convert row to JSON map
dataMap := map[string]string{}
for i, colName := range colNames {
dataMap[colName] = data[i]
}
j, err := json.Marshal(dataMap)
if err != nil {
klog.ErrorS(err, "dbdaemon/runQuery: failed to marshal a data map", "dataMap", dataMap)
return nil, err
}
output = append(output, string(j))
}
return output, nil
}
func (s *Server) runSQLPlusHelper(ctx context.Context, req *dbdpb.RunSQLPlusCMDRequest, formattedSQL bool) (*dbdpb.RunCMDResponse, error) {
if req.GetTnsAdmin() != "" {
if err := os.Setenv("TNS_ADMIN", req.GetTnsAdmin()); err != nil {
return nil, fmt.Errorf("failed to set env variable: %v", err)
}
defer func() {
if err := os.Unsetenv("TNS_ADMIN"); err != nil {
klog.Warningf("failed to unset env variable: %v", err)
}
}()
}
sqls := req.GetCommands()
if len(sqls) < 1 {
return nil, fmt.Errorf("dbdaemon/RunSQLPlus requires a sql statement to run, provided: %d", len(sqls))
}
// formattedSQL = query, hence it is not an op that needs a prelim conn.
// Only enable prelim for known prelim queries, CREATE SPFILE and CREATE PFILE.
var prelim bool
if !formattedSQL && (strings.HasPrefix(strings.ToLower(sqls[0]), "create spfile") ||
strings.HasPrefix(strings.ToLower(sqls[0]), "create pfile")) {
prelim = true
}
// This default connect string requires the ORACLE_SID env variable to be set.
connectString := "oracle://?sysdba=1"
switch req.ConnectInfo.(type) {
case *dbdpb.RunSQLPlusCMDRequest_Dsn:
connectString = req.GetDsn()
case *dbdpb.RunSQLPlusCMDRequest_DatabaseName:
if err := os.Setenv("ORACLE_SID", req.GetDatabaseName()); err != nil {
return nil, fmt.Errorf("failed to set env variable: %v", err)
}
case *dbdpb.RunSQLPlusCMDRequest_Local:
if err := os.Setenv("ORACLE_SID", s.databaseSid.val); err != nil {
return nil, fmt.Errorf("failed to set env variable: %v", err)
}
default:
// For backward compatibility if connect_info field isn't defined in the request
// we fallback to the Local option.
if err := os.Setenv("ORACLE_SID", s.databaseSid.val); err != nil {
return nil, fmt.Errorf("failed to set env variable: %v", err)
}
}
klog.InfoS("dbdaemon/runSQLPlusHelper: updated env ", "sid", s.databaseSid.val)
db, err := open(ctx, connectString, prelim)
if err != nil {
return nil, fmt.Errorf("dbdaemon/RunSQLPlus failed to open a database connection: %v", err)
}
defer func() {
if err := db.Close(); err != nil {
klog.Warningf("failed to close db connection: %v", err)
}
}()
var o []string
if formattedSQL {
o, err = s.database.runQuery(ctx, sqls, db)
} else {
o, err = s.database.runSQL(ctx, sqls, prelim, req.GetSuppress(), db)
}
if err != nil {
klog.ErrorS(err, "dbdaemon/RunSQLPlus: error in execution", "formattedSQL", formattedSQL, "ORACLE_SID", s.databaseSid.val)
return nil, err
}
klog.InfoS("dbdaemon/RunSQLPlus", "output", strings.Join(o, "\n"))
return &dbdpb.RunCMDResponse{Msg: o}, nil
}
// RunSQLPlus executes oracle's sqlplus and returns output.
// This function only returns DBMS_OUTPUT and not any row data.
// To read from SELECTs use RunSQLPlusFormatted.
func (s *Server) RunSQLPlus(ctx context.Context, req *dbdpb.RunSQLPlusCMDRequest) (*dbdpb.RunCMDResponse, error) {
if req.GetSuppress() {
klog.InfoS("dbdaemon/RunSQLPlus", "req", "suppressed", "serverObj", s)
} else {
klog.InfoS("dbdaemon/RunSQLPlus", "req", req, "serverObj", s)
}
// Add lock to protect server state "databaseSid" and os env variable "ORACLE_SID".
// Only add lock in top level API to avoid deadlock.
s.databaseSid.Lock()
defer s.databaseSid.Unlock()
return s.runSQLPlusHelper(ctx, req, false)
}
// RunSQLPlusFormatted executes a SQL command and returns the row results.
// If instead you want DBMS_OUTPUT please issue RunSQLPlus
func (s *Server) RunSQLPlusFormatted(ctx context.Context, req *dbdpb.RunSQLPlusCMDRequest) (*dbdpb.RunCMDResponse, error) {
if req.GetSuppress() {
klog.InfoS("dbdaemon/RunSQLPlusFormatted", "req", "suppressed", "serverObj", s)
} else {
klog.InfoS("dbdaemon/RunSQLPlusFormatted", "req", req, "serverObj", s)
}
sqls := req.GetCommands()
klog.InfoS("dbdaemon/RunSQLPlusFormatted: executing formatted SQL commands", "sql", sqls)
// Add lock to protect server state "databaseSid" and os env variable "ORACLE_SID".
// Only add lock in top level API to avoid deadlock.
s.databaseSid.Lock()
defer s.databaseSid.Unlock()
return s.runSQLPlusHelper(ctx, req, true)
}
// KnownPDBs runs a database query returning a list of PDBs known
// to a database. By default it doesn't include a seed PDB.
// It also by default doesn't pay attention to a state of a PDB.
// A caller can overwrite both of the above settings with the flags.
func (s *Server) KnownPDBs(ctx context.Context, req *dbdpb.KnownPDBsRequest) (*dbdpb.KnownPDBsResponse, error) {
klog.InfoS("dbdaemon/KnownPDBs", "req", req, "serverObj", s)
// Add lock to protect server state "databaseSid" and os env variable "ORACLE_SID".
// Only add lock in top level API to avoid deadlock.
s.databaseSid.RLock()
defer s.databaseSid.RUnlock()
knownPDBs, err := s.knownPDBs(ctx, req.GetIncludeSeed(), req.GetOnlyOpen())
if err != nil {
return nil, err
}
return &dbdpb.KnownPDBsResponse{KnownPdbs: knownPDBs}, nil
}
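// A minimal caller-side sketch of overriding both defaults described above;
// the request field names are assumed from the GetIncludeSeed/GetOnlyOpen
// getters used in this file:
//
//	resp, err := s.KnownPDBs(ctx, &dbdpb.KnownPDBsRequest{IncludeSeed: true, OnlyOpen: true})
//	if err == nil {
//		klog.InfoS("open PDBs including seed", "pdbs", resp.GetKnownPdbs())
//	}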
func (s *Server) knownPDBs(ctx context.Context, includeSeed, onlyOpen bool) ([]string, error) {
sql := consts.ListPDBsSQL
if !includeSeed {
where := "and name != 'PDB$SEED'"
sql = fmt.Sprintf("%s %s", sql, where)
}
if onlyOpen {
where := "and open_mode = 'READ WRITE'"
sql = fmt.Sprintf("%s %s", sql, where)
}
resp, err := s.runSQLPlusHelper(ctx, &dbdpb.RunSQLPlusCMDRequest{Commands: []string{sql}}, true)
if err != nil {
return nil, err
}
klog.InfoS("dbdaemon/knownPDBs", "resp", resp)
var knownPDBs []string
for _, msg := range resp.Msg {
row := make(map[string]string)
if err := json.Unmarshal([]byte(msg), &row); err != nil {
klog.ErrorS(err, "dbdaemon/knownPDBS: failed to unmarshal PDB query resultset")
return nil, err
}
if name, ok := row["NAME"]; ok {
knownPDBs = append(knownPDBs, name)
}
}
klog.InfoS("dbdaemon/knownPDBs", "knownPDBs", knownPDBs)
return knownPDBs, nil
}
func (s *Server) isKnownPDB(ctx context.Context, name string, includeSeed, onlyOpen bool) (bool, []string) {
knownPDBs, err := s.knownPDBs(ctx, includeSeed, onlyOpen)
if err != nil {
return false, nil
}
for _, pdb := range knownPDBs {
if pdb == strings.ToUpper(name) {
return true, knownPDBs
}
}
return false, knownPDBs
}
// CheckDatabaseState pings a database to check its status.
// This method has been tested for checking a CDB state.
func (s *Server) CheckDatabaseState(ctx context.Context, req *dbdpb.CheckDatabaseStateRequest) (*dbdpb.CheckDatabaseStateResponse, error) {
klog.InfoS("dbdaemon/CheckDatabaseState", "req", req, "serverObj", s)
reqDatabaseName := req.GetDatabaseName()
if reqDatabaseName == "" {
return nil, fmt.Errorf("a database check is requested, but a mandatory database name parameter is not provided (server: %v)", s)
}
var dbURL string
if req.GetIsCdb() {
// Local connection, set env variables.
if err := os.Setenv("ORACLE_SID", req.GetDatabaseName()); err != nil {
return nil, err
}
// Even for CDB check, use TNS connection to verify listener health.
cs, pass, err := security.SetupConnStringOnServer(ctx, s, consts.SecurityUser, req.GetDatabaseName(), req.GetDbDomain())
if err != nil {
return nil, fmt.Errorf("dbdaemon/CheckDatabaseState: failed to alter user %s", consts.SecurityUser)
}
dbURL = fmt.Sprintf("user=%q password=%q connectString=%q standaloneConnection=true",
consts.SecurityUser, pass, cs)
} else {
// A PDB that a Database Daemon is requested to operate on
// must be part of the Server object (set based on the metadata).
// (a "part of" is for a future support for multiple PDBs per CDB).
if known, knownPDBs := s.isKnownPDB(ctx, reqDatabaseName, false, false); !known {
return nil, fmt.Errorf("%q is not in the known PDB list: %v", reqDatabaseName, knownPDBs)
}
// Alter security password and if it's not been set yet.
if s.pdbConnStr == "" {
cs, err := security.SetupUserPwConnStringOnServer(ctx, s, consts.SecurityUser, reqDatabaseName, req.GetDbDomain())
if err != nil {
return nil, fmt.Errorf("dbdaemon/CheckDatabaseState: failed to alter user %s", consts.SecurityUser)
}
s.pdbConnStr = cs
}
// Use new PDB connection string to check PDB status.
dbURL = s.pdbConnStr
}
db, err := sql.Open("godror", dbURL)
if err != nil {
klog.ErrorS(err, "dbdaemon/CheckDatabaseState: failed to open a database")
return nil, err
}
defer db.Close()
if err := db.PingContext(ctx); err != nil {
klog.ErrorS(err, "dbdaemon/CheckDatabaseState: database not running")
return nil, fmt.Errorf("cannot connect to database %s: %v", reqDatabaseName, err)
}
return &dbdpb.CheckDatabaseStateResponse{}, nil
}
// RunRMAN will run the script to execute RMAN and create a physical backup in the target directory, then back it up to GCS if requested
func (s *Server) RunRMAN(ctx context.Context, req *dbdpb.RunRMANRequest) (*dbdpb.RunRMANResponse, error) {
// Required for local connections (when no SID is specified on connect string).
// Add lock to protect server state "databaseSid" and os env variable "ORACLE_SID".
// Only add lock in top level API to avoid deadlock.
if req.GetSuppress() {
klog.InfoS("RunRMAN", "request", "suppressed")
} else {
klog.InfoS("RunRMAN", "request", req)
}
s.databaseSid.RLock()
defer s.databaseSid.RUnlock()
if err := os.Setenv("ORACLE_SID", s.databaseSid.val); err != nil {
return nil, fmt.Errorf("failed to set env variable: %v", err)
}
if req.GetTnsAdmin() != "" {
if err := os.Setenv("TNS_ADMIN", req.GetTnsAdmin()); err != nil {
return nil, fmt.Errorf("failed to set env variable: %v", err)
}
defer func() {
if err := os.Unsetenv("TNS_ADMIN"); err != nil {
klog.Warningf("failed to unset env variable: %v", err)
}
}()
}
scripts := req.GetScripts()
if len(scripts) < 1 {
return nil, fmt.Errorf("RunRMAN requires at least 1 script to run, provided: %d", len(scripts))
}
var res []string
for _, script := range scripts {
target := "/"
if req.GetTarget() != "" {
target = req.GetTarget()
}
args := []string{fmt.Sprintf("target=%s", target)}
if req.GetAuxiliary() != "" {
args = append(args, fmt.Sprintf("auxiliary=%s", req.Auxiliary))
}
args = append(args, "@/dev/stdin")
cmd := exec.Command(rman(s.databaseHome), args...)
cmd.Stdin = strings.NewReader(script)
out, err := cmd.CombinedOutput()
if err != nil {
return nil, fmt.Errorf("RunRMAN failed,\nscript: %q\nFailed with: %v\nErr: %v", script, string(out), err)
}
res = append(res, string(out))
if req.GetGcsPath() != "" && req.GetCmd() == consts.RMANBackup {
if err = s.uploadDirectoryContentsToGCS(ctx, consts.RMANStagingDir, req.GetGcsPath()); err != nil {
klog.ErrorS(err, "GCS Upload error:")
return nil, err
}
}
}
return &dbdpb.RunRMANResponse{Output: res}, nil
}
// RunRMANAsync turns RunRMAN into an async call.
func (s *Server) RunRMANAsync(ctx context.Context, req *dbdpb.RunRMANAsyncRequest) (*lropb.Operation, error) {
job, err := lro.CreateAndRunLROJobWithID(ctx, req.GetLroInput().GetOperationId(), "RMAN", s.lroServer,
func(ctx context.Context) (proto.Message, error) {
return s.RunRMAN(ctx, req.SyncRequest)
})
if err != nil {
klog.ErrorS(err, "dbdaemon/RunRMANAsync failed to create an LRO job", "request", req)
return nil, err
}
return &lropb.Operation{Name: job.ID(), Done: false}, nil
}
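// uploadDirectoryContentsToGCS walks backupDir and uploads each regular file to the given GCS path, preserving the layout relative to backupDir; the RMAN staging directory is removed afterwards regardless of the upload outcome.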
func (s *Server) uploadDirectoryContentsToGCS(ctx context.Context, backupDir, gcsPath string) error {
klog.InfoS("RunRMAN: uploadDirectoryContentsToGCS", "backupdir", backupDir, "gcsPath", gcsPath)
err := filepath.Walk(backupDir, func(fpath string, info os.FileInfo, errInner error) error {
klog.InfoS("RunRMAN: walking...", "fpath", fpath, "info", info, "errInner", errInner)
if errInner != nil {
return errInner
}
if info.IsDir() {
return nil
}
relPath, err := filepath.Rel(backupDir, fpath)
if err != nil {
return errors.Errorf("filepath.Rel(%s, %s) returned err: %s", backupDir, fpath, err)
}
gcsTarget, err := url.Parse(gcsPath)
if err != nil {
return errors.Errorf("invalid GcsPath err: %v", err)
}
gcsTarget.Path = path.Join(gcsTarget.Path, relPath)
klog.InfoS("gcs", "target", gcsTarget)
start := time.Now()
err = s.gcsUtil.UploadFile(ctx, gcsTarget.String(), fpath, contentTypePlainText)
if err != nil {
return err
}
end := time.Now()
rate := float64(info.Size()) / (end.Sub(start).Seconds())
klog.InfoS("dbdaemon/uploadDirectoryContentsToGCS", "uploaded", gcsTarget.String(), "throughput", fmt.Sprintf("%f MB/s", rate/1024/1024))
return nil
})
if err := os.RemoveAll(consts.RMANStagingDir); err != nil {
klog.Warningf("uploadDirectoryContentsToGCS: can't cleanup staging dir from local disk.")
}
return err
}
// NID changes a database id and/or database name.
func (s *Server) NID(ctx context.Context, req *dbdpb.NIDRequest) (*dbdpb.NIDResponse, error) {
params := []string{"target=/"}
if req.GetSid() == "" {
return nil, fmt.Errorf("dbdaemon/NID: missing sid for req: %v", req)
}
if err := os.Setenv("ORACLE_SID", req.GetSid()); err != nil {
return nil, fmt.Errorf("dbdaemon/NID: set env ORACLE_SID failed: %v", err)
}
// Add lock to protect server state "databaseSid" and os env variable "ORACLE_SID".
// When renaming the DB, DB is not ready to run cmds or SQLs, it seems to be ok to block all other APIs for now.
s.databaseSid.Lock()
defer s.databaseSid.Unlock()
if req.GetDatabaseName() != "" {
s.databaseSid.val = req.GetDatabaseName()
params = append(params, fmt.Sprintf("dbname=%s", req.GetDatabaseName()))
}
params = append(params, "logfile=/home/oracle/nid.log")
_, err := s.dbdClient.ProxyRunNID(ctx, &dbdpb.ProxyRunNIDRequest{Params: params, DestDbName: req.GetDatabaseName()})
if err != nil {
return nil, fmt.Errorf("nid failed: %v", err)
}
klog.InfoS("dbdaemon/NID: done", "req", req)
return &dbdpb.NIDResponse{}, nil
}
// GetDatabaseType returns database type, eg. ORACLE_12_2_ENTERPRISE_NONCDB
func (s *Server) GetDatabaseType(ctx context.Context, req *dbdpb.GetDatabaseTypeRequest) (*dbdpb.GetDatabaseTypeResponse, error) {
f, err := os.Open(consts.OraTab)
if err != nil {
return nil, fmt.Errorf("GetDatabaseType: failed to open %q", consts.OraTab)
}
defer func() {
if err := f.Close(); err != nil {
klog.Warningf("failed to close %v: %v", f, err)
}
}()
scanner := bufio.NewScanner(f)
for scanner.Scan() {
line := scanner.Text()
// The content of oratab is expected to be of the form:
// # comments
// <CDB name>:DatabaseHome:<Y/N>
// # DATABASETYPE:ORACLE_12_2_ENTERPRISE_NONCDB
if !strings.HasPrefix(line, "# DATABASETYPE") {
continue
}
fragment := strings.Split(line, ":")
if len(fragment) != 2 {
return nil, fmt.Errorf("GetDatabaseType: failed to parse %q for database type(number of fields is %d, not 2)", consts.OraTab, len(fragment))
}
switch fragment[1] {
case "ORACLE_12_2_ENTERPRISE":
return &dbdpb.GetDatabaseTypeResponse{
DatabaseType: dbdpb.GetDatabaseTypeResponse_ORACLE_12_2_ENTERPRISE,
}, nil
case "ORACLE_12_2_ENTERPRISE_NONCDB":
return &dbdpb.GetDatabaseTypeResponse{
DatabaseType: dbdpb.GetDatabaseTypeResponse_ORACLE_12_2_ENTERPRISE_NONCDB,
}, nil
default:
return nil, fmt.Errorf("GetDatabaseType: failed to get valid database type from %q", consts.OraTab)
}
}
// For backward compatibility, return ORACLE_12_2_ENTERPRISE by default
return &dbdpb.GetDatabaseTypeResponse{
DatabaseType: dbdpb.GetDatabaseTypeResponse_ORACLE_12_2_ENTERPRISE,
}, nil
}
// GetDatabaseName returns database name.
func (s *Server) GetDatabaseName(ctx context.Context, req *dbdpb.GetDatabaseNameRequest) (*dbdpb.GetDatabaseNameResponse, error) {
//databaseSid value will be set in dbdserver's constructor and NID API with write lock.
//databaseSid is expected to be valid in dbdserver's life cycle.
s.databaseSid.RLock()
defer s.databaseSid.RUnlock()
return &dbdpb.GetDatabaseNameResponse{DatabaseName: s.databaseSid.val}, nil
}
// BounceDatabase starts/stops request specified database.
func (s *Server) BounceDatabase(ctx context.Context, req *dbdpb.BounceDatabaseRequest) (*dbdpb.BounceDatabaseResponse, error) {
klog.InfoS("BounceDatabase request delegated to proxy", "req", req)
database, err := s.dbdClient.BounceDatabase(ctx, req)
if err != nil {
msg := "dbdaemon/BounceDatabase: error while bouncing database"
klog.InfoS(msg, "err", err)
return nil, fmt.Errorf("%s: %v", msg, err)
}
if req.Operation == dbdpb.BounceDatabaseRequest_STARTUP && !req.GetAvoidConfigBackup() {
if err := s.BackupConfigFile(ctx, s.databaseSid.val); err != nil {
msg := "dbdaemon/BounceDatabase: error while backing up config file"
klog.InfoS(msg, "err", err)
return nil, fmt.Errorf("%s: %v", msg, err)
}
klog.InfoS("dbdaemon/BounceDatabase start operation: config file backup successful")
}
return database, err
}
// BounceListener starts/stops request specified listener.
func (s *Server) BounceListener(ctx context.Context, req *dbdpb.BounceListenerRequest) (*dbdpb.BounceListenerResponse, error) {
klog.InfoS("BounceListener request delegated to proxy", "req", req)
return s.dbdClient.BounceListener(ctx, req)
}
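// close closes the connection to the database daemon proxy.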
func (s *Server) close() {
if err := s.dbdClientClose(); err != nil {
klog.Warningf("failed to close dbdaemon client: %v", err)
}
}
// BootstrapStandby performs bootstrap tasks for a standby instance.
func (s *Server) BootstrapStandby(ctx context.Context, req *dbdpb.BootstrapStandbyRequest) (*dbdpb.BootstrapStandbyResponse, error) {
klog.InfoS("dbdaemon/BootstrapStandby", "req", req)
cdbName := req.GetCdbName()
spfile := filepath.Join(fmt.Sprintf(consts.ConfigDir, consts.DataMount, cdbName), fmt.Sprintf("spfile%s.ora", cdbName))
resp, err := s.RunSQLPlusFormatted(ctx, &dbdpb.RunSQLPlusCMDRequest{Commands: []string{"select value from v$parameter where name='spfile'"}})
if err != nil || len(resp.GetMsg()) < 1 {
return nil, fmt.Errorf("dbdaemon/BootstrapStandby: failed to check spfile, results: %v, err: %v", resp, err)
}
row := make(map[string]string)
if err := json.Unmarshal([]byte(resp.GetMsg()[0]), &row); err != nil {
return nil, err
}
value := row["VALUE"]
if value != "" {
spfile = value
} else {
_, err := s.RunSQLPlus(ctx, &dbdpb.RunSQLPlusCMDRequest{Commands: []string{fmt.Sprintf("create spfile='%s' from memory", spfile)}, Suppress: false})
if err != nil {
return nil, fmt.Errorf("dbdaemon/BootstrapStandby: failed to create spfile from memory: %v", err)
}
}
if _, err = s.dbdClient.SetEnv(ctx, &dbdpb.SetEnvRequest{
OracleHome: s.databaseHome,
CdbName: req.GetCdbName(),
SpfilePath: spfile,
}); err != nil {
return nil, fmt.Errorf("dbdaemon/BootstrapStandby: proxy failed to SetEnv: %v", err)
}
klog.InfoS("dbdaemon/BootstrapStandby: spfile creation/relocation completed successfully")
if err := markProvisioned(); err != nil {
return nil, fmt.Errorf("dbdaemon/BootstrapStandby: error while creating provisioning file: %v", err)
}
klog.InfoS("dbdaemon/BootstrapStandby: Provisioning file created successfully")
return &dbdpb.BootstrapStandbyResponse{}, nil
}
// createCDB creates a database instance
func (s *Server) createCDB(ctx context.Context, req *dbdpb.CreateCDBRequest) (*dbdpb.CreateCDBResponse, error) {
klog.InfoS("CreateCDB request invoked", "req", req)
password, err := security.RandOraclePassword()
if err != nil {
return nil, fmt.Errorf("error generating temporary password")
}
characterSet := req.GetCharacterSet()
sid := req.GetDatabaseName()
memoryPercent := req.GetMemoryPercent()
var initParams string
if sid == "" {
return nil, fmt.Errorf("dbdaemon/CreateCDB: DBname is empty")
}
if characterSet == "" {
characterSet = "AL32UTF8"
}
if memoryPercent == 0 {
memoryPercent = 25
}
if req.GetAdditionalParams() == nil {
initParams = strings.Join(provision.MapToSlice(provision.GetDefaultInitParams(req.DatabaseName)), ",")
if req.GetDbDomain() != "" {
initParams = fmt.Sprintf("%s,DB_DOMAIN=%s", initParams, req.GetDbDomain())
}
} else {
foundDBDomain := false
for _, param := range req.GetAdditionalParams() {
if strings.Contains(strings.ToUpper(param), "DB_DOMAIN=") {
foundDBDomain = true
break
}
}
initParamsArr := req.GetAdditionalParams()
if !foundDBDomain && req.GetDbDomain() != "" {
initParamsArr = append(initParamsArr, fmt.Sprintf("DB_DOMAIN=%s", req.GetDbDomain()))
}
initParamsMap, err := provision.MergeInitParams(provision.GetDefaultInitParams(req.DatabaseName), initParamsArr)
if err != nil {
return nil, fmt.Errorf("error while merging user defined init params with default values, %v", err)
}
initParamsArr = provision.MapToSlice(initParamsMap)
initParams = strings.Join(initParamsArr, ",")
}
params := []string{
"-silent",
"-createDatabase",
"-templateName", "General_Purpose.dbc",
"-gdbName", sid,
"-responseFile", "NO_VALUE",
"-createAsContainerDatabase", strconv.FormatBool(true),
"-sid", sid,
"-characterSet", characterSet,
"-memoryPercentage", strconv.FormatInt(int64(memoryPercent), 10),
"-emConfiguration", "NONE",
"-datafileDestination", oraDataDir,
"-storageType", "FS",
"-initParams", initParams,
"-databaseType", "MULTIPURPOSE",
"-recoveryAreaDestination", "/u03/app/oracle/fast_recovery_area",
"-sysPassword", password,
"-systemPassword", password,
}
_, err = s.dbdClient.ProxyRunDbca(ctx, &dbdpb.ProxyRunDbcaRequest{OracleHome: s.databaseHome, DatabaseName: req.DatabaseName, Params: params})
if err != nil {
return nil, fmt.Errorf("error while running dbca command: %v", err)
}
klog.InfoS("dbdaemon/CreateCDB: CDB created successfully")
if _, err := s.BounceDatabase(ctx, &dbdpb.BounceDatabaseRequest{
Operation: dbdpb.BounceDatabaseRequest_SHUTDOWN,
DatabaseName: req.GetDatabaseName(),
}); err != nil {
return nil, fmt.Errorf("dbdaemon/CreateCDB: shutdown failed: %v", err)
}
klog.InfoS("dbdaemon/CreateCDB successfully completed")
return &dbdpb.CreateCDBResponse{}, nil
}
// CreateFile creates file based on request.
func (s *Server) CreateFile(ctx context.Context, req *dbdpb.CreateFileRequest) (*dbdpb.CreateFileResponse, error) {
klog.InfoS("dbdaemon/CreateFile: ", "req", req)
if err := s.osUtil.createFile(req.GetPath(), strings.NewReader(req.GetContent())); err != nil {
return nil, fmt.Errorf("dbdaemon/CreateFile: create failed: %v", err)
}
return &dbdpb.CreateFileResponse{}, nil
}
// CreateCDBAsync turns CreateCDB into an async call.
func (s *Server) CreateCDBAsync(ctx context.Context, req *dbdpb.CreateCDBAsyncRequest) (*lropb.Operation, error) {
job, err := lro.CreateAndRunLROJobWithID(ctx, req.GetLroInput().GetOperationId(), "CreateCDB", s.lroServer,
func(ctx context.Context) (proto.Message, error) {
return s.createCDB(ctx, req.SyncRequest)
})
if err != nil {
klog.ErrorS(err, "dbdaemon/CreateCDBAsync failed to create an LRO job", "request", req)
return nil, err
}
return &lropb.Operation{Name: job.ID(), Done: false}, nil
}
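// setEnvNew records the Oracle home and SID on the server and relinks the database config files accordingly.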
func setEnvNew(s *Server, home string, dbName string) error {
s.databaseHome = home
s.databaseSid.val = dbName
if err := provision.RelinkConfigFiles(home, dbName); err != nil {
return err
}
return nil
}
// markProvisioned creates a flag file to indicate that CDB provisioning completed successfully
func markProvisioned() error {
f, err := os.Create(consts.ProvisioningDoneFile)
if err != nil {
return fmt.Errorf("could not create %s file: %v", consts.ProvisioningDoneFile, err)
}
defer func() {
if err := f.Close(); err != nil {
klog.Warningf("failed to close %v: %v", f, err)
}
}()
return nil
}
// A user running this program should not be root and
// a primary group should be either dba or oinstall.
func oracleUserUIDGID(skipChecking bool) (uint32, uint32, error) {
if skipChecking {
klog.InfoS("oracleUserUIDGID: skipped by request")
return 0, 0, nil
}
u, err := user.Lookup(consts.OraUser)
if err != nil {
return 0, 0, fmt.Errorf("oracleUserUIDGID: could not determine the current user: %v", err)
}
if u.Username == "root" {
return 0, 0, fmt.Errorf("oracleUserUIDGID: this program is designed to run by the Oracle software installation owner (e.g. oracle), not %q", u.Username)
}
groups := consts.OraGroup
var gids []string
for _, group := range groups {
g, err := user.LookupGroup(group)
// Not all of these groups are mandatory, e.g. oinstall may not exist.
klog.InfoS("looked up group", "group", group, "g", g)
if err != nil {
continue
}
gids = append(gids, g.Gid)
}
for _, g := range gids {
if u.Gid == g {
usr, err := strconv.ParseUint(u.Uid, 10, 32)
if err != nil {
return 0, 0, err
}
grp, err := strconv.ParseUint(u.Gid, 10, 32)
if err != nil {
return 0, 0, err
}
return uint32(usr), uint32(grp), nil
}
}
return 0, 0, fmt.Errorf("oracleUserUIDGID: current user's primary group (GID=%q) is not dba|oinstall (GID=%q)", u.Gid, gids)
}
// CreateListener creates a new listener for the database.
func (s *Server) CreateListener(ctx context.Context, req *dbdpb.CreateListenerRequest) (*dbdpb.CreateListenerResponse, error) {
domain := req.GetDbDomain()
if req.GetDbDomain() != "" {
domain = fmt.Sprintf(".%s", req.GetDbDomain())
}
uid, gid, err := oracleUserUIDGID(true)
if err != nil {
return nil, fmt.Errorf("initDBListeners: get uid gid failed: %v", err)
}
l := &provision.ListenerInput{
DatabaseName: req.DatabaseName,
DatabaseBase: consts.OracleBase,
DatabaseHome: s.databaseHome,
DatabaseHost: s.hostName,
DBDomain: domain,
}
pdbNames, err := s.fetchPDBNames(ctx)
if err != nil {
return nil, err
}
l.PluggableDatabaseNames = pdbNames
lType := consts.SECURE
lDir := filepath.Join(listenerDir, lType)
listenerFileContent, tnsFileContent, sqlNetContent, err := provision.LoadTemplateListener(l, lType, fmt.Sprint(req.Port), req.Protocol)
if err != nil {
return &dbdpb.CreateListenerResponse{}, fmt.Errorf("initDBListeners: loading template for listener %q failed: %v", req.DatabaseName, err)
}
if err := provision.MakeDirs(ctx, []string{lDir}, uid, gid); err != nil {
return nil, fmt.Errorf("initDBListeners: making a listener directory %q failed: %v", lDir, err)
}
// Prepare listener.ora.
if err := ioutil.WriteFile(filepath.Join(lDir, "listener.ora"), []byte(listenerFileContent), 0600); err != nil {
return nil, fmt.Errorf("initDBListeners: creating a listener.ora file failed: %v", err)
}
// Prepare sqlnet.ora.
if err := ioutil.WriteFile(filepath.Join(lDir, "sqlnet.ora"), []byte(sqlNetContent), 0600); err != nil {
return nil, fmt.Errorf("initDBListeners: unable to write sqlnet: %v", err)
}
// Prepare tnsnames.ora.
if err := ioutil.WriteFile(filepath.Join(lDir, "tnsnames.ora"), []byte(tnsFileContent), 0600); err != nil {
return nil, fmt.Errorf("initDBListeners: creating a tnsnames.ora file failed: %v", err)
}
if _, err := s.BounceListener(ctx, &dbdpb.BounceListenerRequest{
Operation: dbdpb.BounceListenerRequest_STOP,
ListenerName: lType,
TnsAdmin: lDir,
}); err != nil {
klog.ErrorS(err, "Listener stop failed", "name", lType, "lDir", lDir)
}
if _, err := s.BounceListener(ctx, &dbdpb.BounceListenerRequest{
Operation: dbdpb.BounceListenerRequest_START,
ListenerName: lType,
TnsAdmin: lDir,
}); err != nil {
return nil, fmt.Errorf("listener %s startup failed: %s, %v", lType, lDir, err)
}
return &dbdpb.CreateListenerResponse{}, nil
}
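// fetchPDBNames returns the names of the pluggable databases in the CDB, excluding the seed PDB.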
func (s *Server) fetchPDBNames(ctx context.Context) ([]string, error) {
sqlResp, err := s.RunSQLPlusFormatted(ctx, &dbdpb.RunSQLPlusCMDRequest{
Commands: []string{consts.ListPluggableDatabaseExcludeSeedSQL},
Suppress: false,
})
if err != nil {
return nil, fmt.Errorf("BootstrapTask: query pdb names failed: %v", err)
}
pdbNames := sqlResp.GetMsg()
knownPDBs := make([]string, len(pdbNames))
for i, msg := range pdbNames {
row := make(map[string]string)
if err := json.Unmarshal([]byte(msg), &row); err != nil {
return knownPDBs, err
}
if name, ok := row["PDB_NAME"]; ok {
knownPDBs[i] = name
}
}
klog.InfoS("BootstrapTask: Found known pdbs", "knownPDBs", knownPDBs)
return knownPDBs, nil
}
// FileExists is used to check an existence of a file (e.g. useful for provisioning).
func (s *Server) FileExists(ctx context.Context, req *dbdpb.FileExistsRequest) (*dbdpb.FileExistsResponse, error) {
host, err := os.Hostname()
if err != nil {
return &dbdpb.FileExistsResponse{}, fmt.Errorf("dbdaemon/FileExists: failed to get host name: %v", err)
}
file := req.GetName()
_, err = os.Stat(file)
if err == nil {
klog.InfoS("dbdaemon/FileExists", "requested file", file, "result", "found")
return &dbdpb.FileExistsResponse{Exists: true}, nil
}
if os.IsNotExist(err) {
klog.InfoS("dbdaemon/FileExists", "requested file", file, "on host", host, "result", "NOT found")
return &dbdpb.FileExistsResponse{Exists: false}, nil
}
// Something is wrong, return error.
klog.Errorf("dbdaemon/FileExists: failed to determine the status of a requested file %q on host %q: %v", file, host, err)
return &dbdpb.FileExistsResponse{}, err
}
// CreateDirs RPC call to create directories along with any necessary parents.
func (s *Server) CreateDirs(ctx context.Context, req *dbdpb.CreateDirsRequest) (*dbdpb.CreateDirsResponse, error) {
for _, dirInfo := range req.GetDirs() {
if err := os.MkdirAll(dirInfo.GetPath(), os.FileMode(dirInfo.GetPerm())); err != nil {
return nil, fmt.Errorf("dbdaemon/CreateDirs failed on dir %s: %v", dirInfo.GetPath(), err)
}
}
return &dbdpb.CreateDirsResponse{}, nil
}
// ReadDir RPC call to read the directory named by path and returns Fileinfos for the path and children.
func (s *Server) ReadDir(ctx context.Context, req *dbdpb.ReadDirRequest) (*dbdpb.ReadDirResponse, error) {
if !strings.HasPrefix(req.GetPath(), "/") {
return nil, fmt.Errorf("dbdaemon/ReadDir failed to read %v, only accept absolute path", req.GetPath())
}
currFileInfo, err := os.Stat(req.GetPath())
if err != nil {
return nil, fmt.Errorf("dbdaemon/ReadDir os.Stat(%v) failed: %v ", req.GetPath(), err)
}
rpcCurrFileInfo, err := convertToRpcFileInfo(currFileInfo, req.GetPath())
if err != nil {
return nil, fmt.Errorf("dbdaemon/ReadDir failed: %v ", err)
}
resp := &dbdpb.ReadDirResponse{
CurrPath: rpcCurrFileInfo,
}
if !currFileInfo.IsDir() {
// for a file, just return its fileInfo
return resp, nil
}
if req.GetRecursive() {
if err := filepath.Walk(req.GetPath(), func(path string, info os.FileInfo, err error) error {
if err != nil {
// stop walking if we see any error.
return fmt.Errorf("visit %v, %v failed: %v", path, info, err)
}
if len(resp.SubPaths) >= maxWalkFiles {
return fmt.Errorf("visited more than %v files, try reduce the dir scope", maxWalkFiles)
}
if path == req.GetPath() {
return nil
}
rpcInfo, err := convertToRpcFileInfo(info, path)
if err != nil {
return fmt.Errorf("visit %v, %v failed: %v ", info, path, err)
}
resp.SubPaths = append(resp.SubPaths, rpcInfo)
return nil
}); err != nil {
return nil, fmt.Errorf("dbdaemon/ReadDir filepath.Walk(%v) failed: %v ", req.GetPath(), err)
}
} else {
subFileInfos, err := ioutil.ReadDir(req.GetPath())
if err != nil {
return nil, fmt.Errorf("dbdaemon/ReadDir ioutil.ReadDir(%v) failed: %v ", req.GetPath(), err)
}
for _, info := range subFileInfos {
rpcInfo, err := convertToRpcFileInfo(info, filepath.Join(req.GetPath(), info.Name()))
if err != nil {
return nil, fmt.Errorf("dbdaemon/ReadDir failed: %v ", err)
}
resp.SubPaths = append(resp.SubPaths, rpcInfo)
}
}
return resp, nil
}
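// convertToRpcFileInfo converts an os.FileInfo and its absolute path into the FileInfo proto used in ReadDir responses.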
func convertToRpcFileInfo(info os.FileInfo, absPath string) (*dbdpb.ReadDirResponse_FileInfo, error) {
timestampProto, err := ptypes.TimestampProto(info.ModTime())
if err != nil {
return nil, fmt.Errorf("convertToRpcFileInfo(%v) failed: %v", info, err)
}
return &dbdpb.ReadDirResponse_FileInfo{
Name: info.Name(),
Size: info.Size(),
Mode: uint32(info.Mode()),
ModTime: timestampProto,
IsDir: info.IsDir(),
AbsPath: absPath,
}, nil
}
// DeleteDir removes path and any children it contains.
func (s *Server) DeleteDir(ctx context.Context, req *dbdpb.DeleteDirRequest) (*dbdpb.DeleteDirResponse, error) {
removeFun := os.Remove
if req.GetForce() {
removeFun = os.RemoveAll
}
if err := removeFun(req.GetPath()); err != nil {
return nil, fmt.Errorf("dbdaemon/DeleteDir(%v) failed: %v", req, err)
}
return &dbdpb.DeleteDirResponse{}, nil
}
// BackupConfigFile converts the binary spfile to human readable pfile and
// creates a snapshot copy named pfile.lkws (lkws -> last known working state).
// This file will be used for recovery in the event of parameter update workflow
// failure due to bad static parameters.
func (s *Server) BackupConfigFile(ctx context.Context, cdbName string) error {
configDir := fmt.Sprintf(consts.ConfigDir, consts.DataMount, cdbName)
backupPFileLoc := fmt.Sprintf("%s/%s", configDir, "pfile.lkws")
klog.InfoS("dbdaemon/BackupConfigFile: backup config file", "backupPFileLoc", backupPFileLoc)
_, err := s.runSQLPlusHelper(ctx, &dbdpb.RunSQLPlusCMDRequest{Commands: []string{fmt.Sprintf("create pfile='%s' from spfile", backupPFileLoc)}}, false)
if err != nil {
klog.InfoS("dbdaemon/BackupConfigFile: error while backing up config file", "err", err)
return fmt.Errorf("BackupConfigFile: failed to create pfile due to error: %v", err)
}
klog.InfoS("dbdaemon/BackupConfigFile: Successfully backed up config file")
return nil
}
// RecoverConfigFile generates the binary spfile from the human readable backup pfile
func (s *Server) RecoverConfigFile(ctx context.Context, req *dbdpb.RecoverConfigFileRequest) (*dbdpb.RecoverConfigFileResponse, error) {
configDir := fmt.Sprintf(consts.ConfigDir, consts.DataMount, req.GetCdbName())
backupPFileLoc := fmt.Sprintf("%s/%s", configDir, "pfile.lkws")
spFileLoc := fmt.Sprintf("%s/%s", configDir, fmt.Sprintf("spfile%s.ora", req.CdbName))
klog.InfoS("dbdaemon/RecoverConfigFile: recover config file", "backupPFileLoc", backupPFileLoc, "spFileLoc", spFileLoc)
_, err := s.runSQLPlusHelper(ctx, &dbdpb.RunSQLPlusCMDRequest{
Commands: []string{fmt.Sprintf("create spfile='%s' from pfile='%s'", spFileLoc, backupPFileLoc)}}, false)
if err != nil {
klog.InfoS("dbdaemon/RecoverConfigFile: error while recovering config file", "err", err)
return nil, fmt.Errorf("dbdaemon/RecoverConfigFile: error while recovering config file: %v", err)
}
klog.InfoS("dbdaemon/RecoverConfigFile: Successfully recovered config file")
return &dbdpb.RecoverConfigFileResponse{}, nil
}
// New creates a new dbdaemon server.
func New(ctx context.Context, cdbNameFromYaml string) (*Server, error) {
klog.InfoS("dbdaemon/New: Dialing dbdaemon proxy")
conn, err := common.DatabaseDaemonDialSocket(ctx, consts.ProxyDomainSocketFile, grpc.WithBlock())
if err != nil {
return nil, fmt.Errorf("failed to dial to database daemon: %v", err)
}
klog.InfoS("dbdaemon/New: Successfully connected to dbdaemon proxy")
hostname, err := os.Hostname()
if err != nil {
return nil, fmt.Errorf("failed to get hostname: %v", err)
}
s := &Server{
hostName: hostname,
database: &DB{},
osUtil: &osUtilImpl{},
databaseSid: &syncState{},
dbdClient: dbdpb.NewDatabaseDaemonProxyClient(conn),
dbdClientClose: conn.Close,
lroServer: lro.NewServer(ctx),
syncJobs: &syncJobs{},
gcsUtil: &gcsUtilImpl{},
}
oracleHome := os.Getenv("ORACLE_HOME")
if err := setEnvNew(s, oracleHome, cdbNameFromYaml); err != nil {
return nil, fmt.Errorf("failed to setup environment: %v", err)
}
return s, nil
}
// DownloadDirectoryFromGCS downloads objects from GCS bucket using prefix
func (s *Server) DownloadDirectoryFromGCS(ctx context.Context, req *dbdpb.DownloadDirectoryFromGCSRequest) (*dbdpb.DownloadDirectoryFromGCSResponse, error) {
klog.Infof("dbdaemon/DownloadDirectoryFromGCS: req %v", req)
bucket, prefix, err := s.gcsUtil.SplitURI(req.GcsPath)
if err != nil {
return nil, fmt.Errorf("failed to parse gcs path %s", err)
}
if req.GetAccessPermissionCheck() {
klog.Info("dbdaemon/downloadDirectoryFromGCS: verify the access permission of the given GCS path")
} else {
klog.Infof("dbdaemon/downloadDirectoryFromGCS: destination path is %s", req.GetLocalPath())
}
client, err := storage.NewClient(ctx)
if err != nil {
return nil, fmt.Errorf("storage.NewClient: %v", err)
}
defer client.Close()
ctx, cancel := context.WithTimeout(ctx, time.Second*3600)
defer cancel()
it := client.Bucket(bucket).Objects(ctx, &storage.Query{
Prefix: prefix,
})
for {
attrs, err := it.Next()
if err == iterator.Done {
break
}
if err != nil {
return nil, fmt.Errorf("Bucket(%q).Objects(): %v", bucket, err)
}
if req.GetAccessPermissionCheck() {
reader, err := client.Bucket(bucket).Object(attrs.Name).NewRangeReader(ctx, 0, 1)
if err != nil {
return nil, fmt.Errorf("failed to read URL %s: %v", attrs.Name, err)
}
reader.Close()
} else {
if err := s.downloadFile(ctx, client, bucket, attrs.Name, prefix, req.GetLocalPath()); err != nil {
return nil, fmt.Errorf("failed to download file %s", err)
}
}
}
return &dbdpb.DownloadDirectoryFromGCSResponse{}, nil
}
// FetchServiceImageMetaData fetches the image metadata via the dbdaemon proxy.
func (s *Server) FetchServiceImageMetaData(ctx context.Context, req *dbdpb.FetchServiceImageMetaDataRequest) (*dbdpb.FetchServiceImageMetaDataResponse, error) {
proxyResponse, err := s.dbdClient.ProxyFetchServiceImageMetaData(ctx, &dbdpb.ProxyFetchServiceImageMetaDataRequest{})
if err != nil {
return &dbdpb.FetchServiceImageMetaDataResponse{}, err
}
return &dbdpb.FetchServiceImageMetaDataResponse{Version: proxyResponse.Version, CdbName: proxyResponse.CdbName, OracleHome: proxyResponse.OracleHome, SeededImage: proxyResponse.SeededImage}, nil
}
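// downloadFile copies a single GCS object into dest, preserving the object path relative to baseDir, and logs the download throughput.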
func (s *Server) downloadFile(ctx context.Context, c *storage.Client, bucket, gcsPath, baseDir, dest string) error {
reader, err := c.Bucket(bucket).Object(gcsPath).NewReader(ctx)
if err != nil {
return fmt.Errorf("failed to read URL %s: %v", gcsPath, err)
}
defer reader.Close()
relPath, err := filepath.Rel(baseDir, gcsPath)
if err != nil {
return fmt.Errorf("failed to parse relPath for gcsPath %s", gcsPath)
}
f := filepath.Join(dest, relPath)
start := time.Now()
if err := s.osUtil.createFile(f, reader); err != nil {
return fmt.Errorf("failed to createFile for file %s, err %s", f, err)
}
end := time.Now()
rate := float64(reader.Attrs.Size) / (end.Sub(start).Seconds())
klog.InfoS("dbdaemon/downloadFile:", "downloaded", f, "throughput", fmt.Sprintf("(%f MB/s)", rate/1024/1024))
return nil
}
// bootstrapDatabase invokes init_oracle on dbdaemon_proxy to perform bootstrap tasks for seeded image
func (s *Server) bootstrapDatabase(ctx context.Context, req *dbdpb.BootstrapDatabaseRequest) (*dbdpb.BootstrapDatabaseResponse, error) {
cmd := "free -m | awk '/Mem/ {print $2}'"
out, err := exec.Command("bash", "-c", cmd).Output()
if err != nil {
return nil, fmt.Errorf("Failed to execute command %s: %s", cmd, err)
}
freeMem, err := strconv.Atoi(string(out[:len(out)-1]))
if err != nil {
return nil, fmt.Errorf("Failed to convert output %s to integer: %s", string(out), err)
}
if _, err := s.dbdClient.ProxyRunInitOracle(ctx, &dbdpb.ProxyRunInitOracleRequest{
Params: []string{
fmt.Sprintf("--pga=%d", freeMem/8),
fmt.Sprintf("--sga=%d", freeMem/2),
fmt.Sprintf("--cdb_name=%s", req.GetCdbName()),
fmt.Sprintf("--db_domain=%s", req.GetDbDomain()),
"--logtostderr=true",
},
}); err != nil {
klog.InfoS("dbdaemon/BootstrapDatabase: error while running init_oracle", "err", err)
return nil, fmt.Errorf("dbdaemon/BootstrapDatabase: failed to bootstrap database due to: %v", err)
}
klog.InfoS("dbdaemon/BootstrapDatabase: bootstrap database successful")
return &dbdpb.BootstrapDatabaseResponse{}, nil
}
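// BootstrapDatabaseAsync turns bootstrapDatabase into an async call by running it as an LRO job.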
func (s *Server) BootstrapDatabaseAsync(ctx context.Context, req *dbdpb.BootstrapDatabaseAsyncRequest) (*lropb.Operation, error) {
job, err := lro.CreateAndRunLROJobWithID(ctx, req.GetLroInput().GetOperationId(), "BootstrapDatabase", s.lroServer,
func(ctx context.Context) (proto.Message, error) {
return s.bootstrapDatabase(ctx, req.SyncRequest)
})
if err != nil {
klog.ErrorS(err, "dbdaemon/BootstrapDatabaseAsync failed to create an LRO job", "request", req)
return nil, err
}
return &lropb.Operation{Name: job.ID(), Done: false}, nil
}
| ["\"ORACLE_HOME\""] | [] | ["ORACLE_HOME"] | [] | ["ORACLE_HOME"] | go | 1 | 0 |
experiments/generic/pod-dns-spoof/experiment/pod-dns-spoof.go | package experiment
import (
"os"
"github.com/litmuschaos/chaos-operator/pkg/apis/litmuschaos/v1alpha1"
litmusLIB "github.com/litmuschaos/litmus-go/chaoslib/litmus/pod-dns-chaos/lib"
"github.com/litmuschaos/litmus-go/pkg/clients"
"github.com/litmuschaos/litmus-go/pkg/events"
experimentEnv "github.com/litmuschaos/litmus-go/pkg/generic/pod-dns-chaos/environment"
experimentTypes "github.com/litmuschaos/litmus-go/pkg/generic/pod-dns-chaos/types"
"github.com/litmuschaos/litmus-go/pkg/log"
"github.com/litmuschaos/litmus-go/pkg/probe"
"github.com/litmuschaos/litmus-go/pkg/result"
"github.com/litmuschaos/litmus-go/pkg/status"
"github.com/litmuschaos/litmus-go/pkg/types"
"github.com/litmuschaos/litmus-go/pkg/utils/common"
"github.com/sirupsen/logrus"
)
// PodDNSSpoof contains steps to inject chaos
func PodDNSSpoof(clients clients.ClientSets) {
var err error
experimentsDetails := experimentTypes.ExperimentDetails{}
resultDetails := types.ResultDetails{}
eventsDetails := types.EventDetails{}
chaosDetails := types.ChaosDetails{}
//Fetching all the ENV passed from the runner pod
log.Infof("[PreReq]: Getting the ENV for the %v experiment", os.Getenv("EXPERIMENT_NAME"))
experimentEnv.GetENV(&experimentsDetails, experimentEnv.Spoof)
// Initialise the chaos attributes
experimentEnv.InitialiseChaosVariables(&chaosDetails, &experimentsDetails)
// Initialise Chaos Result Parameters
types.SetResultAttributes(&resultDetails, chaosDetails)
if experimentsDetails.EngineName != "" {
// Initialise the probe details. Bail out upon error, as we haven't entered exp business logic yet
if err = probe.InitializeProbesInChaosResultDetails(&chaosDetails, clients, &resultDetails); err != nil {
log.Errorf("Unable to initialize the probes, err: %v", err)
return
}
}
//Updating the chaos result in the beginning of experiment
log.Infof("[PreReq]: Updating the chaos result of %v experiment (SOT)", experimentsDetails.ExperimentName)
if err = result.ChaosResult(&chaosDetails, clients, &resultDetails, "SOT"); err != nil {
log.Errorf("Unable to Create the Chaos Result, err: %v", err)
failStep := "Updating the chaos result of pod-dns-spoof experiment (SOT)"
result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails)
return
}
// Set the chaos result uid
result.SetResultUID(&resultDetails, clients, &chaosDetails)
// generating the event in chaosresult to mark the verdict as awaited
msg := "experiment: " + experimentsDetails.ExperimentName + ", Result: Awaited"
types.SetResultEventAttributes(&eventsDetails, types.AwaitedVerdict, msg, "Normal", &resultDetails)
events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosResult")
//DISPLAY THE APP INFORMATION
log.InfoWithValues("[Info]: The application information is as follows", logrus.Fields{
"Namespace": experimentsDetails.AppNS,
"Label": experimentsDetails.AppLabel,
"Chaos Duration": experimentsDetails.ChaosDuration,
"Spoof Map": experimentsDetails.SpoofMap,
})
// Calling the AbortWatcher goroutine, which continuously watches for the abort signal and generates the required events and result
go common.AbortWatcher(experimentsDetails.ExperimentName, clients, &resultDetails, &chaosDetails, &eventsDetails)
//PRE-CHAOS APPLICATION STATUS CHECK
if chaosDetails.DefaultAppHealthCheck {
log.Info("[Status]: Verify that the AUT (Application Under Test) is running (pre-chaos)")
if err = status.AUTStatusCheck(experimentsDetails.AppNS, experimentsDetails.AppLabel, experimentsDetails.TargetContainer, experimentsDetails.Timeout, experimentsDetails.Delay, clients, &chaosDetails); err != nil {
log.Errorf("Application status check failed, err: %v", err)
failStep := "Verify that the AUT (Application Under Test) is running (pre-chaos)"
types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, "AUT: Not Running", "Warning", &chaosDetails)
events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails)
return
}
}
if experimentsDetails.EngineName != "" {
// marking AUT as running, as we already checked the status of application under test
msg := common.GetStatusMessage(chaosDetails.DefaultAppHealthCheck, "AUT: Running", "")
// run the probes in the pre-chaos check
if len(resultDetails.ProbeDetails) != 0 {
err = probe.RunProbes(&chaosDetails, clients, &resultDetails, "PreChaos", &eventsDetails)
if err != nil {
log.Errorf("Probe Failed, err: %v", err)
failStep := "Failed while running probes"
msg := common.GetStatusMessage(chaosDetails.DefaultAppHealthCheck, "AUT: Running", "Unsuccessful")
types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Warning", &chaosDetails)
events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails)
return
}
msg = common.GetStatusMessage(chaosDetails.DefaultAppHealthCheck, "AUT: Running", "Successful")
}
// generating the events for the pre-chaos check
types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Normal", &chaosDetails)
events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
}
// Including the litmus lib
switch experimentsDetails.ChaosLib {
case "litmus":
if err = litmusLIB.PrepareAndInjectChaos(&experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil {
log.Errorf("Chaos injection failed, err: %v", err)
failStep := "failed in chaos injection phase"
result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails)
return
}
default:
log.Error("[Invalid]: Please Provide the correct LIB")
failStep := "no match found for specified lib"
result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails)
return
}
log.Info("[Confirmation]: chaos has been injected successfully")
resultDetails.Verdict = v1alpha1.ResultVerdictPassed
//POST-CHAOS APPLICATION STATUS CHECK
if chaosDetails.DefaultAppHealthCheck {
log.Info("[Status]: Verify that the AUT (Application Under Test) is running (post-chaos)")
if err = status.AUTStatusCheck(experimentsDetails.AppNS, experimentsDetails.AppLabel, experimentsDetails.TargetContainer, experimentsDetails.Timeout, experimentsDetails.Delay, clients, &chaosDetails); err != nil {
log.Errorf("Application status check failed, err: %v", err)
failStep := "Verify that the AUT (Application Under Test) is running (post-chaos)"
types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, "AUT: Not Running", "Warning", &chaosDetails)
events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails)
return
}
}
if experimentsDetails.EngineName != "" {
// marking AUT as running, as we already checked the status of application under test
msg := common.GetStatusMessage(chaosDetails.DefaultAppHealthCheck, "AUT: Running", "")
// run the probes in the post-chaos check
if len(resultDetails.ProbeDetails) != 0 {
if err = probe.RunProbes(&chaosDetails, clients, &resultDetails, "PostChaos", &eventsDetails); err != nil {
log.Errorf("Probes Failed, err: %v", err)
failStep := "Failed while running probes"
msg := common.GetStatusMessage(chaosDetails.DefaultAppHealthCheck, "AUT: Running", "Unsuccessful")
types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, msg, "Warning", &chaosDetails)
events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails)
return
}
msg = common.GetStatusMessage(chaosDetails.DefaultAppHealthCheck, "AUT: Running", "Successful")
}
// generating post chaos event
types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, msg, "Normal", &chaosDetails)
events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
}
//Updating the chaosResult in the end of experiment
log.Infof("[The End]: Updating the chaos result of %v experiment (EOT)", experimentsDetails.ExperimentName)
if err = result.ChaosResult(&chaosDetails, clients, &resultDetails, "EOT"); err != nil {
log.Errorf("Unable to Update the Chaos Result, err: %v", err)
return
}
// generating the event in chaosresult to mark the verdict as pass/fail
msg = "experiment: " + experimentsDetails.ExperimentName + ", Result: " + string(resultDetails.Verdict)
reason := types.PassVerdict
eventType := "Normal"
if resultDetails.Verdict != "Pass" {
reason = types.FailVerdict
eventType = "Warning"
}
types.SetResultEventAttributes(&eventsDetails, reason, msg, eventType, &resultDetails)
events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosResult")
if experimentsDetails.EngineName != "" {
msg := experimentsDetails.ExperimentName + " experiment has been " + string(resultDetails.Verdict) + "ed"
types.SetEngineEventAttributes(&eventsDetails, types.Summary, msg, "Normal", &chaosDetails)
events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
}
}
| ["\"EXPERIMENT_NAME\""] | [] | ["EXPERIMENT_NAME"] | [] | ["EXPERIMENT_NAME"] | go | 1 | 0 |
junos/data_source_interface_test.go | package junos_test
import (
"os"
"testing"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource"
)
// export TESTACC_INTERFACE=<interface> to choose the interface used by this test; otherwise ge-0/0/3 is used.
func TestAccDataSourceInterface_basic(t *testing.T) {
var testaccInterface string
if os.Getenv("TESTACC_INTERFACE") != "" {
testaccInterface = os.Getenv("TESTACC_INTERFACE")
} else {
testaccInterface = defaultInterfaceTestAcc
}
if os.Getenv("TESTACC_SWITCH") == "" {
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
Steps: []resource.TestStep{
{
Config: testAccDataSourceInterfaceConfigCreate(testaccInterface),
},
{
Config: testAccDataSourceInterfaceConfigData(testaccInterface),
Check: resource.ComposeTestCheckFunc(
resource.TestCheckResourceAttr("data.junos_interface.testacc_datainterface",
"id", testaccInterface+".100"),
resource.TestCheckResourceAttr("data.junos_interface.testacc_datainterface",
"name", testaccInterface+".100"),
resource.TestCheckResourceAttr("data.junos_interface.testacc_datainterface",
"inet_address.#", "1"),
resource.TestCheckResourceAttr("data.junos_interface.testacc_datainterface",
"inet_address.0.address", "192.0.2.1/25"),
resource.TestCheckResourceAttr("data.junos_interface.testacc_datainterface2",
"id", testaccInterface+".100"),
),
},
},
PreventPostDestroyRefresh: true,
})
}
}
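// testAccDataSourceInterfaceConfigCreate returns a Terraform config that creates the parent interface with VLAN tagging and a .100 sub-interface with an inet address.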
func testAccDataSourceInterfaceConfigCreate(interFace string) string {
return `
resource junos_interface testacc_datainterfaceP {
name = "` + interFace + `"
description = "testacc_datainterfaceP"
vlan_tagging = true
}
resource junos_interface testacc_datainterface {
name = "${junos_interface.testacc_datainterfaceP.name}.100"
description = "testacc_datainterface"
inet_address {
address = "192.0.2.1/25"
}
}
`
}
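// testAccDataSourceInterfaceConfigData returns the same resources plus junos_interface data sources that look up the sub-interface by config_interface and by address match.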
func testAccDataSourceInterfaceConfigData(interFace string) string {
return `
resource junos_interface testacc_datainterfaceP {
name = "` + interFace + `"
description = "testacc_datainterfaceP"
vlan_tagging = true
}
resource junos_interface testacc_datainterface {
name = "${junos_interface.testacc_datainterfaceP.name}.100"
description = "testacc_datainterface"
inet_address {
address = "192.0.2.1/25"
}
}
data junos_interface testacc_datainterface {
config_interface = "` + interFace + `"
match = "192.0.2.1/"
}
data junos_interface testacc_datainterface2 {
match = "192.0.2.(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)"
}
`
}
| ["\"TESTACC_INTERFACE\"", "\"TESTACC_INTERFACE\"", "\"TESTACC_SWITCH\""] | [] | ["TESTACC_INTERFACE", "TESTACC_SWITCH"] | [] | ["TESTACC_INTERFACE", "TESTACC_SWITCH"] | go | 2 | 0 |
maistra/vendor/com_googlesource_chromium_v8/wee8/build/android/pylib/local/emulator/avd.py | # Copyright 2019 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import contextlib
import json
import logging
import os
import socket
import stat
import subprocess
import threading
from google.protobuf import text_format # pylint: disable=import-error
from devil.android import device_utils
from devil.android.sdk import adb_wrapper
from devil.utils import cmd_helper
from devil.utils import timeout_retry
from py_utils import tempfile_ext
from pylib import constants
from pylib.local.emulator import ini
from pylib.local.emulator.proto import avd_pb2
_ALL_PACKAGES = object()
_DEFAULT_AVDMANAGER_PATH = os.path.join(constants.ANDROID_SDK_ROOT, 'tools',
'bin', 'avdmanager')
# Default to a 480dp mdpi screen (a relatively large phone).
# See https://developer.android.com/training/multiscreen/screensizes
# and https://developer.android.com/training/multiscreen/screendensities
# for more information.
_DEFAULT_SCREEN_DENSITY = 160
_DEFAULT_SCREEN_HEIGHT = 960
_DEFAULT_SCREEN_WIDTH = 480
class AvdException(Exception):
"""Raised when this module has a problem interacting with an AVD."""
def __init__(self, summary, command=None, stdout=None, stderr=None):
message_parts = [summary]
if command:
message_parts.append(' command: %s' % ' '.join(command))
if stdout:
message_parts.append(' stdout:')
message_parts.extend(' %s' % line for line in stdout.splitlines())
if stderr:
message_parts.append(' stderr:')
message_parts.extend(' %s' % line for line in stderr.splitlines())
super(AvdException, self).__init__('\n'.join(message_parts))
def _Load(avd_proto_path):
"""Loads an Avd proto from a textpb file at the given path.
Should not be called outside of this module.
Args:
avd_proto_path: path to a textpb file containing an Avd message.
"""
with open(avd_proto_path) as avd_proto_file:
return text_format.Merge(avd_proto_file.read(), avd_pb2.Avd())
class _AvdManagerAgent(object):
"""Private utility for interacting with avdmanager."""
def __init__(self, avd_home, sdk_root):
"""Create an _AvdManagerAgent.
Args:
avd_home: path to ANDROID_AVD_HOME directory.
Typically something like /path/to/dir/.android/avd
sdk_root: path to SDK root directory.
"""
self._avd_home = avd_home
self._sdk_root = sdk_root
self._env = dict(os.environ)
# avdmanager, like many tools that have evolved from `android`
# (http://bit.ly/2m9JiTx), uses toolsdir to find the SDK root.
# Pass avdmanager a fake directory under the directory in which
# we install the system images s.t. avdmanager can find the
# system images.
fake_tools_dir = os.path.join(self._sdk_root, 'non-existent-tools')
self._env.update({
'ANDROID_AVD_HOME':
self._avd_home,
'AVDMANAGER_OPTS':
'-Dcom.android.sdkmanager.toolsdir=%s' % fake_tools_dir,
})
def Create(self, avd_name, system_image, force=False):
"""Call `avdmanager create`.
Args:
avd_name: name of the AVD to create.
system_image: system image to use for the AVD.
force: whether to force creation, overwriting any existing
AVD with the same name.
"""
create_cmd = [
_DEFAULT_AVDMANAGER_PATH,
'-v',
'create',
'avd',
'-n',
avd_name,
'-k',
system_image,
]
if force:
create_cmd += ['--force']
create_proc = cmd_helper.Popen(
create_cmd,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env=self._env)
output, error = create_proc.communicate(input='\n')
if create_proc.returncode != 0:
raise AvdException(
'AVD creation failed',
command=create_cmd,
stdout=output,
stderr=error)
for line in output.splitlines():
logging.info(' %s', line)
def Delete(self, avd_name):
"""Call `avdmanager delete`.
Args:
avd_name: name of the AVD to delete.
"""
delete_cmd = [
_DEFAULT_AVDMANAGER_PATH,
'-v',
'delete',
'avd',
'-n',
avd_name,
]
try:
for line in cmd_helper.IterCmdOutputLines(delete_cmd, env=self._env):
logging.info(' %s', line)
except subprocess.CalledProcessError as e:
raise AvdException('AVD deletion failed: %s' % str(e), command=delete_cmd)
class AvdConfig(object):
"""Represents a particular AVD configuration.
This class supports creation, installation, and execution of an AVD
from a given Avd proto message, as defined in
//build/android/pylib/local/emulator/proto/avd.proto.
"""
def __init__(self, avd_proto_path):
"""Create an AvdConfig object.
Args:
avd_proto_path: path to a textpb file containing an Avd message.
"""
self._config = _Load(avd_proto_path)
self._emulator_home = os.path.join(constants.DIR_SOURCE_ROOT,
self._config.avd_package.dest_path)
self._emulator_sdk_root = os.path.join(
constants.DIR_SOURCE_ROOT, self._config.emulator_package.dest_path)
self._emulator_path = os.path.join(self._emulator_sdk_root, 'emulator',
'emulator')
self._initialized = False
self._initializer_lock = threading.Lock()
def Create(self,
force=False,
snapshot=False,
keep=False,
cipd_json_output=None):
"""Create an instance of the AVD CIPD package.
This method:
- installs the requisite system image
- creates the AVD
- modifies the AVD's ini files to support running chromium tests
in chromium infrastructure
- optionally starts & stops the AVD for snapshotting (default no)
- creates and uploads an instance of the AVD CIPD package
- optionally deletes the AVD (default yes)
Args:
force: bool indicating whether to force create the AVD.
snapshot: bool indicating whether to snapshot the AVD before creating
the CIPD package.
keep: bool indicating whether to keep the AVD after creating
the CIPD package.
cipd_json_output: string path to pass to `cipd create` via -json-output.
"""
logging.info('Installing required packages.')
self.Install(packages=[
self._config.emulator_package,
self._config.system_image_package,
])
android_avd_home = os.path.join(self._emulator_home, 'avd')
if not os.path.exists(android_avd_home):
os.makedirs(android_avd_home)
avd_manager = _AvdManagerAgent(
avd_home=android_avd_home, sdk_root=self._emulator_sdk_root)
logging.info('Creating AVD.')
avd_manager.Create(
avd_name=self._config.avd_name,
system_image=self._config.system_image_name,
force=force)
try:
logging.info('Modifying AVD configuration.')
# Clear out any previous configuration or state from this AVD.
root_ini = os.path.join(android_avd_home,
'%s.ini' % self._config.avd_name)
avd_dir = os.path.join(android_avd_home, '%s.avd' % self._config.avd_name)
config_ini = os.path.join(avd_dir, 'config.ini')
with open(root_ini, 'a') as root_ini_file:
root_ini_file.write('path.rel=avd/%s.avd\n' % self._config.avd_name)
height = (self._config.avd_settings.screen.height
or _DEFAULT_SCREEN_HEIGHT)
width = (self._config.avd_settings.screen.width or _DEFAULT_SCREEN_WIDTH)
density = (self._config.avd_settings.screen.density
or _DEFAULT_SCREEN_DENSITY)
config_ini_contents = {
'disk.dataPartition.size': '4G',
'hw.lcd.density': density,
'hw.lcd.height': height,
'hw.lcd.width': width,
}
with open(config_ini, 'a') as config_ini_file:
ini.dump(config_ini_contents, config_ini_file)
# Start & stop the AVD.
self._Initialize()
instance = _AvdInstance(self._emulator_path, self._emulator_home,
self._config)
instance.Start(read_only=False, snapshot_save=snapshot)
device_utils.DeviceUtils(instance.serial).WaitUntilFullyBooted(
timeout=180, retries=0)
instance.Stop()
# The multiinstance lock file seems to interfere with the emulator's
# operation in some circumstances (beyond the obvious -read-only ones),
# and there seems to be no mechanism by which it gets closed or deleted.
# See https://bit.ly/2pWQTH7 for context.
multiInstanceLockFile = os.path.join(avd_dir, 'multiinstance.lock')
if os.path.exists(multiInstanceLockFile):
os.unlink(multiInstanceLockFile)
package_def_content = {
'package':
self._config.avd_package.package_name,
'root':
self._emulator_home,
'install_mode':
'copy',
'data': [
{
'dir': os.path.relpath(avd_dir, self._emulator_home)
},
{
'file': os.path.relpath(root_ini, self._emulator_home)
},
],
}
logging.info('Creating AVD CIPD package.')
logging.debug('ensure file content: %s',
json.dumps(package_def_content, indent=2))
with tempfile_ext.TemporaryFileName(suffix='.json') as package_def_path:
with open(package_def_path, 'w') as package_def_file:
json.dump(package_def_content, package_def_file)
logging.info(' %s', self._config.avd_package.package_name)
cipd_create_cmd = [
'cipd',
'create',
'-pkg-def',
package_def_path,
]
if cipd_json_output:
cipd_create_cmd.extend([
'-json-output',
cipd_json_output,
])
try:
for line in cmd_helper.IterCmdOutputLines(cipd_create_cmd):
logging.info(' %s', line)
except subprocess.CalledProcessError as e:
raise AvdException(
'CIPD package creation failed: %s' % str(e),
command=cipd_create_cmd)
finally:
if not keep:
logging.info('Deleting AVD.')
avd_manager.Delete(avd_name=self._config.avd_name)
def Install(self, packages=_ALL_PACKAGES):
"""Installs the requested CIPD packages.
Returns: None
Raises: AvdException on failure to install.
"""
pkgs_by_dir = {}
if packages is _ALL_PACKAGES:
packages = [
self._config.avd_package,
self._config.emulator_package,
self._config.system_image_package,
]
for pkg in packages:
if not pkg.dest_path in pkgs_by_dir:
pkgs_by_dir[pkg.dest_path] = []
pkgs_by_dir[pkg.dest_path].append(pkg)
for pkg_dir, pkgs in pkgs_by_dir.iteritems():
logging.info('Installing packages in %s', pkg_dir)
cipd_root = os.path.join(constants.DIR_SOURCE_ROOT, pkg_dir)
if not os.path.exists(cipd_root):
os.makedirs(cipd_root)
ensure_path = os.path.join(cipd_root, '.ensure')
with open(ensure_path, 'w') as ensure_file:
# Make CIPD ensure that all files are present, even if
# it thinks the package is installed.
ensure_file.write('$ParanoidMode CheckPresence\n\n')
for pkg in pkgs:
ensure_file.write('%s %s\n' % (pkg.package_name, pkg.version))
logging.info(' %s %s', pkg.package_name, pkg.version)
ensure_cmd = [
'cipd',
'ensure',
'-ensure-file',
ensure_path,
'-root',
cipd_root,
]
try:
for line in cmd_helper.IterCmdOutputLines(ensure_cmd):
logging.info(' %s', line)
except subprocess.CalledProcessError as e:
raise AvdException(
'Failed to install CIPD package %s: %s' % (pkg.package_name,
str(e)),
command=ensure_cmd)
# The emulator requires that some files are writable.
for dirname, _, filenames in os.walk(self._emulator_home):
for f in filenames:
path = os.path.join(dirname, f)
mode = os.lstat(path).st_mode
if mode & stat.S_IRUSR:
mode = mode | stat.S_IWUSR
os.chmod(path, mode)
def _Initialize(self):
if self._initialized:
return
with self._initializer_lock:
if self._initialized:
return
# Emulator start-up looks for the adb daemon. Make sure it's running.
adb_wrapper.AdbWrapper.StartServer()
# Emulator start-up tries to check for the SDK root by looking for
# platforms/ and platform-tools/. Ensure they exist.
# See http://bit.ly/2YAkyFE for context.
required_dirs = [
os.path.join(self._emulator_sdk_root, 'platforms'),
os.path.join(self._emulator_sdk_root, 'platform-tools'),
]
for d in required_dirs:
if not os.path.exists(d):
os.makedirs(d)
def CreateInstance(self):
"""Creates an AVD instance without starting it.
Returns:
An _AvdInstance.
"""
self._Initialize()
return _AvdInstance(self._emulator_path, self._emulator_home, self._config)
def StartInstance(self):
"""Starts an AVD instance.
Returns:
An _AvdInstance.
"""
instance = self.CreateInstance()
instance.Start()
return instance
class _AvdInstance(object):
"""Represents a single running instance of an AVD.
This class should only be created directly by AvdConfig.StartInstance,
but its other methods can be freely called.
"""
def __init__(self, emulator_path, emulator_home, avd_config):
"""Create an _AvdInstance object.
Args:
emulator_path: path to the emulator binary.
emulator_home: path to the emulator home directory.
avd_config: AVD config proto.
"""
self._avd_config = avd_config
self._avd_name = avd_config.avd_name
self._emulator_home = emulator_home
self._emulator_path = emulator_path
self._emulator_proc = None
self._emulator_serial = None
self._sink = None
def __str__(self):
return '%s|%s' % (self._avd_name, (self._emulator_serial or id(self)))
def Start(self,
read_only=True,
snapshot_save=False,
window=False,
writable_system=False):
"""Starts the emulator running an instance of the given AVD."""
with tempfile_ext.TemporaryFileName() as socket_path, (contextlib.closing(
socket.socket(socket.AF_UNIX))) as sock:
sock.bind(socket_path)
emulator_cmd = [
self._emulator_path,
'-avd',
self._avd_name,
'-report-console',
'unix:%s' % socket_path,
'-no-boot-anim',
]
android_avd_home = os.path.join(self._emulator_home, 'avd')
avd_dir = os.path.join(android_avd_home, '%s.avd' % self._avd_name)
hardware_qemu_path = os.path.join(avd_dir, 'hardware-qemu.ini')
if os.path.exists(hardware_qemu_path):
with open(hardware_qemu_path) as hardware_qemu_file:
hardware_qemu_contents = ini.load(hardware_qemu_file)
else:
hardware_qemu_contents = {}
if read_only:
emulator_cmd.append('-read-only')
if not snapshot_save:
emulator_cmd.append('-no-snapshot-save')
if writable_system:
emulator_cmd.append('-writable-system')
emulator_env = {}
if self._emulator_home:
emulator_env['ANDROID_EMULATOR_HOME'] = self._emulator_home
if window:
if 'DISPLAY' in os.environ:
emulator_env['DISPLAY'] = os.environ.get('DISPLAY')
else:
raise AvdException('Emulator failed to start: DISPLAY not defined')
else:
emulator_cmd.append('-no-window')
hardware_qemu_contents['hw.sdCard'] = 'true'
if self._avd_config.avd_settings.sdcard.size:
sdcard_path = os.path.join(self._emulator_home, 'avd',
'%s.avd' % self._avd_name, 'cr-sdcard.img')
if not os.path.exists(sdcard_path):
mksdcard_path = os.path.join(
os.path.dirname(self._emulator_path), 'mksdcard')
mksdcard_cmd = [
mksdcard_path,
self._avd_config.avd_settings.sdcard.size,
sdcard_path,
]
cmd_helper.RunCmd(mksdcard_cmd)
emulator_cmd.extend(['-sdcard', sdcard_path])
hardware_qemu_contents['hw.sdCard.path'] = sdcard_path
with open(hardware_qemu_path, 'w') as hardware_qemu_file:
ini.dump(hardware_qemu_contents, hardware_qemu_file)
sock.listen(1)
logging.info('Starting emulator.')
# TODO(jbudorick): Add support for logging emulator stdout & stderr at
# higher logging levels.
self._sink = open('/dev/null', 'w')
self._emulator_proc = cmd_helper.Popen(
emulator_cmd, stdout=self._sink, stderr=self._sink, env=emulator_env)
# Waits for the emulator to report its serial as requested via
# -report-console. See http://bit.ly/2lK3L18 for more.
def listen_for_serial(s):
logging.info('Waiting for connection from emulator.')
with contextlib.closing(s.accept()[0]) as conn:
val = conn.recv(1024)
return 'emulator-%d' % int(val)
try:
self._emulator_serial = timeout_retry.Run(
listen_for_serial, timeout=30, retries=0, args=[sock])
logging.info('%s started', self._emulator_serial)
except Exception as e:
self.Stop()
raise AvdException('Emulator failed to start: %s' % str(e))
def Stop(self):
"""Stops the emulator process."""
if self._emulator_proc:
if self._emulator_proc.poll() is None:
if self._emulator_serial:
device_utils.DeviceUtils(self._emulator_serial).adb.Emu('kill')
else:
self._emulator_proc.terminate()
self._emulator_proc.wait()
self._emulator_proc = None
if self._sink:
self._sink.close()
self._sink = None
@property
def serial(self):
return self._emulator_serial
| [] | [] | ["DISPLAY"] | [] | ["DISPLAY"] | python | 1 | 0 |
evaluate.py | import tensorflow as tf
import cPickle as pickle
import rnn_model
import cnn_model
from dataloader import Dataloader
import os
import datetime
import numpy as np
import argparse
from cnn_model import unroll
def main():
parser = argparse.ArgumentParser(description='Evaluate .')
parser.add_argument('rundir', type=str, help='directory of tf checkpoint file')
parser.add_argument('--model', type=str, help="Neural network architecture. 'lstm', 'rnn' or 'cnn' (default lstm)", default='lstm')
parser.add_argument('--gpu', type=int, help="Select gpu (e.g. 0), via environment variable CUDA_VISIBLE_DEVICES (default None)", default=None)
args = parser.parse_args()
""" GPU management """
allow_gpu_mem_growth = True
gpu_memory_fraction = 1
gpu_id = args.gpu
if args.gpu is not None:
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" # see issue #152
os.environ["CUDA_VISIBLE_DEVICES"] = gpu_id
dataloader = Dataloader(datafolder="data/eval", batchsize=500)
#dataloader = Dataloader(conn=conn, batch_size=args.batchsize, sql_where=args.sqlwhere,
# debug=False,
# do_shuffle=False, do_init_shuffle=True, tablename=args.tablename)
"""
Load
parameters
from init_from model
"""
with open(os.path.join(args.rundir, "args.pkl"), "rb") as f:
modelargs = pickle.load(f)
"""
Create
new
model
object
with same parameter """
print("building model graph")
if args.model in ["rnn","lstm"]:
model = rnn_model.Model(n_input=modelargs["n_input"], n_classes=modelargs["n_classes"], n_layers=modelargs["n_layers"], batch_size=dataloader.batchsize,
adam_lr=modelargs["adam_lr"],rnn_cell_type=args.model , dropout_keep_prob=modelargs["dropout_keep_prob"], n_cell_per_input=modelargs["n_cell_per_input"], gpu=0)
evaluate=evaluate_rnn
if args.model == "cnn":
model = cnn_model.Model(n_input=modelargs["n_input"], n_classes=modelargs["n_classes"], n_layers=modelargs["n_layers"],
adam_lr=1e-3, dropout_keep_prob=modelargs["dropout_keep_prob"], n_cell_per_input=modelargs["n_cell_per_input"], gpu=gpu_id)
evaluate = evaluate_cnn
probabilities, targets, observations = evaluate(model,dataloader,
init_dir=args.rundir,
print_every=20,
gpu_memory_fraction=gpu_memory_fraction,
allow_gpu_mem_growth=allow_gpu_mem_growth)
#np.save(os.path.join(args.rundir, "eval_confusion_matrix.npy"), confusion_matrix)
np.save(os.path.join(args.rundir, "eval_probabilities.npy"), probabilities)
np.save(os.path.join(args.rundir, "eval_targets.npy"), targets)
np.save(os.path.join(args.rundir, "eval_observations.npy"), observations)
def evaluate_rnn(model,
dataloader,
print_every=5,
init_dir=None,
allow_gpu_mem_growth=True,
gpu_memory_fraction=0.3):
"""
    This function initializes a model from the <init_from> directory and calculates
probabilities, and confusion matrices based on all data stored in
one epoch of dataloader (usually test data)
:param model: rnn_model object containing tensorflow graph
:param dataloader: DataLoader object for loading batches
:param print_every: console log frequency
:param allow_gpu_mem_growth: dynamic growth of gpu vram
:param gpu_memory_fraction: hard upper limit for gpu vram
:returns confusion_matrix <float> [n_classes x n_classes] rows as targets cols as predicted
:returns probabilities <float> [all observations x n_classes] probabilities for each class per observation
:returns targets <bool> [all observations x n_classes] reference data for each class per observation
    :returns observations <int> [all_observations] position of observation in the sequence
e.g. [1,2,3,4,1,2,3,4,5,6,1,2,3,4, ...]
"""
saver = tf.train.Saver()
# container for output data
total_cm = np.zeros((model.n_classes, model.n_classes))
all_scores = np.array([])
all_targets = np.array([])
all_obs = np.array([])
step = 0
t_last = datetime.datetime.now()
config = tf.ConfigProto()
config.gpu_options.allow_growth = allow_gpu_mem_growth
config.gpu_options.per_process_gpu_memory_fraction = gpu_memory_fraction
config.allow_soft_placement = True
print("start")
with tf.Session(config=config) as sess:
sess.run([model.init_op])
if init_dir is not None:
if os.path.exists(init_dir):
ckpt = tf.train.get_checkpoint_state(init_dir)
print("restoring model from %s" % ckpt.model_checkpoint_path)
saver.restore(sess, ckpt.model_checkpoint_path)
for i in range(1, dataloader.num_batches):
# step as number of features -> invariant to changes in batch size
step += dataloader.batch_size
s_db = datetime.datetime.now()
X, y, seq_lengths = dataloader.next_batch()
e_db = datetime.datetime.now()
feed = {model.X: X, model.y_: y, model.seq_lengths: seq_lengths}
cm, scores, targets, obs = sess.run([model.confusion_matrix, model.scores, model.targets, model.obs],
feed_dict=feed)
all_obs = np.append(all_obs, obs)
all_scores = np.append(all_scores, scores)
all_targets = np.append(all_targets, targets)
#total_cm += cm
e_tr = datetime.datetime.now()
dt_db = e_db - s_db
dt_tr = e_tr - e_db
field_per_s = dataloader.batch_size / (datetime.datetime.now() - t_last).total_seconds()
# approximate calculation time
approx_calc_time = (((dataloader.num_feat) - step) / field_per_s)
eta = datetime.datetime.now() + datetime.timedelta(seconds=approx_calc_time)
t_last = datetime.datetime.now()
if i % print_every == 0:
cross_entropy = sess.run(model.cross_entropy, feed_dict=feed)
msg = "Gathering: Iteration {}, feature {}, epoch {}, batch {}/{}: xentr {:.2f} " \
"(time: db {}ms; eval {}ms, {} feat/s, eta: {})".format(
i,
step,
dataloader.epoch,
dataloader.batch,
dataloader.num_batches,
cross_entropy,
int(dt_db.total_seconds() * 1000),
int(dt_tr.total_seconds() * 1000),
int(field_per_s),
eta.strftime("%d.%b %H:%M")
)
print(msg)
return all_scores.reshape(-1, model.n_classes), \
all_targets.reshape(-1, model.n_classes).astype(bool), \
all_obs
def evaluate_cnn(model,
dataloader,
print_every=5,
init_dir=None,
allow_gpu_mem_growth=True,
gpu_memory_fraction=0.3):
"""
    This function initializes a model from the <init_from> directory and calculates
probabilities, and confusion matrices based on all data stored in
one epoch of dataloader (usually test data)
:param model: rnn_model object containing tensorflow graph
:param dataloader: DataLoader object for loading batches
:param print_every: console log frequency
:param allow_gpu_mem_growth: dynamic growth of gpu vram
:param gpu_memory_fraction: hard upper limit for gpu vram
:returns confusion_matrix <float> [n_classes x n_classes] rows as targets cols as predicted
:returns probabilities <float> [all observations x n_classes] probabilities for each class per observation
:returns targets <bool> [all observations x n_classes] reference data for each class per observation
    :returns observations <int> [all_observations] position of observation in the sequence
e.g. [1,2,3,4,1,2,3,4,5,6,1,2,3,4, ...]
"""
saver = tf.train.Saver()
# container for output data
total_cm = np.zeros((model.n_classes, model.n_classes))
all_scores = np.array([])
all_targets = np.array([])
all_obs = np.array([])
step = 0
t_last = datetime.datetime.now()
config = tf.ConfigProto()
config.gpu_options.allow_growth = allow_gpu_mem_growth
config.gpu_options.per_process_gpu_memory_fraction = gpu_memory_fraction
config.allow_soft_placement = True
print("start")
with tf.Session(config=config) as sess:
sess.run([model.init_op])
if init_dir is not None:
if os.path.exists(init_dir):
ckpt = tf.train.get_checkpoint_state(init_dir)
print("restoring model from %s" % ckpt.model_checkpoint_path)
saver.restore(sess, ckpt.model_checkpoint_path)
with open(init_dir + "/steps.txt", "r") as f:
line = f.read()
step_, epoch_ = line.split(" ")
step = int(step_)
dataloader.epoch = int(epoch_)
for i in range(1, dataloader.num_batches):
# step as number of features -> invariant to changes in batch size
step += dataloader.batch_size
s_db = datetime.datetime.now()
X, y, seq_lengths = dataloader.next_batch()
e_db = datetime.datetime.now()
# unroll also index of observation. -> TODO integrate in unroll function, but need to update also dependencies
batch_size, max_seqlengths, n_input = X.shape
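            # keep only time steps within each sequence's true length (drop padding) and record their in-sequence indices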
ones = np.ones([batch_size, max_seqlengths])
mask_ = np.arange(0, max_seqlengths) * ones < (seq_lengths * ones.T).T
mask = mask_.reshape(-1)
obs_ = np.arange(0, max_seqlengths) * ones
obs = obs_.reshape(-1)[mask]
""" unroll data """
X, y = unroll(X, y, seq_lengths)
feed = {model.X: X, model.y: y, model.batch_size: X.shape[0]}
scores, targets = sess.run([model.scores, model.targets],
feed_dict=feed)
            all_obs = np.append(all_obs, obs)
            all_scores = np.append(all_scores, scores)
            all_targets = np.append(all_targets, targets)
e_tr = datetime.datetime.now()
dt_db = e_db - s_db
dt_tr = e_tr - e_db
field_per_s = dataloader.batch_size / (datetime.datetime.now() - t_last).total_seconds()
# approximate calculation time
approx_calc_time = (((dataloader.num_feat) - step) / field_per_s)
eta = datetime.datetime.now() + datetime.timedelta(seconds=approx_calc_time)
t_last = datetime.datetime.now()
if i % print_every == 0:
cross_entropy = sess.run(model.cross_entropy, feed_dict=feed)
msg = "Gathering: Iteration {}, feature {}, epoch {}, batch {}/{}: xentr {:.2f} " \
"(time: db {}ms; eval {}ms, {} feat/s, eta: {})".format(
i,
step,
dataloader.epoch,
dataloader.batch,
dataloader.num_batches,
cross_entropy,
int(dt_db.total_seconds() * 1000),
int(dt_tr.total_seconds() * 1000),
int(field_per_s),
eta.strftime("%d.%b %H:%M")
)
print(msg)
return all_scores.reshape(-1, model.n_classes), \
all_targets.reshape(-1, model.n_classes).astype(bool), \
           all_obs
if __name__ == '__main__':
    main()
| [] | [] | ["CUDA_DEVICE_ORDER", "CUDA_VISIBLE_DEVICES"] | [] | ["CUDA_DEVICE_ORDER", "CUDA_VISIBLE_DEVICES"] | python | 2 | 0 |
Configs/dm.py | # fileName: Configs/dm.py
# copyright ©️ 2021 nabilanavab
import os
#--------------->
#--------> CONFIG VAR.
#------------------->
class Config(object):
# get API_ID, API_HASH values from my.telegram.org (Mandatory)
API_ID = os.environ.get("API_ID")
API_HASH = os.environ.get("API_HASH")
# add API_TOKEN from @botfather (Mandatory)
API_TOKEN = os.environ.get("API_TOKEN")
# channel id for forced Subscription with -100 (Optional)
UPDATE_CHANNEL = os.environ.get("UPDATE_CHANNEL")
# get convertAPI secret (Optional)
CONVERT_API = os.environ.get("CONVERT_API")
# set maximum file size for preventing overload (Optional)
MAX_FILE_SIZE = os.environ.get("MAX_FILE_SIZE")
# add admins Id list by space seperated (Optional)
ADMINS = list(set(int(x) for x in os.environ.get("ADMINS", "0").split()))
if ADMINS:
# Bot only for admins [True/False] (Optional)
ADMIN_ONLY = os.environ.get("ADMIN_ONLY", False)
# banned Users cant use this bot (Optional)
BANNED_USERS = list(set(int(x) for x in os.environ.get("BANNED_USERS", "0").split()))
if not BANNED_USERS:
BANNED_USERS = []
# thumbnail
PDF_THUMBNAIL = "./thumbnail.jpeg"
# Telegram: @nabilanavab
| [] | [] | ["ADMINS", "UPDATE_CHANNEL", "BANNED_USERS", "CONVERT_API", "MAX_FILE_SIZE", "API_TOKEN", "API_ID", "API_HASH", "ADMIN_ONLY"] | [] | ["ADMINS", "UPDATE_CHANNEL", "BANNED_USERS", "CONVERT_API", "MAX_FILE_SIZE", "API_TOKEN", "API_ID", "API_HASH", "ADMIN_ONLY"] | python | 9 | 0 |
test/integration/test_jsonrpc.py | import pytest
import sys
import os
import re
os.environ['SENTINEL_ENV'] = 'test'
os.environ['SENTINEL_CONFIG'] = os.path.normpath(os.path.join(os.path.dirname(__file__), '../test_sentinel.conf'))
sys.path.append(os.path.join(os.path.dirname(__file__), '..', '..', 'lib'))
sys.path.append(os.path.join(os.path.dirname(__file__), '..', '..'))
import config
from dacashd import DACashDaemon
from dacash_config import DACashConfig
def test_dacashd():
config_text = DACashConfig.slurp_config_file(config.dacash_conf)
network = 'mainnet'
is_testnet = False
genesis_hash = u'00000b8ab13e8a9fa1108a80c066b95e48209616cf142b5f87527516a564a9c2'
for line in config_text.split("\n"):
if line.startswith('testnet=1'):
network = 'testnet'
is_testnet = True
genesis_hash = u'0000074f17a346a7a4df7c7f4e5c48b18d6f9d3313a7b20fc8090b490d2c80ef'
creds = DACashConfig.get_rpc_creds(config_text, network)
dacashd = DACashDaemon(**creds)
assert dacashd.rpc_command is not None
assert hasattr(dacashd, 'rpc_connection')
# DACash testnet block 0 hash == 00000bafbc94add76cb75e2ec92894837288a481e5c005f6563d91623bf8bc2c
# test commands without arguments
info = dacashd.rpc_command('getinfo')
info_keys = [
'blocks',
'connections',
'difficulty',
'errors',
'protocolversion',
'proxy',
'testnet',
'timeoffset',
'version',
]
for key in info_keys:
assert key in info
assert info['testnet'] is is_testnet
# test commands with args
assert dacashd.rpc_command('getblockhash', 0) == genesis_hash
| [] | [] | ["SENTINEL_ENV", "SENTINEL_CONFIG"] | [] | ["SENTINEL_ENV", "SENTINEL_CONFIG"] | python | 2 | 0 |
tests/experimental/test_auth.py | #!/usr/bin/env python
"""Tests admin-related functionality"""
import os
from contextlib import contextmanager
from time import sleep
import pytest
import python_pachyderm
from python_pachyderm.experimental.service import auth_proto, identity_proto
from tests import util
# bp_to_pb: OidcConfig -> OIDCConfig
@pytest.fixture
def client():
pc = python_pachyderm.experimental.Client()
pc.activate_license(os.environ["PACH_PYTHON_ENTERPRISE_CODE"])
pc.add_cluster("localhost", "localhost:1650", secret="secret")
pc.activate_enterprise("localhost:1650", "localhost", "secret")
pc.auth_token = "iamroot"
pc.activate_auth(pc.auth_token)
pc.set_identity_server_config(
config=identity_proto.IdentityServerConfig(issuer="http://localhost:1658")
)
yield pc
    # not redundant because auth_token could be overridden by tests
pc.auth_token = "iamroot"
try:
pc.delete_all_identity()
except Exception:
pass
try:
pc.delete_all_license()
except Exception:
pass
try:
pc.deactivate_auth()
except Exception:
pass
pc.deactivate_enterprise()
@util.skip_if_no_enterprise()
def test_auth_configuration(client):
client.get_auth_configuration()
client.set_auth_configuration(
auth_proto.OidcConfig(
issuer="http://localhost:1658",
client_id="client",
client_secret="secret",
redirect_uri="http://test.example.com",
)
)
@util.skip_if_no_enterprise()
def test_cluster_role_bindings(client):
cluster_resource = auth_proto.Resource(type=auth_proto.ResourceType.CLUSTER)
binding = client.get_role_binding(cluster_resource)
assert binding["pach:root"].roles["clusterAdmin"]
client.modify_role_binding(
cluster_resource, "robot:someuser", roles=["clusterAdmin"]
)
binding = client.get_role_binding(cluster_resource)
assert binding["robot:someuser"].roles["clusterAdmin"]
@util.skip_if_no_enterprise()
def test_authorize(client):
client.authorize(
auth_proto.Resource(type=auth_proto.ResourceType.REPO, name="foobar"),
[auth_proto.Permission.REPO_READ],
)
@util.skip_if_no_enterprise()
def test_who_am_i(client):
assert client.who_am_i().username == "pach:root"
@util.skip_if_no_enterprise()
def test_get_roles_for_permission(client):
# Checks built-in roles
roles = client.get_roles_for_permission(auth_proto.Permission.REPO_READ)
for r in roles:
assert auth_proto.Permission.REPO_READ in r.permissions
roles = client.get_roles_for_permission(
auth_proto.Permission.CLUSTER_GET_PACHD_LOGS
)
for r in roles:
assert auth_proto.Permission.CLUSTER_GET_PACHD_LOGS in r.permissions
@util.skip_if_no_enterprise()
def test_robot_token(client):
auth_token = client.get_robot_token("robot:root", ttl=30)
client.auth_token = auth_token
assert client.who_am_i().username == "robot:root"
client.revoke_auth_token(auth_token)
with pytest.raises(python_pachyderm.RpcError):
client.who_am_i()
@util.skip_if_no_enterprise()
def test_groups(client):
assert client.get_groups() == []
client.set_groups_for_user("pach:root", ["foogroup"])
assert client.get_groups() == ["foogroup"]
assert client.get_users("foogroup") == ["pach:root"]
client.modify_members("foogroup", remove=["pach:root"])
assert client.get_groups() == []
assert client.get_users("foogroup") == []
| [] | [] | ["PACH_PYTHON_ENTERPRISE_CODE"] | [] | ["PACH_PYTHON_ENTERPRISE_CODE"] | python | 1 | 0 |
pkg/khcheckcrd/api.go | // Copyright 2018 Comcast Cable Communications Management, LLC
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package khcheckcrd implements a kuberhealthy check CRD for configuring
// Kuberhealthy with external check pods.
package khcheckcrd
import (
"os"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/runtime/serializer"
"k8s.io/client-go/kubernetes/scheme"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
)
var namespace = os.Getenv("POD_NAMESPACE")
const resource = "khchecks"
const group = "comcast.github.io"
const version = "v1"
const defaultNamespace = "kuberhealthy"
func init() {
if namespace == "" {
namespace = defaultNamespace
}
}
// Client creates a rest client to use for interacting with CRDs
func Client(GroupName string, GroupVersion string, kubeConfig string, namespace string) (*KuberhealthyCheckClient, error) {
var c *rest.Config
var err error
c, err = rest.InClusterConfig()
if err != nil {
c, err = clientcmd.BuildConfigFromFlags("", kubeConfig)
}
if err != nil {
return &KuberhealthyCheckClient{}, err
}
err = ConfigureScheme(GroupName, GroupVersion)
if err != nil {
return &KuberhealthyCheckClient{}, err
}
config := *c
config.ContentConfig.GroupVersion = &schema.GroupVersion{Group: GroupName, Version: GroupVersion}
config.APIPath = "/apis"
// config.NegotiatedSerializer = serializer.WithoutConversionCodecFactory{CodecFactory: scheme.Codecs}
config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs}
config.UserAgent = rest.DefaultKubernetesUserAgent()
client, err := rest.RESTClientFor(&config)
return &KuberhealthyCheckClient{restClient: client}, err
}
| ["\"POD_NAMESPACE\""] | [] | ["POD_NAMESPACE"] | [] | ["POD_NAMESPACE"] | go | 1 | 0 |
pkg/blocktestservice/setup.go | // Package blocktestservice is used for creating the blocklist file
package blocktestservice
import (
"context"
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"os"
"strings"
"sync"
"github.com/LambdaTest/test-at-scale/config"
"github.com/LambdaTest/test-at-scale/pkg/core"
"github.com/LambdaTest/test-at-scale/pkg/global"
"github.com/LambdaTest/test-at-scale/pkg/lumber"
"github.com/LambdaTest/test-at-scale/pkg/requestutils"
"github.com/LambdaTest/test-at-scale/pkg/utils"
)
const (
delimiter = "##"
)
// blocktest represents the blocked test suites and test cases.
type blocktest struct {
Source string `json:"source"`
Locator string `json:"locator"`
Status string `json:"status"`
}
// blocktestAPIResponse models a blocked test case entry returned by the neuron API
type blocktestAPIResponse struct {
Name string `json:"test_name"`
TestLocator string `json:"test_locator"`
Status string `json:"status"`
}
// blocktestLocator stores locator and its status info
type blocktestLocator struct {
Locator string `json:"locator"`
Status string `json:"status"`
}
// TestBlockTestService represents a block test service instance
type TestBlockTestService struct {
cfg *config.NucleusConfig
requests core.Requests
logger lumber.Logger
endpoint string
blockTestEntities map[string][]blocktest
once sync.Once
errChan chan error
}
// NewTestBlockTestService creates and returns a new TestBlockTestService instance
func NewTestBlockTestService(cfg *config.NucleusConfig, logger lumber.Logger) (*TestBlockTestService, error) {
return &TestBlockTestService{
cfg: cfg,
logger: logger,
requests: requestutils.New(logger),
endpoint: global.NeuronHost + "/blocktest",
blockTestEntities: make(map[string][]blocktest),
errChan: make(chan error, 1),
}, nil
}
func (tbs *TestBlockTestService) fetchBlockListFromNeuron(ctx context.Context, repoID, branch string) error {
var inp []blocktestAPIResponse
params := utils.FetchQueryParams()
params["branch"] = branch
params["taskID"] = tbs.cfg.TaskID
headers := map[string]string{
"Authorization": fmt.Sprintf("%s %s", "Bearer", os.Getenv("TOKEN")),
}
rawBytes, statusCode, err := tbs.requests.MakeAPIRequest(ctx, http.MethodGet, tbs.endpoint, nil, params, headers)
if statusCode == http.StatusNotFound {
return nil
}
if err != nil {
return err
}
if jsonErr := json.Unmarshal(rawBytes, &inp); jsonErr != nil {
tbs.logger.Errorf("Unable to fetch blocklist response: %v", jsonErr)
return jsonErr
}
	// populate the blocklist with the locators returned by the API
blocktestLocators := make([]*blocktestLocator, 0, len(inp))
for i := range inp {
blockLocator := new(blocktestLocator)
blockLocator.Locator = inp[i].TestLocator
blockLocator.Status = inp[i].Status
blocktestLocators = append(blocktestLocators, blockLocator)
}
tbs.populateBlockList("api", blocktestLocators)
return nil
}
// GetBlockTests provides list of blocked test cases
func (tbs *TestBlockTestService) GetBlockTests(ctx context.Context, tasConfig *core.TASConfig, repoID, branch string) error {
tbs.once.Do(func() {
blocktestLocators := make([]*blocktestLocator, 0, len(tasConfig.Blocklist))
for _, locator := range tasConfig.Blocklist {
blockLocator := new(blocktestLocator)
blockLocator.Locator = locator
blockLocator.Status = string(core.Blocklisted)
blocktestLocators = append(blocktestLocators, blockLocator)
}
tbs.populateBlockList("yml", blocktestLocators)
if err := tbs.fetchBlockListFromNeuron(ctx, repoID, branch); err != nil {
tbs.logger.Errorf("Unable to fetch remote blocklist: %v. Ignoring remote response", err)
tbs.errChan <- err
return
}
tbs.logger.Infof("Block tests: %+v", tbs.blockTestEntities)
// write blocklistest tests on disk
marshalledBlocklist, err := json.Marshal(tbs.blockTestEntities)
if err != nil {
tbs.logger.Errorf("Unable to json marshal blocklist: %+v", err)
tbs.errChan <- err
return
}
if err = ioutil.WriteFile(global.BlockTestFileLocation, marshalledBlocklist, 0644); err != nil {
tbs.logger.Errorf("Unable to write blocklist file: %+v", err)
tbs.errChan <- err
return
}
tbs.blockTestEntities = nil
})
select {
case err := <-tbs.errChan:
return err
default:
return nil
}
}
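// populateBlockList groups the given locators by their first segment (the text before the first "##" delimiter),
// tagging each entry with its source ("yml" config or "api" response).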
func (tbs *TestBlockTestService) populateBlockList(blocktestSource string, blocktestLocators []*blocktestLocator) {
i := 0
for _, test := range blocktestLocators {
//locators must end with delimiter
if !strings.HasSuffix(test.Locator, delimiter) {
test.Locator += delimiter
}
i = strings.Index(test.Locator, delimiter)
//TODO: handle duplicate entries and ignore its individual suites or testcases in blocklist if file is blocklisted
if val, ok := tbs.blockTestEntities[test.Locator[:i]]; ok {
tbs.blockTestEntities[test.Locator[:i]] = append(val, blocktest{Source: blocktestSource, Locator: test.Locator, Status: test.Status})
} else {
tbs.blockTestEntities[test.Locator[:i]] = append([]blocktest{}, blocktest{Source: blocktestSource, Locator: test.Locator, Status: test.Status})
}
}
}
| ["\"TOKEN\""] | [] | ["TOKEN"] | [] | ["TOKEN"] | go | 1 | 0 |
main.go | package main
import (
"os"
log "github.com/Sirupsen/logrus"
"github.com/urfave/cli"
)
var (
version = "1.0.0" // build number set at compile-time
)
func main() {
app := cli.NewApp()
app.Name = "google cloud storage plugin"
app.Usage = "google cloud storage plugin"
app.Action = run
app.Version = version
app.Flags = []cli.Flag{
cli.StringFlag{
Name: "gcs-key",
Usage: "google cloud storage credentials file",
EnvVar: "GCS_KEY,PLUGIN_KEY",
},
cli.StringFlag{
Name: "bucket",
Usage: "google cloud storage bucket name",
EnvVar: "GCS_BUCKET,PLUGIN_BUCKET",
},
cli.StringFlag{
Name: "source",
Usage: "upload files from source folder",
EnvVar: "GCS_SOURCE,PLUGIN_SOURCE",
},
cli.StringFlag{
Name: "strip-prefix",
Usage: "strip the prefix from the target",
EnvVar: "GCS_STRIP_PREFIX,PLUGIN_STRIP_PREFIX",
},
cli.StringFlag{
Name: "target",
Usage: "upload files to target folder",
EnvVar: "GCS_TARGET,PLUGIN_TARGET",
},
cli.BoolFlag{
Name: "target-auto-date",
Usage: "target folder auto create current date folder(global setting)",
EnvVar: "GCS_TARGET_DATE_FOLDER,PLUGIN_TARGET_DATE_FOLDER",
},
cli.StringFlag{
Name: "trigger-branch",
Usage: "trigger branch from submodule",
EnvVar: "GCS_TRIGGER_BRANCH,PLUGIN_TRIGGER_BRANCH",
},
cli.StringFlag{
Name: "trigger-folder",
Usage: "trigger save folder",
EnvVar: "GCS_TRIGGER_FOLDER,PLUGIN_TRIGGER_FOLDER",
},
cli.StringFlag{
Name: "tag-module",
Usage: "tag module from submodule",
EnvVar: "GCS_TAG_MODULE,PLUGIN_TAG_MODULE",
},
cli.StringFlag{
Name: "tag-folder",
Usage: "tag save folder",
EnvVar: "GCS_TAG_FOLDER,PLUGIN_TAG_FOLDER",
},
cli.BoolFlag{
Name: "debug",
Usage: "show debug",
EnvVar: "GCS_DEBUG,PLUGIN_DEBUG",
},
cli.StringFlag{
Name: "repo.owner",
Usage: "repository owner",
EnvVar: "DRONE_REPO_OWNER",
},
cli.StringFlag{
Name: "repo.name",
Usage: "repository name",
EnvVar: "DRONE_REPO_NAME",
},
cli.StringFlag{
Name: "commit.sha",
Usage: "git commit sha",
EnvVar: "DRONE_COMMIT_SHA",
Value: "unsetSHA",
},
cli.StringFlag{
Name: "commit.ref",
Value: "refs/heads/master",
Usage: "git commit ref",
EnvVar: "DRONE_COMMIT_REF",
},
cli.StringFlag{
Name: "commit.branch",
Value: "master",
Usage: "git commit branch",
EnvVar: "DRONE_COMMIT_BRANCH",
},
cli.StringFlag{
Name: "commit.author",
Usage: "git author name",
EnvVar: "DRONE_COMMIT_AUTHOR",
Value: "unknown author",
},
cli.StringFlag{
Name: "commit.message",
Usage: "commit message",
EnvVar: "DRONE_COMMIT_MESSAGE",
Value: "unset message",
},
cli.StringFlag{
Name: "build.event",
Value: "push",
Usage: "build event",
EnvVar: "DRONE_BUILD_EVENT",
},
cli.IntFlag{
Name: "build.number",
Usage: "build number",
EnvVar: "DRONE_BUILD_NUMBER",
},
cli.StringFlag{
Name: "build.status",
Usage: "build status",
Value: "success",
EnvVar: "DRONE_BUILD_STATUS",
},
cli.StringFlag{
Name: "build.link",
Usage: "build link",
EnvVar: "DRONE_BUILD_LINK",
},
cli.Int64Flag{
Name: "build.started",
Usage: "build started",
EnvVar: "DRONE_BUILD_STARTED",
},
cli.Int64Flag{
Name: "build.created",
Usage: "build created",
EnvVar: "DRONE_BUILD_CREATED",
},
cli.StringFlag{
Name: "build.tag",
Usage: "build tag",
EnvVar: "DRONE_TAG",
},
cli.Int64Flag{
Name: "job.started",
Usage: "job started",
EnvVar: "DRONE_JOB_STARTED",
},
}
if err := app.Run(os.Args); err != nil {
log.Fatal(err)
os.Exit(1)
}
}
func run(c *cli.Context) error {
plugin := Plugin{}
if c.Bool("debug") {
log.SetLevel(log.DebugLevel)
}
plugin = Plugin{
Repo: Repo{
Owner: c.String("repo.owner"),
Name: c.String("repo.name"),
},
Build: Build{
Tag: c.String("build.tag"),
Number: c.Int("build.number"),
Event: c.String("build.event"),
Status: c.String("build.status"),
Commit: c.String("commit.sha"),
Ref: c.String("commit.ref"),
Branch: c.String("commit.branch"),
Author: c.String("commit.author"),
Message: c.String("commit.message"),
Link: c.String("build.link"),
Started: c.Int64("build.started"),
Created: c.Int64("build.created"),
},
Job: Job{
Started: c.Int64("job.started"),
},
Credentials: c.String("gcs-key"),
Bucket: c.String("bucket"),
Source: c.String("source"),
StripPrefix: c.String("strip-prefix"),
Target: c.String("target"),
TargetDateFolder: c.Bool("target-auto-date"),
TriggerFolder: c.String("trigger-folder"),
TagFolder: c.String("tag-folder"),
// read from environmental variables
TriggerModule: os.Getenv("T_MODULE"),
TriggerEven: os.Getenv("T_EVEN"),
TriggerBranch: os.Getenv("T_BRANCH"),
TriggerSHA: os.Getenv("T_SHA"),
Access: c.StringSlice("acl"),
Exclude: c.StringSlice("exclude"),
Compress: c.StringSlice("compress"),
}
log.WithFields(log.Fields{
"bucket": plugin.Bucket,
"source": plugin.Source,
"target": plugin.Target,
"triggerFolder": plugin.TriggerFolder,
"tagFolder": plugin.TagFolder,
"targetDateFolder": plugin.TargetDateFolder,
"triggerModule": plugin.TriggerModule,
"triggerEven": plugin.TriggerEven,
"triggerBranch": plugin.TriggerBranch,
"triggerSHA": plugin.TriggerSHA,
"buildEvent": plugin.Build.Event,
}).Debug("Parameter..")
return plugin.Exec()
}
| ["\"T_MODULE\"", "\"T_EVEN\"", "\"T_BRANCH\"", "\"T_SHA\""] | [] | ["T_BRANCH", "T_MODULE", "T_SHA", "T_EVEN"] | [] | ["T_BRANCH", "T_MODULE", "T_SHA", "T_EVEN"] | go | 4 | 0 |
delta/data/task/text_seq_label_task_test.py | # Copyright (C) 2017 Beijing Didi Infinity Technology and Development Co.,Ltd.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
''' text sequence labeling task unittest '''
import os
from pathlib import Path
import numpy as np
import tensorflow as tf
from absl import logging
from delta import utils
from delta.data.task.text_seq_label_task import TextSeqLabelTask
from delta.utils.register import import_all_modules_for_register
class TextSeqLabelTaskTest(tf.test.TestCase):
''' sequence labeling task test'''
def setUp(self):
''' set up'''
import_all_modules_for_register()
main_root = os.environ['MAIN_ROOT']
main_root = Path(main_root)
self.config_file = main_root.joinpath(
'egs/mock_text_seq_label_data/seq-label/v1/config/seq-label-mock.yml')
def tearDown(self):
''' tear down '''
def test_english(self):
""" test seq label task of english data """
config = utils.load_config(self.config_file)
max_len = config["model"]["net"]["structure"]["max_len"]
config["data"]["task"]["language"] = "english"
task_config = config["data"]["task"]
task_config[
"text_vocab"] = "egs/mock_text_seq_label_data/seq-label/v1/data/text_vocab.txt"
task_config["need_shuffle"] = False
task = TextSeqLabelTask(config, utils.TRAIN)
# test offline data
data = task.dataset()
self.assertTrue("input_x_dict" in data and
"input_x" in data["input_x_dict"])
self.assertTrue("input_y_dict" in data and
"input_y" in data["input_y_dict"])
with self.session() as sess:
sess.run(data["iterator"].initializer, feed_dict=data["init_feed_dict"])
res = sess.run(
[data["input_x_dict"]["input_x"], data["input_y_dict"]["input_y"]])
logging.debug(res[0][0][:5])
logging.debug(res[1][0])
self.assertAllEqual(res[0][0][:5], [2, 3, 4, 5, 0])
self.assertEqual(np.shape(res[0]), (10, max_len))
self.assertEqual(np.shape(res[1]), (10, max_len))
# test online data
export_inputs = task.export_inputs()
self.assertTrue("export_inputs" in export_inputs and
"input_sentence" in export_inputs["export_inputs"])
input_sentence = export_inputs["export_inputs"]["input_sentence"]
input_x = export_inputs["model_inputs"]["input_x"]
with self.session() as sess:
sess.run(data["iterator"].initializer, feed_dict=data["init_feed_dict"])
res = sess.run(input_x, feed_dict={input_sentence: ["I feel good ."]})
logging.debug(res[0][:5])
self.assertAllEqual(res[0][:5], [0, 3, 4, 5, 0])
self.assertEqual(np.shape(res[0]), (max_len,))
if __name__ == "__main__":
logging.set_verbosity(logging.DEBUG)
tf.test.main()
| [] | [] | ["MAIN_ROOT"] | [] | ["MAIN_ROOT"] | python | 1 | 0 |
googleRtConfKV_test.go | package rtconf
import (
"fmt"
"os"
"path/filepath"
"testing"
"github.com/google/uuid"
)
var (
val = "val"
)
func TestGoogleRtConfKv_GetSet(t *testing.T) {
key := filepath.Join("a/b/c/", uuid.New().String())
kv, err := NewGoogleRtConf(os.Getenv("GOOGLE_PROJECT"), "my-config")
if err != nil {
t.Fatal(err)
}
// set something
if err := kv.Set(key, []byte(val)); err != nil {
t.Fatal(err)
}
defer kv.Delete(key)
// get that thing
if retVal, err := kv.Get(key); err != nil {
t.Fatal(err)
} else if string(retVal) != val {
t.Fatal("not val")
}
}
func TestGoogleRtConfKv_GetSetWrongKey(t *testing.T) {
key := filepath.Join("a/b/c/", uuid.New().String())
kv, err := NewGoogleRtConf(os.Getenv("GOOGLE_PROJECT"), "my-config")
if err != nil {
t.Fatal(err)
}
// set something
if err := kv.Set(key, []byte(val)); err != nil {
t.Fatal(err)
}
defer kv.Delete(key)
// get that thing
if val, err := kv.Get("wrongKey"); err == nil {
t.Fatal("expected error here, got value length:", len(val))
}
}
func TestGoogleRtConfKv_GetSetWrongBucket(t *testing.T) {
key := filepath.Join("a/b/c/", uuid.New().String())
kv, err := NewGoogleRtConf(os.Getenv("GOOGLE_PROJECT"), "my-config")
if err != nil {
t.Fatal(err)
}
// set something
if err := kv.Set(key, []byte(val)); err != nil {
t.Fatal(err)
}
defer kv.Delete(key)
// get that thing
if val, err := kv.Get("/a/b/d/this"); err == nil {
t.Fatal("expected error here, got value length:", len(val))
}
}
func TestGoogleRtConfKv_SetEmptyKey(t *testing.T) {
kv, err := NewGoogleRtConf(os.Getenv("GOOGLE_PROJECT"), "my-config")
if err != nil {
t.Fatal(err)
}
// set something
if err := kv.Set("", []byte(val)); err == nil {
t.Fatal("expected err here")
}
}
func TestGoogleRtConfKv_GetEmptyKey(t *testing.T) {
key := filepath.Join("a/b/c/", uuid.New().String())
kv, err := NewGoogleRtConf(os.Getenv("GOOGLE_PROJECT"), "my-config")
if err != nil {
t.Fatal(err)
}
// set something
if err := kv.Set(key, []byte(val)); err != nil {
t.Fatal(err)
}
defer kv.Delete(key)
// get that thing
if val, err := kv.Get(""); err == nil {
t.Fatal("expected error here, got value length:", len(val))
}
}
func TestGoogleRtConfKv_GetBucket(t *testing.T) {
key := filepath.Join("a/b/c/", uuid.New().String())
kv, err := NewGoogleRtConf(os.Getenv("GOOGLE_PROJECT"), "my-config")
if err != nil {
t.Fatal(err)
}
// set something
if err := kv.Set(key, []byte(val)); err != nil {
t.Fatal(err)
}
defer kv.Delete(key)
// get that thing
if val, err := kv.Get("/a/b/c"); err == nil {
t.Fatal("expected error here, got value length:", len(val))
}
}
func TestGoogleRtConfKv_SetNilValue(t *testing.T) {
key := filepath.Join("a/b/c/", uuid.New().String())
kv, err := NewGoogleRtConf(os.Getenv("GOOGLE_PROJECT"), "my-config")
if err != nil {
t.Fatal(err)
}
// set something
if err := kv.Set(key, nil); err == nil {
t.Fatal("expected err when trying to set a nil value")
}
}
func TestGoogleRtConfKv_SetZeroValue(t *testing.T) {
key := filepath.Join("a/b/c/", uuid.New().String())
kv, err := NewGoogleRtConf(os.Getenv("GOOGLE_PROJECT"), "my-config")
if err != nil {
t.Fatal(err)
}
// set something
if err := kv.Set(key, []byte{}); err != nil {
t.Fatal(err)
}
defer kv.Delete(key)
}
func TestGoogleRtConfKv_GetZeroValue(t *testing.T) {
key := filepath.Join("a/b/c/", uuid.New().String())
kv, err := NewGoogleRtConf(os.Getenv("GOOGLE_PROJECT"), "my-config")
if err != nil {
t.Fatal(err)
}
// set something
if err := kv.Set(key, []byte{}); err != nil {
t.Fatal(err)
}
defer kv.Delete(key)
if val, err := kv.Get(key); err != nil {
t.Fatal(err)
} else {
if val == nil {
t.Fatal("expected val to be zero length but not nil, got nil")
} else {
if len(val) != 0 {
t.Fatal("expected val to be zero length, got:", len(val))
}
}
}
}
func TestGoogleRtConfKv_DeleteKey(t *testing.T) {
key := filepath.Join("a/b/c/", uuid.New().String())
kv, err := NewGoogleRtConf(os.Getenv("GOOGLE_PROJECT"), "my-config")
if err != nil {
t.Fatal(err)
}
// set something
if err := kv.Set(key, []byte(val)); err != nil {
t.Fatal(err)
}
	// now delete the key
if err := kv.Delete(key); err != nil {
t.Fatal(err)
}
// now ensure you can't get that thing
if val, err := kv.Get(key); err == nil && val != nil {
t.Fatal("expected returned value to be nil, got slice length:", len(val))
}
}
func TestGoogleRtConfKv_DeleteTree(t *testing.T) {
key := filepath.Join("a/b/c/", uuid.New().String())
kv, err := NewGoogleRtConf(os.Getenv("GOOGLE_PROJECT"), "my-config")
if err != nil {
t.Fatal(err)
}
// set something
if err := kv.Set(key, []byte(val)); err != nil {
t.Fatal(err)
}
bktName, _ := filepath.Split(key)
// set something else in the same bucket
if err := kv.Set(filepath.Join(bktName, "someOtherKey"), []byte("someOtherValue")); err != nil {
t.Fatal(err)
}
// now delete the tree
if err := kv.Delete("/a/b"); err != nil {
t.Fatal(err)
}
// now ensure you can't get that thing
if val, err := kv.Get(key); err == nil && val != nil {
t.Fatal("expected returned value to be nil, got slice length:", len(val))
}
// now ensure you can't get that other thing as well
if val, err := kv.Get(filepath.Join(bktName, "someOtherKey")); err == nil && val != nil {
t.Fatal("expected returned value to be nil, got slice length:", len(val))
}
}
func TestGoogleRtConfKv_DeleteDeletedKey(t *testing.T) {
key := filepath.Join("a/b/c/", uuid.New().String())
kv, err := NewGoogleRtConf(os.Getenv("GOOGLE_PROJECT"), "my-config")
if err != nil {
t.Fatal(err)
}
// set something
if err := kv.Set(key, []byte(val)); err != nil {
t.Fatal(err)
}
bktName, _ := filepath.Split(key)
key2 := filepath.Join(bktName, "someOtherKey")
// set something else in the same bucket
if err := kv.Set(key2, []byte("someOtherValue")); err != nil {
t.Fatal(err)
}
defer kv.Delete(key2)
// now delete the key
if err := kv.Delete(key); err != nil {
t.Fatal(err)
}
// now delete the key
if err := kv.Delete(key); err == nil {
t.Fatal("expected error when deleting key twice")
}
}
func TestGoogleRtConfKv_Enumerate(t *testing.T) {
id := uuid.New().String()
key := filepath.Join("a/b/c/", id)
kv, err := NewGoogleRtConf(os.Getenv("GOOGLE_PROJECT"), "my-config")
if err != nil {
t.Fatal(err)
}
// set something
if err := kv.Set(key, []byte(val)); err != nil {
t.Fatal(err)
}
defer kv.Delete(key)
bktName, _ := filepath.Split(key)
key2 := filepath.Join(bktName, "someOtherKey")
// set something else in the same bucket
if err := kv.Set(key2, []byte("someOtherValue")); err != nil {
t.Fatal(err)
}
defer kv.Delete(key2)
keys, err := kv.Enumerate("/a/b")
if err != nil {
t.Fatal(err)
}
for _, key := range keys {
switch _, v := filepath.Split(key); v {
case id, "someOtherKey":
default:
t.Fatal("did not expect this key to be present in the list:", key)
}
}
}
func TestGoogleRtConfKv_DeleteEnumerate(t *testing.T) {
key := filepath.Join("a/b/c/", uuid.New().String())
kv, err := NewGoogleRtConf(os.Getenv("GOOGLE_PROJECT"), "my-config")
if err != nil {
t.Fatal(err)
}
// set something
if err := kv.Set(key, []byte(val)); err != nil {
t.Fatal(err)
}
bktName, _ := filepath.Split(key)
key2 := filepath.Join(bktName, "someOtherKey")
// set something else in the same bucket
if err := kv.Set(key2, []byte("someOtherValue")); err != nil {
t.Fatal(err)
}
defer kv.Delete(key2)
if err := kv.Delete(key); err != nil {
t.Fatal(err)
}
keys, err := kv.Enumerate("/a/b/")
if err != nil {
t.Fatal(err)
}
if len(keys) != 1 {
t.Fatal("expected only one key, found:", len(keys))
}
for _, key := range keys {
switch _, v := filepath.Split(key); v {
case "someOtherKey":
default:
t.Fatal("did not expect this key to be present in the list:", key)
}
}
}
func TestGoogleRtConfKv_DeleteAllEnumerate(t *testing.T) {
key := filepath.Join("a/b/c/", uuid.New().String())
kv, err := NewGoogleRtConf(os.Getenv("GOOGLE_PROJECT"), "my-config")
if err != nil {
t.Fatal(err)
}
// set something
if err := kv.Set(key, []byte(val)); err != nil {
t.Fatal(err)
}
bktName, _ := filepath.Split(key)
key2 := filepath.Join(bktName, "someOtherKey")
// set something else in the same bucket
if err := kv.Set(key2, []byte("someOtherValue")); err != nil {
t.Fatal(err)
}
if err := kv.Delete(key); err != nil {
t.Fatal(err)
}
if err := kv.Delete(key2); err != nil {
t.Fatal(err)
}
keys, err := kv.Enumerate("/a/b/")
if err != nil {
t.Fatal(err)
}
if len(keys) > 0 {
fmt.Println(keys)
t.Fatal("did not expect any key to be listed, found:", len(keys))
}
}
| ["\"GOOGLE_PROJECT\"", "\"GOOGLE_PROJECT\"", "\"GOOGLE_PROJECT\"", "\"GOOGLE_PROJECT\"", "\"GOOGLE_PROJECT\"", "\"GOOGLE_PROJECT\"", "\"GOOGLE_PROJECT\"", "\"GOOGLE_PROJECT\"", "\"GOOGLE_PROJECT\"", "\"GOOGLE_PROJECT\"", "\"GOOGLE_PROJECT\"", "\"GOOGLE_PROJECT\"", "\"GOOGLE_PROJECT\"", "\"GOOGLE_PROJECT\"", "\"GOOGLE_PROJECT\""] | [] | ["GOOGLE_PROJECT"] | [] | ["GOOGLE_PROJECT"] | go | 1 | 0 |
services/rpc/cart/internal/logic/logic_suite_test.go | package logic_test
import (
"log"
"os"
"testing"
"k8scommerce/services/rpc/cart/internal/config"
"k8scommerce/services/rpc/cart/internal/server"
"k8scommerce/services/rpc/cart/internal/svc"
"github.com/k8scommerce/k8scommerce/internal/gcache"
"github.com/k8scommerce/k8scommerce/internal/repos"
"github.com/joho/godotenv"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
"github.com/zeromicro/go-zero/core/conf"
)
var (
repo repos.Repo
srv *server.CartClientServer
)
func TestRepos(t *testing.T) {
defer GinkgoRecover()
RegisterFailHandler(Fail)
loadEnv()
dbConnect()
var c config.Config
conf.MustLoad("../../etc/cart.yaml", &c, conf.UseEnv())
ctx := svc.NewServiceContext(c)
ctx.Cache = gcache.NewGCache()
srv = server.NewCartClientServer(ctx)
RunSpecs(t, "CartLogic Suite")
}
func loadEnv() {
err := godotenv.Load("../../../../../.env")
Expect(err).To(BeNil())
if err != nil {
log.Fatal("Error loading .env file")
}
}
func dbConnect() {
getPostgresConfig := func() *repos.PostgresConfig {
return &repos.PostgresConfig{
DataSourceName: os.Getenv("POSTGRES_DSN"),
}
}
repo = repos.NewRepo(getPostgresConfig())
}
var truncateCart = func() {
_, err := repo.GetRawDB().Exec(`TRUNCATE cart RESTART IDENTITY CASCADE;`)
Expect(err).To(BeNil())
}
var truncateCartItem = func() {
_, err := repo.GetRawDB().Exec(`TRUNCATE cart_item RESTART IDENTITY CASCADE;`)
Expect(err).To(BeNil())
}
| ["\"POSTGRES_DSN\""] | [] | ["POSTGRES_DSN"] | [] | ["POSTGRES_DSN"] | go | 1 | 0 |
src/slash-command.go | package main
import (
"bytes"
"context"
"encoding/json"
"fmt"
"net/http"
"os"
"github.com/nlopes/slack"
log "github.com/sirupsen/logrus"
)
type SlashResponse struct {
ResponseType string `json:"response_type"`
Text string `json:"text"`
}
// ******************************************************************************
// Name : slackCommandResponse
// Description: Function to make a POST request to send response back to Slack
// ******************************************************************************
func slackCommandResponse(response SlashResponse, s slack.SlashCommand) {
json, _ := json.Marshal(response)
reqBody := bytes.NewBuffer(json)
endpoint := s.ResponseURL
req, err := http.NewRequest("POST", endpoint, reqBody)
if err != nil {
log.Error("slackCommandResponse Build Request Error: ", err)
return
}
req.Header.Set("Content-Type", "application/json")
req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", os.Getenv("SLACK_ACCESS_TOKEN")))
client := &http.Client{}
req = req.WithContext(context.Background())
resp, err := client.Do(req)
if err != nil {
log.Error("slackCommandResponse POST Request Error: ", err)
return
}
defer resp.Body.Close()
}
// ******************************************************************************
// Name : slashHelpResponse
// Description: Function to send help response
// ******************************************************************************
func slashHelpResponse(s slack.SlashCommand) {
response := SlashResponse{"ephemeral", helpMessage}
slackCommandResponse(response, s)
}
| ["\"SLACK_ACCESS_TOKEN\""] | [] | ["SLACK_ACCESS_TOKEN"] | [] | ["SLACK_ACCESS_TOKEN"] | go | 1 | 0 |
tests/test_dff_custom_sim.py |
from cocotb_test.simulator import Icarus, Ius, run
import pytest
import os
hdl_dir = os.path.dirname(__file__)
class IcarusCustom(Icarus):
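    # Overrides run_command so vvp writes to a custom log file while still loading the cocotb VPI module.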
def run_command(self):
return ["vvp", "-v", "-l", self.logfile, "-M", self.lib_dir, "-m", "libcocotbvpi_icarus", self.sim_file]
@pytest.fixture(scope="module", autouse=True)
def module_run_at_beginning(request):
print("\n\nIn module_run_at_beginning()\n\n")
def module_run_at_end():
print("\n\nIn module_run_at_end()\n\n")
request.addfinalizer(module_run_at_end)
@pytest.mark.skipif(os.getenv("SIM") != "icarus", reason="Custom for Icarus")
def test_dff_custom_icarus():
IcarusCustom(
verilog_sources=[os.path.join(hdl_dir, "dff.v")],
toplevel="dff_test",
python_search=[hdl_dir],
module="dff_cocotb",
logfile="custom_log.log", # extra custom argument
).run()
class IusCustom(Ius):
def build_command(self):
cmd = [
"xrun",
"-loadvpi",
os.path.join(self.lib_dir, "libvpi." + self.lib_ext) + ":vlog_startup_routines_bootstrap",
"-plinowarn",
"-access",
"+rwc",
"-f",
self.defsfile,
]
return [cmd]
@pytest.mark.skipif(os.getenv("SIM") != "ius", reason="Custom for IUS")
def test_dff_custom_ius():
run(simulator=IusCustom, toplevel="dff", python_search=[hdl_dir], module="dff_cocotb", defsfile="ius_defines.f") # extra custom argument
| [] | [] | ["SIM"] | [] | ["SIM"] | python | 1 | 0 |
extensions/amazon-lambda/common-runtime/src/main/java/io/quarkus/amazon/lambda/runtime/AbstractLambdaPollLoop.java | package io.quarkus.amazon.lambda.runtime;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.net.HttpURLConnection;
import java.net.SocketException;
import java.net.URL;
import java.net.UnknownHostException;
import java.util.concurrent.atomic.AtomicBoolean;
import org.jboss.logging.Logger;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.ObjectReader;
import io.quarkus.runtime.Application;
import io.quarkus.runtime.LaunchMode;
import io.quarkus.runtime.ShutdownContext;
public abstract class AbstractLambdaPollLoop {
private static final Logger log = Logger.getLogger(AbstractLambdaPollLoop.class);
private final ObjectMapper objectMapper;
private final ObjectReader cognitoIdReader;
private final ObjectReader clientCtxReader;
private final LaunchMode launchMode;
private static final String LAMBDA_TRACE_HEADER_PROP = "com.amazonaws.xray.traceHeader";
public AbstractLambdaPollLoop(ObjectMapper objectMapper, ObjectReader cognitoIdReader, ObjectReader clientCtxReader,
LaunchMode launchMode) {
this.objectMapper = objectMapper;
this.cognitoIdReader = cognitoIdReader;
this.clientCtxReader = clientCtxReader;
this.launchMode = launchMode;
}
protected abstract boolean isStream();
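    // Kept as a field so the shutdown task can disconnect an in-flight poll request and unblock the loop.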
protected HttpURLConnection requestConnection = null;
public void startPollLoop(ShutdownContext context) {
final AtomicBoolean running = new AtomicBoolean(true);
// flag to check whether to interrupt.
final AtomicBoolean shouldInterrupt = new AtomicBoolean(true);
String baseUrl = AmazonLambdaApi.baseUrl();
final Thread pollingThread = new Thread(new Runnable() {
@SuppressWarnings("unchecked")
@Override
public void run() {
try {
if (!LambdaHotReplacementRecorder.enabled
&& (launchMode == LaunchMode.DEVELOPMENT || launchMode == LaunchMode.NORMAL)) {
// when running with continuous testing, this method fails
// because currentApplication is not set when running as an
// auxiliary application. So, just skip it if hot replacement enabled.
// This method is called to determine if Quarkus is started and ready to receive requests.
checkQuarkusBootstrapped();
}
URL requestUrl = AmazonLambdaApi.invocationNext(baseUrl);
if (AmazonLambdaApi.isTestMode()) {
// FYI: This log is required as native test runner
// looks for "Listening on" in log to ensure native executable booted
log.info("Listening on: " + requestUrl.toString());
}
while (running.get()) {
try {
requestConnection = (HttpURLConnection) requestUrl.openConnection();
} catch (IOException e) {
if (!running.get()) {
// just return gracefully as we were probably shut down by
// shutdown task
return;
}
if (abortGracefully(e)) {
return;
}
throw e;
}
try {
String requestId = requestConnection.getHeaderField(AmazonLambdaApi.LAMBDA_RUNTIME_AWS_REQUEST_ID);
if (requestConnection.getResponseCode() != 200) {
// connection should be closed by finally clause
continue;
}
try {
if (LambdaHotReplacementRecorder.enabled && launchMode == LaunchMode.DEVELOPMENT) {
try {
// do not interrupt during a hot replacement
// as shutdown will abort and do nasty things.
shouldInterrupt.set(false);
if (LambdaHotReplacementRecorder.checkHotReplacement()) {
// hot replacement happened in dev mode
// so we requeue the request as quarkus will restart
// and the message will not be processed
// FYI: this requeue endpoint is something only the mock event server implements
requeue(baseUrl, requestId);
return;
}
} finally {
shouldInterrupt.set(true);
}
}
String traceId = requestConnection.getHeaderField(AmazonLambdaApi.LAMBDA_TRACE_HEADER_KEY);
if (traceId != null) {
System.setProperty(LAMBDA_TRACE_HEADER_PROP, traceId);
}
URL url = AmazonLambdaApi.invocationResponse(baseUrl, requestId);
if (isStream()) {
HttpURLConnection responseConnection = responseStream(url);
if (running.get()) {
processRequest(requestConnection.getInputStream(), responseConnection.getOutputStream(),
createContext(requestConnection));
while (responseConnection.getInputStream().read() != -1) {
// Read data
}
}
} else {
Object input = null;
if (running.get()) {
LambdaInputReader inputReader = getInputReader();
if (inputReader != null) {
input = inputReader.readValue(requestConnection.getInputStream());
}
Object output = processRequest(input, createContext(requestConnection));
postResponse(url, output);
}
}
} catch (Exception e) {
if (abortGracefully(e)) {
return;
}
log.error("Failed to run lambda (" + launchMode + ")", e);
postError(AmazonLambdaApi.invocationError(baseUrl, requestId),
new FunctionError(e.getClass().getName(), e.getMessage()));
continue;
}
} catch (Exception e) {
if (!abortGracefully(e))
log.error("Error running lambda (" + launchMode + ")", e);
Application app = Application.currentApplication();
if (app != null) {
try {
app.stop();
} catch (Exception ignored) {
}
}
return;
} finally {
try {
requestConnection.getInputStream().close();
} catch (IOException ignored) {
}
}
}
} catch (Exception e) {
try {
log.error("Lambda init error (" + launchMode + ")", e);
postError(AmazonLambdaApi.initError(baseUrl),
new FunctionError(e.getClass().getName(), e.getMessage()));
} catch (Exception ex) {
log.error("Failed to report init error (" + launchMode + ")", ex);
} finally {
// our main loop is done, time to shutdown
Application app = Application.currentApplication();
if (app != null) {
log.error("Shutting down Quarkus application because of error (" + launchMode + ")");
app.stop();
}
}
} finally {
log.info("Lambda polling thread complete (" + launchMode + ")");
}
}
}, "Lambda Thread (" + launchMode + ")");
pollingThread.setDaemon(true);
context.addShutdownTask(() -> {
running.set(false);
try {
//note that interrupting does not seem to be 100% reliable in unblocking the thread
requestConnection.disconnect();
} catch (Exception ignore) {
}
if (shouldInterrupt.get()) {
pollingThread.interrupt();
}
});
pollingThread.start();
}
/**
* Invoke actual app code with unmarshalled input.
*
* @param input unmarshalled input (probably from json)
* @param context
* @return Unmarshalled Java output (will probably be marshalled to json)
* @throws Exception
*/
protected abstract Object processRequest(Object input, AmazonLambdaContext context) throws Exception;
protected abstract void processRequest(InputStream input, OutputStream output, AmazonLambdaContext context)
throws Exception;
protected abstract LambdaInputReader getInputReader();
protected abstract LambdaOutputWriter getOutputWriter();
protected AmazonLambdaContext createContext(HttpURLConnection requestConnection) throws IOException {
return new AmazonLambdaContext(requestConnection, cognitoIdReader, clientCtxReader);
}
private void checkQuarkusBootstrapped() {
// todo we need a better way to do this.
if (Application.currentApplication() == null) {
throw new RuntimeException("Quarkus initialization error");
}
String[] args = {};
Application.currentApplication().start(args);
}
protected void postResponse(URL url, Object response) throws IOException {
HttpURLConnection responseConnection = (HttpURLConnection) url.openConnection();
if (response != null) {
getOutputWriter().writeHeaders(responseConnection);
}
responseConnection.setDoOutput(true);
responseConnection.setRequestMethod("POST");
if (response != null) {
getOutputWriter().writeValue(responseConnection.getOutputStream(), response);
}
while (responseConnection.getInputStream().read() != -1) {
// Read data
}
}
protected void requeue(String baseUrl, String requestId) throws IOException {
URL url = AmazonLambdaApi.requeue(baseUrl, requestId);
HttpURLConnection responseConnection = (HttpURLConnection) url.openConnection();
responseConnection.setDoOutput(true);
responseConnection.setRequestMethod("POST");
while (responseConnection.getInputStream().read() != -1) {
// Read data
}
}
protected void postError(URL url, Object response) throws IOException {
HttpURLConnection responseConnection = (HttpURLConnection) url.openConnection();
responseConnection.setRequestProperty("Content-Type", "application/json");
responseConnection.setDoOutput(true);
responseConnection.setRequestMethod("POST");
objectMapper.writeValue(responseConnection.getOutputStream(), response);
while (responseConnection.getInputStream().read() != -1) {
// Read data
}
}
protected HttpURLConnection responseStream(URL url) throws IOException {
HttpURLConnection responseConnection = (HttpURLConnection) url.openConnection();
responseConnection.setDoOutput(true);
responseConnection.setRequestMethod("POST");
return responseConnection;
}
boolean abortGracefully(Exception ex) {
// if we are running in test mode, or native mode outside of the lambda container, then don't output stack trace for socket errors
boolean lambdaEnv = System.getenv("AWS_LAMBDA_RUNTIME_API") != null;
boolean testOrDevEnv = LaunchMode.current() == LaunchMode.TEST || LaunchMode.current() == LaunchMode.DEVELOPMENT;
boolean graceful = ((ex instanceof SocketException) && testOrDevEnv)
|| (ex instanceof UnknownHostException && !lambdaEnv);
if (graceful)
log.warn("Aborting lambda poll loop: " + (lambdaEnv ? "no lambda container found" : "ending dev/test mode"));
return graceful;
}
}
| ["\"AWS_LAMBDA_RUNTIME_API\""] | [] | ["AWS_LAMBDA_RUNTIME_API"] | [] | ["AWS_LAMBDA_RUNTIME_API"] | java | 1 | 0 |
object_tracker_darklabel.py | import os
# comment out below line to enable tensorflow logging outputs
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
import time
import tensorflow as tf
physical_devices = tf.config.experimental.list_physical_devices('GPU')
if len(physical_devices) > 0:
tf.config.experimental.set_memory_growth(physical_devices[0], True)
tf.config.experimental.set_visible_devices(physical_devices[0], 'GPU')
from absl import app, flags, logging
from absl.flags import FLAGS
import core.utils as utils
from core.yolov4 import filter_boxes
from tensorflow.python.saved_model import tag_constants
from core.config import cfg
from PIL import Image
import cv2
import numpy as np
import matplotlib.pyplot as plt
from tensorflow.compat.v1 import ConfigProto
from tensorflow.compat.v1 import InteractiveSession
# deep sort imports
from deep_sort import preprocessing, nn_matching
from deep_sort.detection import Detection
from deep_sort.tracker import Tracker
from tools import generate_detections as gdet
# from action_class_helper import ActionClassHelper
from opencv_ffmpeg import FFmpegVideoCapture
import common_util
flags.DEFINE_string('framework', 'tf', '(tf, tflite, trt')
flags.DEFINE_string('weights', './checkpoints/yolov4-608',
'path to weights file')
flags.DEFINE_integer('size', 608, 'resize images to')
flags.DEFINE_boolean('tiny', False, 'yolo or yolo-tiny')
flags.DEFINE_string('model', 'yolov4', 'yolov3 or yolov4')
flags.DEFINE_string('video', './data/video/test.mp4', 'path to input video or set to 0 for webcam')
flags.DEFINE_string('output', None, 'path to output video')
flags.DEFINE_string('output_format', 'MP4V', 'codec used in VideoWriter when saving video to file')
flags.DEFINE_float('iou', 0.45, 'iou threshold')
flags.DEFINE_float('score', 0.50, 'score threshold')
flags.DEFINE_boolean('dont_show', False, 'dont show video output')
flags.DEFINE_boolean('info', False, 'show detailed info of tracked objects')
flags.DEFINE_boolean('count', False, 'count objects being tracked on screen')
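# One DarkLabel label line per confirmed track per frame: frame index, hard-coded action class "2.Walk", track id, and bbox as x,y,w,h.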
LABEL_TEMPLATE = "{frame_id},2.Walk,{obj_id},{x},{y},{w},{h}"
def main(_argv):
# Definition of the parameters
max_cosine_distance = 0.4
nn_budget = None
nms_max_overlap = 1.0
# initialize deep sort
model_filename = 'model_data/mars-small128.pb'
encoder = gdet.create_box_encoder(model_filename, batch_size=1)
# calculate cosine distance metric
metric = nn_matching.NearestNeighborDistanceMetric("cosine", max_cosine_distance, nn_budget)
# initialize tracker
tracker = Tracker(metric)
# load configuration for object detector
config = ConfigProto()
config.gpu_options.allow_growth = True
session = InteractiveSession(config=config)
STRIDES, ANCHORS, NUM_CLASS, XYSCALE = utils.load_config(FLAGS)
input_size = FLAGS.size
video_path = FLAGS.video
# load tflite model if flag is set
if FLAGS.framework == 'tflite':
interpreter = tf.lite.Interpreter(model_path=FLAGS.weights)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()
print(input_details)
print(output_details)
# otherwise load standard tensorflow saved model
else:
saved_model_loaded = tf.saved_model.load(FLAGS.weights, tags=[tag_constants.SERVING])
infer = saved_model_loaded.signatures['serving_default']
# begin video capture
# try:
# vid = cv2.VideoCapture(int(video_path))
# except:
# vid = cv2.VideoCapture(video_path)
vid = cv2.VideoCapture(video_path)
width = int(vid.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(vid.get(cv2.CAP_PROP_FRAME_HEIGHT))
vid_ffmpeg = FFmpegVideoCapture(video_path, width, height, "bgr24")
out = None
# get video ready to save locally if flag is set
if FLAGS.output:
# by default VideoCapture returns float instead of int
width = int(vid.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(vid.get(cv2.CAP_PROP_FRAME_HEIGHT))
fps = int(vid.get(cv2.CAP_PROP_FPS))
codec = cv2.VideoWriter_fourcc(*FLAGS.output_format)
out = cv2.VideoWriter(FLAGS.output, codec, fps, (width, height))
# print("CAP_PROP_ORIENTATION_META:", vid.get(cv2.CAP_PROP_ORIENTATION_META))
# print("CAP_PROP_ORIENTATION_AUTO:", vid.get(cv2.CAP_PROP_ORIENTATION_AUTO))
# return
# actionClassHelper = ActionClassHelper(fps=30)
label_lines = []
frame_num = -1
# while video is running
while True:
        frame_num += 1
# return_value, frame = vid.read()
return_value, frame = vid_ffmpeg.read()
if return_value:
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
# frame=cv2.rotate(frame, cv2.ROTATE_90_CLOCKWISE)
image = Image.fromarray(frame)
else:
print('Video has ended or failed, try a different video format!')
break
print('Frame #: ', frame_num)
# if frame_num < 32000:
# frame_num += 1
# continue
# if frame_num == 500:
# break
frame_size = frame.shape[:2]
image_data = cv2.resize(frame, (input_size, input_size))
image_data = image_data / 255.
image_data = image_data[np.newaxis, ...].astype(np.float32)
start_time = time.time()
# run detections on tflite if flag is set
if FLAGS.framework == 'tflite':
interpreter.set_tensor(input_details[0]['index'], image_data)
interpreter.invoke()
pred = [interpreter.get_tensor(output_details[i]['index']) for i in range(len(output_details))]
# run detections using yolov3 if flag is set
if FLAGS.model == 'yolov3' and FLAGS.tiny == True:
boxes, pred_conf = filter_boxes(pred[1], pred[0], score_threshold=0.25,
input_shape=tf.constant([input_size, input_size]))
else:
boxes, pred_conf = filter_boxes(pred[0], pred[1], score_threshold=0.25,
input_shape=tf.constant([input_size, input_size]))
else:
batch_data = tf.constant(image_data)
pred_bbox = infer(batch_data)
for key, value in pred_bbox.items():
boxes = value[:, :, 0:4]
pred_conf = value[:, :, 4:]
boxes, scores, classes, valid_detections = tf.image.combined_non_max_suppression(
boxes=tf.reshape(boxes, (tf.shape(boxes)[0], -1, 1, 4)),
scores=tf.reshape(
pred_conf, (tf.shape(pred_conf)[0], -1, tf.shape(pred_conf)[-1])),
max_output_size_per_class=50,
max_total_size=50,
iou_threshold=FLAGS.iou,
score_threshold=FLAGS.score
)
# print(classes)
# convert data to numpy arrays and slice out unused elements
num_objects = valid_detections.numpy()[0]
bboxes = boxes.numpy()[0]
bboxes = bboxes[0:int(num_objects)]
scores = scores.numpy()[0]
scores = scores[0:int(num_objects)]
classes = classes.numpy()[0]
classes = classes[0:int(num_objects)]
# format bounding boxes from normalized ymin, xmin, ymax, xmax ---> xmin, ymin, width, height
original_h, original_w, _ = frame.shape
bboxes = utils.format_boxes(bboxes, original_h, original_w)
# store all predictions in one parameter for simplicity when calling functions
pred_bbox = [bboxes, scores, classes, num_objects]
# read in all class names from config
class_names = utils.read_class_names(cfg.YOLO.CLASSES)
# by default allow all classes in .names file
# allowed_classes = list(class_names.values())
# custom allowed classes (uncomment line below to customize tracker for only people)
# allowed_classes = ['person', 'car']
allowed_classes = ['person']
# loop through objects and use class index to get class name, allow only classes in allowed_classes list
names = []
deleted_indx = []
for i in range(num_objects):
class_indx = int(classes[i])
class_name = class_names[class_indx]
if class_name not in allowed_classes:
deleted_indx.append(i)
else:
names.append(class_name)
names = np.array(names)
count = len(names)
if FLAGS.count:
cv2.putText(frame, "Objects being tracked: {}".format(count), (5, 35), cv2.FONT_HERSHEY_COMPLEX_SMALL, 2, (0, 255, 0), 2)
print("Objects being tracked: {}".format(count))
# delete detections that are not in allowed_classes
bboxes = np.delete(bboxes, deleted_indx, axis=0)
scores = np.delete(scores, deleted_indx, axis=0)
# encode yolo detections and feed to tracker
features = encoder(frame, bboxes)
detections = [Detection(bbox, score, class_name, feature) for bbox, score, class_name, feature in zip(bboxes, scores, names, features)]
#initialize color map
cmap = plt.get_cmap('tab20b')
colors = [cmap(i)[:3] for i in np.linspace(0, 1, 20)]
        # run non-maxima suppression
boxs = np.array([d.tlwh for d in detections])
scores = np.array([d.confidence for d in detections])
classes = np.array([d.class_name for d in detections])
indices = preprocessing.non_max_suppression(boxs, classes, nms_max_overlap, scores)
detections = [detections[i] for i in indices]
# Call the tracker
tracker.predict()
tracker.update(detections)
# update tracks
for track in tracker.tracks:
if not track.is_confirmed() or track.time_since_update > 1:
continue
bbox = track.to_tlbr()
class_name = track.get_class()
# draw bbox on screen
color = colors[int(track.track_id) % len(colors)]
color = [i * 255 for i in color]
cv2.rectangle(frame, (int(bbox[0]), int(bbox[1])), (int(bbox[2]), int(bbox[3])), color, 2)
cv2.rectangle(frame, (int(bbox[0]), int(bbox[1]-30)), (int(bbox[0])+(len(class_name)+len(str(track.track_id)))*17, int(bbox[1])), color, -1)
cv2.putText(frame, class_name + "-" + str(track.track_id),(int(bbox[0]), int(bbox[1]-10)),0, 0.75, (255,255,255),2)
            # check the action class
track_id = int(track.track_id)
action_box = (int(bbox[0]), int(bbox[1]), int(bbox[2]), int(bbox[3]))
# ret = actionClassHelper.put(track_id, action_box, frame)
# print(track_id, action_box, "==>", ret)
# if enable info flag then print details about each track
if FLAGS.info:
print("Tracker ID: {}, Class: {}, BBox Coords (xmin, ymin, xmax, ymax): {}".format(str(track.track_id), class_name, (int(bbox[0]), int(bbox[1]), int(bbox[2]), int(bbox[3]))))
x = action_box[0]
y = action_box[1]
w = action_box[2] - action_box[0]
h = action_box[3] - action_box[1]
label_line = LABEL_TEMPLATE.format(frame_id=frame_num-1, obj_id=track_id, x=x, y=y, w=w, h=h)
print("label_line:", label_line)
label_lines.append(label_line)
# calculate frames per second of running detections
fps = 1.0 / (time.time() - start_time)
print("FPS: %.2f" % fps)
# result = np.asarray(frame)
# result = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
# if not FLAGS.dont_show:
# cv2.imshow("Output Video", result)
# if output flag is set, save video file
# if FLAGS.output:
# out.write(result)
if cv2.waitKey(1) & 0xFF == ord('q'): break
# cv2.destroyAllWindows()
    # save labels in DarkLabel format
# output_label_path = os.path.join("output_label", os.path.basename(video_path) + ".txt")
common_util.save_lines(FLAGS.output, label_lines)
if __name__ == '__main__':
try:
app.run(main)
except SystemExit:
pass
| []
| []
| [
"TF_CPP_MIN_LOG_LEVEL"
]
| [] | ["TF_CPP_MIN_LOG_LEVEL"] | python | 1 | 0 | |
setup.py | #!/usr/bin/env python
from setuptools import setup, find_packages
import sys
import os
packages = find_packages(exclude=['docs', 'notebooks', 'assets'])
install_requires = [
'numpy>=1.17',
'pandas',
'networkx',
'cvxpy',
'matplotlib',
'seaborn',
'scikit-learn',
'scikit-learn-extra',
'stable-baselines3',
'pyglet',
'joblib',
'gym',
]
with open("README.md", "r") as fh:
long_description = fh.read()
setup(
name='or-suite',
version='0.0.1',
description='OR-Suite: A set of environments for developing reinforcement learning agents for OR problems.',
    long_description=long_description,
long_description_content_type='text/markdown',
author='Christopher Archer, Siddhartha Banerjee, Shashank Pathak, Carrie Rucker, Sean Sinclair, Christina Yu',
    author_email='[email protected]',
license='MIT',
url='https://github.com/seanrsinclair/ORSuite',
packages=packages,
classifiers=[
"Development Status :: 4 - Beta",
"Intended Audience :: Science/Research",
"Intended Audience :: Education",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3 :: Only",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
install_requires=install_requires,
zip_safe=False,
)
| []
| []
| []
| [] | [] | python | null | null | null |
ci/bootstrap.py | import argparse
import asyncio
import base64
import json
import os
from shlex import quote as shq
from typing import Dict, List, Optional, Tuple
import kubernetes_asyncio.client
import kubernetes_asyncio.config
from batch.driver.k8s_cache import K8sCache
from ci.build import BuildConfiguration, Code
from ci.environment import KUBERNETES_SERVER_URL, STORAGE_URI
from ci.github import clone_or_fetch_script
from ci.utils import generate_token
from hailtop.utils import check_shell_output
BATCH_WORKER_IMAGE = os.environ['BATCH_WORKER_IMAGE']
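# Materialize a Kubernetes secret on the host: one file per key, with base64-decoded contents.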
def populate_secret_host_path(host_path: str, secret_data: Dict[str, bytes]):
os.makedirs(host_path)
if secret_data is not None:
for filename, data in secret_data.items():
with open(f'{host_path}/{filename}', 'wb') as f:
f.write(base64.b64decode(data))
class LocalJob:
def __init__(
self,
index: int,
image: str,
command: List[str],
*,
env: Optional[Dict[str, str]] = None,
mount_docker_socket: bool = False,
unconfined: bool = False,
secrets: Optional[List[Dict[str, str]]] = None,
service_account: Optional[Dict[str, str]] = None,
attributes: Optional[Dict[str, str]] = None,
parents: Optional[List['LocalJob']] = None,
input_files: Optional[List[Tuple[str, str]]] = None,
output_files: Optional[List[Tuple[str, str]]] = None,
**kwargs,
):
self._index = index
self._image = image
self._command = command
self._env = env
self._mount_docker_socket = mount_docker_socket
self._unconfined = unconfined
self._parents = parents
self._secrets = secrets
self._service_account = service_account
self._attributes = attributes
self._input_files = input_files
self._output_files = output_files
self._kwargs = kwargs
self._succeeded: Optional[bool] = None
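# Start a detached container with the given `docker ...` argv, wait for it to exit,
# and return (container_id, exited_successfully).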
async def docker_run(*args: str):
script = ' '.join([shq(a) for a in args])
outerr = await check_shell_output(script)
print(f'Container output: {outerr[0]!r}\n' f'Container error: {outerr[1]!r}')
cid = outerr[0].decode('ascii').strip()
outerr = await check_shell_output(f'docker wait {cid}')
exit_code = int(outerr[0].decode('ascii').strip())
return cid, exit_code == 0
class LocalBatchBuilder:
def __init__(self, attributes: Dict[str, str], callback: Optional[str]):
self._attributes = attributes
self._callback = callback
self._jobs: List[LocalJob] = []
@property
def attributes(self) -> Dict[str, str]:
return self._attributes
@property
def callback(self) -> Optional[str]:
return self._callback
def create_job(self, image: str, command: List[str], **kwargs):
index = len(self._jobs)
job = LocalJob(index, image, command, **kwargs)
self._jobs.append(job)
return job
async def run(self):
cwd = os.getcwd()
assert cwd.startswith('/')
batch_token = self._attributes['token']
root = f'{cwd}/_/{batch_token}'
os.makedirs(f'{root}/shared')
prefix = f'{STORAGE_URI}/build/{batch_token}'
for j in self._jobs:
job_name = j._attributes.get('name')
print(f'{j._index}: {job_name}: running...')
if j._parents:
for p in j._parents:
assert p._succeeded is not None
if not p._succeeded:
print(f'{j._index}: {job_name}: SKIPPED: parent {p._index} failed')
j._succeeded = False
if j._succeeded is False:
continue
job_root = f'{root}/{j._index}'
os.makedirs(f'{job_root}/io')
os.makedirs(f'{job_root}/secrets')
if j._input_files:
files = []
for src, dest in j._input_files:
assert src.startswith(prefix), (prefix, src)
src = f'/shared{src[len(prefix):]}'
files.append(
{
'from': src,
'to': dest,
}
)
input_cid, input_ok = await docker_run(
'docker',
'run',
'-d',
'-v',
f'{root}/shared:/shared',
'-v',
f'{job_root}/io:/io',
'--entrypoint',
'/usr/bin/python3',
BATCH_WORKER_IMAGE,
'-m',
'hailtop.aiotools.copy',
json.dumps(None),
json.dumps(files),
)
print(f'{j._index}: {job_name}/input: {input_cid} {"OK" if input_ok else "FAILED"}')
else:
input_ok = True
if input_ok:
mount_options = ['-v', f'{job_root}/io:/io']
env_options = []
if j._env:
for key, value in j._env.items():
env_options.extend(['-e', f'{key}={value}'])
# Reboot the cache on each use. The kube client isn't
# refreshing tokens correctly.
# https://github.com/kubernetes-client/python/issues/741
                # Note that the issue above is filed against the kubernetes-client
                # repo, not kubernetes_asyncio; I'm assuming the latter has the
                # same issue.
k8s_client = kubernetes_asyncio.client.CoreV1Api()
try:
k8s_cache = K8sCache(k8s_client)
if j._service_account:
namespace = j._service_account['namespace']
name = j._service_account['name']
sa = await k8s_cache.read_service_account(name, namespace)
assert len(sa.secrets) == 1
token_secret_name = sa.secrets[0].name
secret = await k8s_cache.read_secret(token_secret_name, namespace)
token = base64.b64decode(secret.data['token']).decode()
cert = secret.data['ca.crt']
kube_config = f'''
apiVersion: v1
clusters:
- cluster:
certificate-authority: /.kube/ca.crt
server: {KUBERNETES_SERVER_URL}
name: default-cluster
contexts:
- context:
cluster: default-cluster
user: {namespace}-{name}
namespace: {namespace}
name: default-context
current-context: default-context
kind: Config
preferences: {{}}
users:
- name: {namespace}-{name}
user:
token: {token}
'''
dot_kube_dir = f'{job_root}/secrets/.kube'
os.makedirs(dot_kube_dir)
with open(f'{dot_kube_dir}/config', 'w') as f:
f.write(kube_config)
with open(f'{dot_kube_dir}/ca.crt', 'w') as f:
f.write(base64.b64decode(cert).decode())
mount_options.extend(['-v', f'{dot_kube_dir}:/.kube'])
env_options.extend(['-e', 'KUBECONFIG=/.kube/config'])
secrets = j._secrets
if secrets:
k8s_secrets = await asyncio.gather(
*[k8s_cache.read_secret(secret['name'], secret['namespace']) for secret in secrets]
)
for secret, k8s_secret in zip(secrets, k8s_secrets):
secret_host_path = f'{job_root}/secrets/{k8s_secret.metadata.name}'
populate_secret_host_path(secret_host_path, k8s_secret.data)
mount_options.extend(['-v', f'{secret_host_path}:{secret["mount_path"]}'])
if j._mount_docker_socket:
mount_options.extend(['-v', '/var/run/docker.sock:/var/run/docker.sock'])
if j._unconfined:
security_options = [
'--security-opt',
'seccomp=unconfined',
'--security-opt',
'apparmor=unconfined',
]
else:
security_options = []
main_cid, main_ok = await docker_run(
'docker',
'run',
'-d',
*env_options,
*mount_options,
*security_options,
'--entrypoint',
j._command[0],
j._image,
*j._command[1:],
)
print(f'{j._index}: {job_name}/main: {main_cid} {"OK" if main_ok else "FAILED"}')
finally:
await k8s_client.api_client.rest_client.pool_manager.close()
else:
main_ok = False
print(f'{j._index}: {job_name}/main: SKIPPED: input failed')
if j._output_files:
if main_ok:
files = []
for src, dest in j._output_files:
assert dest.startswith(prefix), (prefix, dest)
dest = f'/shared{dest[len(prefix):]}'
files.append(
{
'from': src,
'to': dest,
}
)
output_cid, output_ok = await docker_run(
'docker',
'run',
'-d',
'-v',
f'{root}/shared:/shared',
'-v',
f'{job_root}/io:/io',
'--entrypoint',
'/usr/bin/python3',
BATCH_WORKER_IMAGE,
'-m',
'hailtop.aiotools.copy',
json.dumps(None),
json.dumps(files),
)
print(f'{j._index}: {job_name}/output: {output_cid} {"OK" if output_ok else "FAILED"}')
else:
output_ok = False
print(f'{j._index}: {job_name}/output: SKIPPED: main failed')
else:
output_ok = True
j._succeeded = input_ok and main_ok and output_ok
class Branch(Code):
def __init__(self, owner: str, repo: str, branch: str, sha: str, extra_config: Dict[str, str]):
self._owner = owner
self._repo = repo
self._branch = branch
self._sha = sha
self._extra_config = extra_config
def short_str(self) -> str:
return f'br-{self._owner}-{self._repo}-{self._branch}'
def repo_url(self) -> str:
return f'https://github.com/{self._owner}/{self._repo}'
def config(self) -> Dict[str, str]:
config = {
'checkout_script': self.checkout_script(),
'branch': self._branch,
'repo': f'{self._owner}/{self._repo}',
'repo_url': self.repo_url(),
'sha': self._sha,
}
config.update(self._extra_config)
return config
def checkout_script(self) -> str:
return f'''
{clone_or_fetch_script(self.repo_url())}
git checkout {shq(self._sha)}
'''
def repo_dir(self) -> str:
return '.'
async def main():
await kubernetes_asyncio.config.load_kube_config()
    parser = argparse.ArgumentParser(description='Bootstrap a Hail-as-a-service installation.')
parser.add_argument(
'--extra-code-config', dest='extra_code_config', default='{}', help='Extra code config in JSON format.'
)
parser.add_argument(
'branch', help='Github branch to run. It should be the same branch bootstrap.py is being run from.'
)
parser.add_argument('sha', help='SHA of the git commit to run. It should match the branch.')
parser.add_argument('steps', help='The requested steps to execute.')
args = parser.parse_args()
branch_pieces = args.branch.split(":")
assert len(branch_pieces) == 2, f'{branch_pieces} {args.branch}'
repo_pieces = branch_pieces[0].split("/")
assert len(repo_pieces) == 2, f'{repo_pieces} {branch_pieces[0]}'
owner = repo_pieces[0]
repo_name = repo_pieces[1]
branch_name = branch_pieces[1]
extra_code_config = json.loads(args.extra_code_config)
scope = 'deploy'
code = Branch(owner, repo_name, branch_name, args.sha, extra_code_config)
steps = [s.strip() for s in args.steps.split(',')]
with open('build.yaml', 'r') as f:
config = BuildConfiguration(code, f.read(), scope, requested_step_names=steps)
token = generate_token()
batch = LocalBatchBuilder(attributes={'token': token}, callback=None)
config.build(batch, code, scope)
await batch.run()
asyncio.get_event_loop().run_until_complete(main())
| []
| []
| [
"BATCH_WORKER_IMAGE"
]
| [] | ["BATCH_WORKER_IMAGE"] | python | 1 | 0 | |
tests/unit/loader/test_tf_dataloader.py | #
# Copyright (c) 2021, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import importlib
import os
import subprocess
from merlin.core.dispatch import HAS_GPU, make_df
try:
import cupy
except ImportError:
cupy = None
import numpy as np
import pandas as pd
import pytest
from sklearn.metrics import roc_auc_score
import nvtabular as nvt
import nvtabular.tools.data_gen as datagen
from nvtabular import ops
from nvtabular.io import Dataset
tf = pytest.importorskip("tensorflow")
# If tensorflow isn't installed skip these tests. Note that the
# tf_dataloader import needs to happen after this line
tf_dataloader = pytest.importorskip("nvtabular.loader.tensorflow")
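# The nested-list test checks that ragged list columns come back from the loader as
# (values, row_lengths) pairs that reconstruct the original per-row lists.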
def test_nested_list():
num_rows = 100
batch_size = 12
df = pd.DataFrame(
{
"data": [
np.random.rand(np.random.randint(10) + 1, 3).tolist() for i in range(num_rows)
],
"data2": [np.random.rand(np.random.randint(10) + 1).tolist() for i in range(num_rows)],
"label": [np.random.rand() for i in range(num_rows)],
}
)
train_dataset = tf_dataloader.KerasSequenceLoader(
Dataset(df),
cont_names=["data", "data2"],
label_names=["label"],
batch_size=batch_size,
shuffle=False,
)
batch = next(iter(train_dataset))
# [[1,2,3],[3,1],[...],[]]
nested_data_col = tf.RaggedTensor.from_row_lengths(
batch[0]["data"][0][:, 0], tf.cast(batch[0]["data"][1][:, 0], tf.int32)
).to_tensor()
true_data_col = tf.reshape(
tf.ragged.constant(df.iloc[:batch_size, 0].tolist()).to_tensor(), [batch_size, -1]
)
# [1,2,3]
multihot_data2_col = tf.RaggedTensor.from_row_lengths(
batch[0]["data2"][0][:, 0], tf.cast(batch[0]["data2"][1][:, 0], tf.int32)
).to_tensor()
true_data2_col = tf.reshape(
tf.ragged.constant(df.iloc[:batch_size, 1].tolist()).to_tensor(), [batch_size, -1]
)
assert nested_data_col.shape == true_data_col.shape
assert np.allclose(nested_data_col.numpy(), true_data_col.numpy())
assert multihot_data2_col.shape == true_data2_col.shape
assert np.allclose(multihot_data2_col.numpy(), true_data2_col.numpy())
def test_shuffling():
num_rows = 10000
batch_size = 10000
df = pd.DataFrame({"a": np.asarray(range(num_rows)), "b": np.asarray([0] * num_rows)})
train_dataset = tf_dataloader.KerasSequenceLoader(
Dataset(df), cont_names=["a"], label_names=["b"], batch_size=batch_size, shuffle=True
)
batch = next(iter(train_dataset))
first_batch = tf.reshape(tf.cast(batch[0]["a"].cpu(), tf.int32), (batch_size,))
in_order = tf.range(0, batch_size, dtype=tf.int32)
assert (first_batch != in_order).numpy().any()
assert (tf.sort(first_batch) == in_order).numpy().all()
@pytest.mark.parametrize("batch_size", [10, 9, 8])
@pytest.mark.parametrize("drop_last", [True, False])
@pytest.mark.parametrize("num_rows", [100])
def test_tf_drp_reset(tmpdir, batch_size, drop_last, num_rows):
df = make_df(
{
"cat1": [1] * num_rows,
"cat2": [2] * num_rows,
"cat3": [3] * num_rows,
"label": [0] * num_rows,
"cont3": [3.0] * num_rows,
"cont2": [2.0] * num_rows,
"cont1": [1.0] * num_rows,
}
)
path = os.path.join(tmpdir, "dataset.parquet")
df.to_parquet(path)
cat_names = ["cat3", "cat2", "cat1"]
cont_names = ["cont3", "cont2", "cont1"]
label_name = ["label"]
data_itr = tf_dataloader.KerasSequenceLoader(
[path],
cat_names=cat_names,
cont_names=cont_names,
batch_size=batch_size,
label_names=label_name,
shuffle=False,
drop_last=drop_last,
)
all_len = len(data_itr) if drop_last else len(data_itr) - 1
all_rows = 0
for idx, (X, y) in enumerate(data_itr):
all_rows += len(X["cat1"])
if idx < all_len:
assert list(X["cat1"].numpy()) == [1] * batch_size
assert list(X["cat2"].numpy()) == [2] * batch_size
assert list(X["cat3"].numpy()) == [3] * batch_size
assert list(X["cont1"].numpy()) == [1.0] * batch_size
assert list(X["cont2"].numpy()) == [2.0] * batch_size
assert list(X["cont3"].numpy()) == [3.0] * batch_size
if drop_last and num_rows % batch_size > 0:
assert num_rows > all_rows
else:
assert num_rows == all_rows
def test_tf_catname_ordering(tmpdir):
df = make_df(
{
"cat1": [1] * 100,
"cat2": [2] * 100,
"cat3": [3] * 100,
"label": [0] * 100,
"cont3": [3.0] * 100,
"cont2": [2.0] * 100,
"cont1": [1.0] * 100,
}
)
path = os.path.join(tmpdir, "dataset.parquet")
df.to_parquet(path)
cat_names = ["cat3", "cat2", "cat1"]
cont_names = ["cont3", "cont2", "cont1"]
label_name = ["label"]
data_itr = tf_dataloader.KerasSequenceLoader(
[path],
cat_names=cat_names,
cont_names=cont_names,
batch_size=10,
label_names=label_name,
shuffle=False,
)
for X, y in data_itr:
assert list(X["cat1"].numpy()) == [1] * 10
assert list(X["cat2"].numpy()) == [2] * 10
assert list(X["cat3"].numpy()) == [3] * 10
assert list(X["cont1"].numpy()) == [1.0] * 10
assert list(X["cont2"].numpy()) == [2.0] * 10
assert list(X["cont3"].numpy()) == [3.0] * 10
def test_tf_map(tmpdir):
df = make_df(
{
"cat1": [1] * 100,
"cat2": [2] * 100,
"cat3": [3] * 100,
"label": [0] * 100,
"sample_weight": [1.0] * 100,
"cont2": [2.0] * 100,
"cont1": [1.0] * 100,
}
)
path = os.path.join(tmpdir, "dataset.parquet")
df.to_parquet(path)
cat_names = ["cat3", "cat2", "cat1"]
cont_names = ["sample_weight", "cont2", "cont1"]
label_name = ["label"]
def add_sample_weight(features, labels, sample_weight_col_name="sample_weight"):
sample_weight = tf.cast(features.pop(sample_weight_col_name) > 0, tf.float32)
return features, labels, sample_weight
data_itr = tf_dataloader.KerasSequenceLoader(
[path],
cat_names=cat_names,
cont_names=cont_names,
batch_size=10,
label_names=label_name,
shuffle=False,
).map(add_sample_weight)
for X, y, sample_weight in data_itr:
assert list(X["cat1"].numpy()) == [1] * 10
assert list(X["cat2"].numpy()) == [2] * 10
assert list(X["cat3"].numpy()) == [3] * 10
assert list(X["cont1"].numpy()) == [1.0] * 10
assert list(X["cont2"].numpy()) == [2.0] * 10
assert list(sample_weight.numpy()) == [1.0] * 10
# TODO: include use_columns option
# TODO: include parts_per_chunk test
@pytest.mark.parametrize("gpu_memory_frac", [0.01, 0.06])
@pytest.mark.parametrize("engine", ["parquet"])
@pytest.mark.parametrize("batch_size", [1, 10, 100])
@pytest.mark.parametrize("use_paths", [True, False])
@pytest.mark.parametrize("cpu_true", [False, True])
@pytest.mark.parametrize("device", ["cpu", 0])
def test_tf_gpu_dl(
tmpdir, paths, use_paths, device, cpu_true, dataset, batch_size, gpu_memory_frac, engine
):
cont_names = ["x", "y", "id"]
cat_names = ["name-string"]
label_name = ["label"]
if engine == "parquet":
cat_names.append("name-cat")
columns = cont_names + cat_names
conts = cont_names >> ops.FillMedian() >> ops.Normalize()
cats = cat_names >> ops.Categorify()
workflow = nvt.Workflow(conts + cats + label_name)
workflow.fit(dataset)
workflow.transform(dataset).to_parquet(tmpdir + "/processed")
data_itr = tf_dataloader.KerasSequenceLoader(
str(tmpdir + "/processed"), # workflow.transform(dataset),
cat_names=cat_names,
cont_names=cont_names,
batch_size=batch_size,
buffer_size=gpu_memory_frac,
label_names=label_name,
engine=engine,
shuffle=False,
device=device,
reader_kwargs={"cpu": cpu_true},
)
_ = tf.random.uniform((1,))
rows = 0
for idx in range(len(data_itr)):
X, y = next(data_itr)
# first elements to check epoch-to-epoch consistency
if idx == 0:
X0, y0 = X, y
# check that we have at most batch_size elements
num_samples = y.shape[0]
if num_samples != batch_size:
try:
next(data_itr)
except StopIteration:
rows += num_samples
continue
else:
raise ValueError("Batch size too small at idx {}".format(idx))
# check that all the features in X have the
# appropriate length and that the set of
# their names is exactly the set of names in
# `columns`
these_cols = columns.copy()
for column, x in X.items():
try:
these_cols.remove(column)
except ValueError as e:
raise AssertionError from e
assert x.shape[0] == num_samples
assert len(these_cols) == 0
rows += num_samples
assert (idx + 1) * batch_size >= rows
row_count = (60 * 24 * 3 + 1) if HAS_GPU else (60 * 24 * 3)
assert rows == row_count
# if num_samples is equal to batch size,
# we didn't exhaust the iterator and do
# cleanup. Try that now
if num_samples == batch_size:
try:
next(data_itr)
except StopIteration:
pass
else:
raise ValueError
assert not data_itr._working
assert data_itr._batch_itr is None
# check start of next epoch to ensure consistency
X, y = next(data_itr)
assert (y.numpy() == y0.numpy()).all()
for column, x in X.items():
x0 = X0.pop(column)
assert (x.numpy() == x0.numpy()).all()
assert len(X0) == 0
data_itr.stop()
assert not data_itr._working
assert data_itr._batch_itr is None
@pytest.mark.parametrize("batch_size", [1, 2, 3])
def test_mh_support(tmpdir, batch_size):
data = {
"Authors": [["User_A"], ["User_A", "User_E"], ["User_B", "User_C"], ["User_C"]],
"Reviewers": [
["User_A"],
["User_A", "User_E"],
["User_B", "User_C"],
["User_C"],
],
"Engaging User": ["User_B", "User_B", "User_A", "User_D"],
"Embedding": [
[0.1, 0.2, 0.3],
[0.3, 0.4, 0.5],
[0.6, 0.7, 0.8],
[0.8, 0.4, 0.2],
],
"Post": [1, 2, 3, 4],
}
df = make_df(data)
cat_names = ["Authors", "Reviewers", "Engaging User"]
cont_names = ["Embedding"]
label_name = ["Post"]
if HAS_GPU:
cats = cat_names >> ops.HashBucket(num_buckets=10)
else:
cats = cat_names >> ops.Categorify()
workflow = nvt.Workflow(cats + cont_names + label_name)
data_itr = tf_dataloader.KerasSequenceLoader(
workflow.fit_transform(nvt.Dataset(df)),
cat_names=cat_names,
cont_names=cont_names,
label_names=label_name,
batch_size=batch_size,
shuffle=False,
)
nnzs = None
idx = 0
for X, y in data_itr:
assert len(X) == 4
n_samples = y.shape[0]
for mh_name in ["Authors", "Reviewers", "Embedding"]:
# assert (mh_name) in X
array, nnzs = X[mh_name]
nnzs = nnzs.numpy()[:, 0]
array = array.numpy()[:, 0]
if mh_name == "Embedding":
assert (nnzs == 3).all()
else:
lens = [
len(x) for x in data[mh_name][idx * batch_size : idx * batch_size + n_samples]
]
assert (nnzs == np.array(lens)).all()
if mh_name == "Embedding":
assert len(array) == (n_samples * 3)
else:
assert len(array) == sum(lens)
idx += 1
assert idx == (3 // batch_size + 1)
@pytest.mark.parametrize("batch_size", [1, 2, 4])
def test_validater(tmpdir, batch_size):
n_samples = 9
rand = np.random.RandomState(0)
gdf = make_df({"a": rand.randn(n_samples), "label": rand.randint(2, size=n_samples)})
dataloader = tf_dataloader.KerasSequenceLoader(
nvt.Dataset(gdf),
batch_size=batch_size,
cat_names=[],
cont_names=["a"],
label_names=["label"],
shuffle=False,
)
input_ = tf.keras.Input(name="a", dtype=tf.float32, shape=(1,))
x = tf.keras.layers.Dense(128, "relu")(input_)
x = tf.keras.layers.Dense(1, activation="softmax")(x)
model = tf.keras.Model(inputs=input_, outputs=x)
model.compile("sgd", "binary_crossentropy", metrics=["accuracy", tf.keras.metrics.AUC()])
validater = tf_dataloader.KerasSequenceValidater(dataloader)
model.fit(dataloader, epochs=2, verbose=0, callbacks=[validater])
predictions, labels = [], []
for X, y_true in dataloader:
y_pred = model(X)
labels.extend(y_true.numpy()[:, 0])
predictions.extend(y_pred.numpy()[:, 0])
predictions = np.array(predictions)
labels = np.array(labels)
logs = {}
validater.on_epoch_end(0, logs)
auc_key = [i for i in logs if i.startswith("val_auc")][0]
true_accuracy = (labels == (predictions > 0.5)).mean()
estimated_accuracy = logs["val_accuracy"]
assert np.isclose(true_accuracy, estimated_accuracy, rtol=1e-6)
true_auc = roc_auc_score(labels, predictions)
estimated_auc = logs[auc_key]
assert np.isclose(true_auc, estimated_auc, rtol=1e-6)
@pytest.mark.parametrize("engine", ["parquet"])
@pytest.mark.parametrize("batch_size", [1, 10, 100])
@pytest.mark.parametrize("global_rank", [0, 1])
def test_multigpu_partitioning(datasets, engine, batch_size, global_rank):
cont_names = ["x", "y", "id"]
cat_names = ["name-string", "name-cat"]
label_name = ["label"]
data_loader = tf_dataloader.KerasSequenceLoader(
str(datasets["parquet"]),
cat_names=cat_names,
cont_names=cont_names,
batch_size=batch_size,
buffer_size=0.1,
label_names=label_name,
engine=engine,
shuffle=False,
global_size=2,
global_rank=global_rank,
)
indices = data_loader._gather_indices_for_dev(None)
assert indices == [global_rank]
@pytest.mark.parametrize("sparse_dense", [False, True])
def test_sparse_tensors(tmpdir, sparse_dense):
# create small dataset, add values to sparse_list
json_sample = {
"conts": {},
"cats": {
"spar1": {
"dtype": None,
"cardinality": 50,
"min_entry_size": 1,
"max_entry_size": 5,
"multi_min": 2,
"multi_max": 4,
"multi_avg": 3,
},
"spar2": {
"dtype": None,
"cardinality": 50,
"min_entry_size": 1,
"max_entry_size": 5,
"multi_min": 3,
"multi_max": 5,
"multi_avg": 4,
},
# "": {"dtype": None, "cardinality": 500, "min_entry_size": 1, "max_entry_size": 5},
},
"labels": {"rating": {"dtype": None, "cardinality": 2}},
}
cols = datagen._get_cols_from_schema(json_sample)
df_gen = datagen.DatasetGen(datagen.UniformDistro(), gpu_frac=0.0001)
target_path = os.path.join(tmpdir, "input/")
os.mkdir(target_path)
df_files = df_gen.full_df_create(10000, cols, output=target_path)
spa_lst = ["spar1", "spar2"]
spa_mx = {"spar1": 5, "spar2": 6}
batch_size = 10
data_itr = tf_dataloader.KerasSequenceLoader(
df_files,
cat_names=spa_lst,
cont_names=[],
label_names=["rating"],
batch_size=batch_size,
buffer_size=0.1,
sparse_names=spa_lst,
sparse_max=spa_mx,
sparse_as_dense=sparse_dense,
)
for batch in data_itr:
feats, labs = batch
for col in spa_lst:
feature_tensor = feats[f"{col}"]
if not sparse_dense:
assert list(feature_tensor.shape) == [batch_size, spa_mx[col]]
assert isinstance(feature_tensor, tf.sparse.SparseTensor)
else:
assert feature_tensor.shape[1] == spa_mx[col]
assert not isinstance(feature_tensor, tf.sparse.SparseTensor)
@pytest.mark.skipif(
os.environ.get("NR_USER") is not None, reason="not working correctly in ci environment"
)
@pytest.mark.skipif(importlib.util.find_spec("horovod") is None, reason="needs horovod")
@pytest.mark.skipif(
cupy and cupy.cuda.runtime.getDeviceCount() <= 1,
reason="This unittest requires multiple gpu's to run",
)
def test_horovod_multigpu(tmpdir):
json_sample = {
"conts": {},
"cats": {
"genres": {
"dtype": None,
"cardinality": 50,
"min_entry_size": 1,
"max_entry_size": 5,
"multi_min": 2,
"multi_max": 4,
"multi_avg": 3,
},
"movieId": {
"dtype": None,
"cardinality": 500,
"min_entry_size": 1,
"max_entry_size": 5,
},
"userId": {"dtype": None, "cardinality": 500, "min_entry_size": 1, "max_entry_size": 5},
},
"labels": {"rating": {"dtype": None, "cardinality": 2}},
}
cols = datagen._get_cols_from_schema(json_sample)
df_gen = datagen.DatasetGen(datagen.UniformDistro(), gpu_frac=0.0001)
target_path = os.path.join(tmpdir, "input/")
os.mkdir(target_path)
df_files = df_gen.full_df_create(10000, cols, output=target_path)
# process them
cat_features = nvt.ColumnSelector(["userId", "movieId", "genres"]) >> nvt.ops.Categorify()
ratings = nvt.ColumnSelector(["rating"]) >> nvt.ops.LambdaOp(
lambda col: (col > 3).astype("int8")
)
output = cat_features + ratings
proc = nvt.Workflow(output)
target_path_train = os.path.join(tmpdir, "train/")
os.mkdir(target_path_train)
proc.fit_transform(nvt.Dataset(df_files)).to_parquet(
output_path=target_path_train, out_files_per_proc=5
)
# add new location
target_path = os.path.join(tmpdir, "workflow/")
os.mkdir(target_path)
proc.save(target_path)
curr_path = os.path.abspath(__file__)
repo_root = os.path.relpath(os.path.normpath(os.path.join(curr_path, "../../../..")))
hvd_wrap_path = os.path.join(repo_root, "examples/multi-gpu-movielens/hvd_wrapper.sh")
hvd_exam_path = os.path.join(repo_root, "examples/multi-gpu-movielens/tf_trainer.py")
with subprocess.Popen(
[
"horovodrun",
"-np",
"2",
"-H",
"localhost:2",
"sh",
hvd_wrap_path,
"python",
hvd_exam_path,
"--dir_in",
f"{tmpdir}",
"--batch_size",
"1024",
],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
) as process:
process.wait()
stdout, stderr = process.communicate()
print(stdout, stderr)
assert "Loss:" in str(stdout)
@pytest.mark.parametrize("batch_size", [1000])
@pytest.mark.parametrize("engine", ["parquet"])
@pytest.mark.parametrize("device", [None, 0])
def test_dataloader_schema(tmpdir, df, dataset, batch_size, engine, device):
cat_names = ["name-cat", "name-string"]
cont_names = ["x", "y", "id"]
label_name = ["label"]
conts = cont_names >> ops.FillMedian() >> ops.Normalize()
cats = cat_names >> ops.Categorify()
processor = nvt.Workflow(conts + cats + label_name)
output_train = os.path.join(tmpdir, "train/")
os.mkdir(output_train)
processor.fit_transform(dataset).to_parquet(
shuffle=nvt.io.Shuffle.PER_PARTITION,
output_path=output_train,
out_files_per_proc=2,
)
tar_paths = [
os.path.join(output_train, x) for x in os.listdir(output_train) if x.endswith("parquet")
]
nvt_data = nvt.Dataset(tar_paths, engine="parquet")
data_loader = tf_dataloader.KerasSequenceLoader(
nvt_data,
batch_size=batch_size,
shuffle=False,
label_names=label_name,
)
batch = next(iter(data_loader))
assert all(name in batch[0] for name in cat_names)
assert all(name in batch[0] for name in cont_names)
num_label_cols = batch[1].shape[1] if len(batch[1].shape) > 1 else 1
assert num_label_cols == len(label_name)
| []
| []
| [
"NR_USER"
]
| [] | ["NR_USER"] | python | 1 | 0 | |
plugins/config/src/commands/commands.go | package main
import (
"flag"
"fmt"
"os"
"strconv"
"strings"
"github.com/dokku/dokku/plugins/config"
columnize "github.com/ryanuber/columnize"
)
const (
helpHeader = `Usage: dokku config [<app>|--global]
Display all global or app-specific config vars
Additional commands:`
helpContent = `
config (<app>|--global), Pretty-print an app or global environment
config:get (<app>|--global) KEY, Display a global or app-specific config value
config:set (<app>|--global) [--encoded] [--no-restart] KEY1=VALUE1 [KEY2=VALUE2 ...], Set one or more config vars
config:unset (<app>|--global) KEY1 [KEY2 ...], Unset one or more config vars
config:export (<app>|--global) [--envfile], Export a global or app environment
config:keys (<app>|--global) [--merged], Show keys set in environment
config:bundle (<app>|--global) [--merged], Bundle environment into tarfile
`
)
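// main dispatches the config subcommands; unrecognized commands exit with DOKKU_NOT_IMPLEMENTED_EXIT (falling back to 10 if the variable is unset).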
func main() {
flag.Usage = usage
flag.Parse()
cmd := flag.Arg(0)
switch cmd {
case "config", "config:show":
args := flag.NewFlagSet("config:show", flag.ExitOnError)
global := args.Bool("global", false, "--global: use the global environment")
shell := args.Bool("shell", false, "--shell: in a single-line for usage in command-line utilities [deprecated]")
export := args.Bool("export", false, "--export: print the env as eval-compatible exports [deprecated]")
merged := args.Bool("merged", false, "--merged: display the app's environment merged with the global environment")
args.Parse(os.Args[2:])
config.CommandShow(args.Args(), *global, *shell, *export, *merged)
case "help":
fmt.Print(helpContent)
case "config:help":
usage()
default:
dokkuNotImplementExitCode, err := strconv.Atoi(os.Getenv("DOKKU_NOT_IMPLEMENTED_EXIT"))
if err != nil {
fmt.Println("failed to retrieve DOKKU_NOT_IMPLEMENTED_EXIT environment variable")
dokkuNotImplementExitCode = 10
}
os.Exit(dokkuNotImplementExitCode)
}
}
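// usage pretty-prints the help content as an aligned, comma-delimited table.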
func usage() {
config := columnize.DefaultConfig()
config.Delim = ","
config.Prefix = " "
config.Empty = ""
content := strings.Split(helpContent, "\n")[1:]
fmt.Println(helpHeader)
fmt.Println(columnize.Format(content, config))
}
| [
"\"DOKKU_NOT_IMPLEMENTED_EXIT\""
]
| []
| [
"DOKKU_NOT_IMPLEMENTED_EXIT"
]
| [] | ["DOKKU_NOT_IMPLEMENTED_EXIT"] | go | 1 | 0 | |
cvat/apps/engine/views.py | # Copyright (C) 2018-2021 Intel Corporation
#
# SPDX-License-Identifier: MIT
import errno
import io
import os
import os.path as osp
import pytz
import shutil
import traceback
import uuid
from datetime import datetime
from distutils.util import strtobool
from tempfile import mkstemp, NamedTemporaryFile
import cv2
from django.db.models.query import Prefetch
import django_rq
from django.apps import apps
from django.conf import settings
from django.contrib.auth.models import User
from django.db import IntegrityError
from django.http import HttpResponse, HttpResponseNotFound, HttpResponseBadRequest
from django.shortcuts import get_object_or_404
from django.utils import timezone
from django.utils.decorators import method_decorator
from django_filters import rest_framework as filters
from django_filters.rest_framework import DjangoFilterBackend
from drf_yasg import openapi
from drf_yasg.inspectors import CoreAPICompatInspector, NotHandled, FieldInspector
from drf_yasg.utils import swagger_auto_schema
from rest_framework import mixins, serializers, status, viewsets
from rest_framework.decorators import action
from rest_framework.exceptions import APIException, NotFound, ValidationError
from rest_framework.permissions import SAFE_METHODS, IsAuthenticated
from rest_framework.renderers import JSONRenderer
from rest_framework.response import Response
from sendfile import sendfile
import cvat.apps.dataset_manager as dm
import cvat.apps.dataset_manager.views # pylint: disable=unused-import
from cvat.apps.authentication import auth
from cvat.apps.engine.cloud_provider import get_cloud_storage_instance, Credentials, Status
from cvat.apps.dataset_manager.bindings import CvatImportError
from cvat.apps.dataset_manager.serializers import DatasetFormatsSerializer
from cvat.apps.engine.frame_provider import FrameProvider
from cvat.apps.engine.media_extractors import ImageListReader
from cvat.apps.engine.mime_types import mimetypes
from cvat.apps.engine.models import (
Job, StatusChoice, Task, Project, Review, Issue,
Comment, StorageMethodChoice, ReviewStatus, StorageChoice, Image,
CredentialsTypeChoice, CloudProviderChoice
)
from cvat.apps.engine.models import CloudStorage as CloudStorageModel
from cvat.apps.engine.serializers import (
AboutSerializer, AnnotationFileSerializer, BasicUserSerializer,
DataMetaSerializer, DataSerializer, ExceptionSerializer,
FileInfoSerializer, JobSerializer, LabeledDataSerializer,
LogEventSerializer, ProjectSerializer, ProjectSearchSerializer, ProjectWithoutTaskSerializer,
RqStatusSerializer, TaskSerializer, UserSerializer, PluginsSerializer, ReviewSerializer,
CombinedReviewSerializer, IssueSerializer, CombinedIssueSerializer, CommentSerializer,
CloudStorageSerializer, BaseCloudStorageSerializer, TaskFileSerializer,)
from utils.dataset_manifest import ImageManifestManager
from cvat.apps.engine.utils import av_scan_paths
from cvat.apps.engine.backup import import_task
from . import models, task
from .log import clogger, slogger
class ServerViewSet(viewsets.ViewSet):
serializer_class = None
# To get nice documentation about ServerViewSet actions it is necessary
# to implement the method. By default, ViewSet doesn't provide it.
def get_serializer(self, *args, **kwargs):
pass
@staticmethod
@swagger_auto_schema(method='get', operation_summary='Method provides basic CVAT information',
responses={'200': AboutSerializer})
@action(detail=False, methods=['GET'], serializer_class=AboutSerializer)
def about(request):
from cvat import __version__ as cvat_version
        about = {
            "name": "Computer Vision Annotation Tool",
            "version": cvat_version,
            "description": "CVAT is a completely re-designed and re-implemented " +
                "version of the Video Annotation Tool from Irvine, California. " +
                "It is a free, online, interactive video and image annotation " +
                "tool for computer vision. It is being used by our team to " +
                "annotate millions of objects with different properties. Many UI " +
                "and UX decisions are based on feedback from a professional data " +
                "annotation team."
}
serializer = AboutSerializer(data=about)
if serializer.is_valid(raise_exception=True):
return Response(data=serializer.data)
@staticmethod
@swagger_auto_schema(method='post', request_body=ExceptionSerializer)
@action(detail=False, methods=['POST'], serializer_class=ExceptionSerializer)
def exception(request):
"""
Saves an exception from a client on the server
Sends logs to the ELK if it is connected
"""
serializer = ExceptionSerializer(data=request.data)
if serializer.is_valid(raise_exception=True):
additional_info = {
"username": request.user.username,
"name": "Send exception",
}
message = JSONRenderer().render({**serializer.data, **additional_info}).decode('UTF-8')
jid = serializer.data.get("job_id")
tid = serializer.data.get("task_id")
if jid:
clogger.job[jid].error(message)
elif tid:
clogger.task[tid].error(message)
else:
clogger.glob.error(message)
return Response(serializer.data, status=status.HTTP_201_CREATED)
@staticmethod
@swagger_auto_schema(method='post', request_body=LogEventSerializer(many=True))
@action(detail=False, methods=['POST'], serializer_class=LogEventSerializer)
def logs(request):
"""
Saves logs from a client on the server
Sends logs to the ELK if it is connected
"""
serializer = LogEventSerializer(many=True, data=request.data)
if serializer.is_valid(raise_exception=True):
user = { "username": request.user.username }
for event in serializer.data:
message = JSONRenderer().render({**event, **user}).decode('UTF-8')
jid = event.get("job_id")
tid = event.get("task_id")
if jid:
clogger.job[jid].info(message)
elif tid:
clogger.task[tid].info(message)
else:
clogger.glob.info(message)
return Response(serializer.data, status=status.HTTP_201_CREATED)
@staticmethod
@swagger_auto_schema(
method='get', operation_summary='Returns all files and folders that are on the server along specified path',
manual_parameters=[openapi.Parameter('directory', openapi.IN_QUERY, type=openapi.TYPE_STRING, description='Directory to browse')],
responses={'200' : FileInfoSerializer(many=True)}
)
@action(detail=False, methods=['GET'], serializer_class=FileInfoSerializer)
def share(request):
param = request.query_params.get('directory', '/')
if param.startswith("/"):
param = param[1:]
directory = os.path.abspath(os.path.join(settings.SHARE_ROOT, param))
if directory.startswith(settings.SHARE_ROOT) and os.path.isdir(directory):
data = []
content = os.scandir(directory)
for entry in content:
entry_type = None
if entry.is_file():
entry_type = "REG"
elif entry.is_dir():
entry_type = "DIR"
if entry_type:
data.append({"name": entry.name, "type": entry_type})
serializer = FileInfoSerializer(many=True, data=data)
if serializer.is_valid(raise_exception=True):
return Response(serializer.data)
else:
return Response("{} is an invalid directory".format(param),
status=status.HTTP_400_BAD_REQUEST)
@staticmethod
@swagger_auto_schema(method='get', operation_summary='Method provides the list of supported annotations formats',
responses={'200': DatasetFormatsSerializer()})
@action(detail=False, methods=['GET'], url_path='annotation/formats')
def annotation_formats(request):
data = dm.views.get_all_formats()
return Response(DatasetFormatsSerializer(data).data)
@staticmethod
@swagger_auto_schema(method='get', operation_summary='Method provides allowed plugins.',
responses={'200': PluginsSerializer()})
@action(detail=False, methods=['GET'], url_path='plugins', serializer_class=PluginsSerializer)
def plugins(request):
response = {
'GIT_INTEGRATION': apps.is_installed('cvat.apps.dataset_repo'),
'ANALYTICS': False,
'MODELS': False,
'PREDICT': apps.is_installed('cvat.apps.training')
}
if strtobool(os.environ.get("CVAT_ANALYTICS", '0')):
response['ANALYTICS'] = True
if strtobool(os.environ.get("CVAT_SERVERLESS", '0')):
response['MODELS'] = True
return Response(response)
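# Allows the project list to be filtered by id and by case-insensitive substrings of name, owner, assignee and status.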
class ProjectFilter(filters.FilterSet):
name = filters.CharFilter(field_name="name", lookup_expr="icontains")
owner = filters.CharFilter(field_name="owner__username", lookup_expr="icontains")
assignee = filters.CharFilter(field_name="assignee__username", lookup_expr="icontains")
status = filters.CharFilter(field_name="status", lookup_expr="icontains")
class Meta:
model = models.Project
fields = ("id", "name", "owner", "status")
@method_decorator(name='list', decorator=swagger_auto_schema(
operation_summary='Returns a paginated list of projects according to query parameters (12 projects per page)',
manual_parameters=[
openapi.Parameter('id', openapi.IN_QUERY, description="A unique number value identifying this project",
type=openapi.TYPE_NUMBER),
openapi.Parameter('name', openapi.IN_QUERY, description="Find all projects where name contains a parameter value",
type=openapi.TYPE_STRING),
openapi.Parameter('owner', openapi.IN_QUERY, description="Find all project where owner name contains a parameter value",
type=openapi.TYPE_STRING),
openapi.Parameter('status', openapi.IN_QUERY, description="Find all projects with a specific status",
type=openapi.TYPE_STRING, enum=[str(i) for i in StatusChoice]),
openapi.Parameter('names_only', openapi.IN_QUERY, description="Returns only names and id's of projects.",
type=openapi.TYPE_BOOLEAN),
openapi.Parameter('without_tasks', openapi.IN_QUERY, description="Returns only projects entities without related tasks",
type=openapi.TYPE_BOOLEAN)],))
@method_decorator(name='create', decorator=swagger_auto_schema(operation_summary='Method creates a new project'))
@method_decorator(name='retrieve', decorator=swagger_auto_schema(operation_summary='Method returns details of a specific project'))
@method_decorator(name='destroy', decorator=swagger_auto_schema(operation_summary='Method deletes a specific project'))
@method_decorator(name='partial_update', decorator=swagger_auto_schema(operation_summary='Methods does a partial update of chosen fields in a project'))
class ProjectViewSet(auth.ProjectGetQuerySetMixin, viewsets.ModelViewSet):
queryset = models.Project.objects.all().order_by('-id')
search_fields = ("name", "owner__username", "assignee__username", "status")
filterset_class = ProjectFilter
ordering_fields = ("id", "name", "owner", "status", "assignee")
http_method_names = ['get', 'post', 'head', 'patch', 'delete']
def get_serializer_class(self):
if self.request.path.endswith('tasks'):
return TaskSerializer
if self.request.query_params and self.request.query_params.get("names_only") == "true":
return ProjectSearchSerializer
if self.request.query_params and self.request.query_params.get("without_tasks") == "true":
return ProjectWithoutTaskSerializer
else:
return ProjectSerializer
def get_permissions(self):
http_method = self.request.method
permissions = [IsAuthenticated]
if http_method in SAFE_METHODS:
permissions.append(auth.ProjectAccessPermission)
elif http_method in ["POST"]:
permissions.append(auth.ProjectCreatePermission)
elif http_method in ["PATCH"]:
permissions.append(auth.ProjectChangePermission)
elif http_method in ["DELETE"]:
permissions.append(auth.ProjectDeletePermission)
else:
permissions.append(auth.AdminRolePermission)
return [perm() for perm in permissions]
def perform_create(self, serializer):
def validate_project_limit(owner):
admin_perm = auth.AdminRolePermission()
is_admin = admin_perm.has_permission(self.request, self)
if not is_admin and settings.RESTRICTIONS['project_limit'] is not None and \
Project.objects.filter(owner=owner).count() >= settings.RESTRICTIONS['project_limit']:
raise serializers.ValidationError('The user has the maximum number of projects')
owner = self.request.data.get('owner', None)
if owner:
validate_project_limit(owner)
serializer.save()
else:
validate_project_limit(self.request.user)
serializer.save(owner=self.request.user)
@swagger_auto_schema(method='get', operation_summary='Returns information of the tasks of the project with the selected id',
responses={'200': TaskSerializer(many=True)})
@action(detail=True, methods=['GET'], serializer_class=TaskSerializer)
def tasks(self, request, pk):
self.get_object() # force to call check_object_permissions
queryset = Task.objects.filter(project_id=pk).order_by('-id')
queryset = auth.filter_task_queryset(queryset, request.user)
page = self.paginate_queryset(queryset)
if page is not None:
serializer = self.get_serializer(page, many=True,
context={"request": request})
return self.get_paginated_response(serializer.data)
serializer = self.get_serializer(queryset, many=True,
context={"request": request})
return Response(serializer.data)
@swagger_auto_schema(method='get', operation_summary='Export project as a dataset in a specific format',
manual_parameters=[
openapi.Parameter('format', openapi.IN_QUERY,
description="Desired output format name\nYou can get the list of supported formats at:\n/server/annotation/formats",
type=openapi.TYPE_STRING, required=True),
openapi.Parameter('filename', openapi.IN_QUERY,
description="Desired output file name",
type=openapi.TYPE_STRING, required=False),
openapi.Parameter('action', in_=openapi.IN_QUERY,
description='Used to start downloading process after annotation file had been created',
type=openapi.TYPE_STRING, required=False, enum=['download'])
],
responses={'202': openapi.Response(description='Exporting has been started'),
'201': openapi.Response(description='Output file is ready for downloading'),
'200': openapi.Response(description='Download of file started'),
'405': openapi.Response(description='Format is not available'),
}
)
@action(detail=True, methods=['GET'], serializer_class=None,
url_path='dataset')
def dataset_export(self, request, pk):
db_project = self.get_object() # force to call check_object_permissions
format_name = request.query_params.get("format", "")
return _export_annotations(db_instance=db_project,
rq_id="/api/v1/project/{}/dataset/{}".format(pk, format_name),
request=request,
action=request.query_params.get("action", "").lower(),
callback=dm.views.export_project_as_dataset,
format_name=format_name,
filename=request.query_params.get("filename", "").lower(),
)
@swagger_auto_schema(method='get', operation_summary='Method allows to download project annotations',
manual_parameters=[
openapi.Parameter('format', openapi.IN_QUERY,
description="Desired output format name\nYou can get the list of supported formats at:\n/server/annotation/formats",
type=openapi.TYPE_STRING, required=True),
openapi.Parameter('filename', openapi.IN_QUERY,
description="Desired output file name",
type=openapi.TYPE_STRING, required=False),
openapi.Parameter('action', in_=openapi.IN_QUERY,
description='Used to start downloading process after annotation file had been created',
type=openapi.TYPE_STRING, required=False, enum=['download'])
],
responses={
'202': openapi.Response(description='Dump of annotations has been started'),
'201': openapi.Response(description='Annotations file is ready to download'),
'200': openapi.Response(description='Download of file started'),
'405': openapi.Response(description='Format is not available'),
'401': openapi.Response(description='Format is not specified'),
}
)
@action(detail=True, methods=['GET'],
serializer_class=LabeledDataSerializer)
def annotations(self, request, pk):
db_project = self.get_object() # force to call check_object_permissions
format_name = request.query_params.get('format')
if format_name:
return _export_annotations(db_instance=db_project,
rq_id="/api/v1/projects/{}/annotations/{}".format(pk, format_name),
request=request,
action=request.query_params.get("action", "").lower(),
callback=dm.views.export_project_annotations,
format_name=format_name,
filename=request.query_params.get("filename", "").lower(),
)
else:
return Response("Format is not specified",status=status.HTTP_400_BAD_REQUEST)
class TaskFilter(filters.FilterSet):
project = filters.CharFilter(field_name="project__name", lookup_expr="icontains")
name = filters.CharFilter(field_name="name", lookup_expr="icontains")
owner = filters.CharFilter(field_name="owner__username", lookup_expr="icontains")
mode = filters.CharFilter(field_name="mode", lookup_expr="icontains")
status = filters.CharFilter(field_name="status", lookup_expr="icontains")
assignee = filters.CharFilter(field_name="assignee__username", lookup_expr="icontains")
class Meta:
model = Task
fields = ("id", "project_id", "project", "name", "owner", "mode", "status",
"assignee")
class DjangoFilterInspector(CoreAPICompatInspector):
def get_filter_parameters(self, filter_backend):
if isinstance(filter_backend, DjangoFilterBackend):
result = super(DjangoFilterInspector, self).get_filter_parameters(filter_backend)
res = result.copy()
# Voxel hack - allow filtering by project
# for param in result:
# if param.get('name') == 'project_id' or param.get('name') == 'project':
# res.remove(param)
return res
return NotHandled
@method_decorator(name='list', decorator=swagger_auto_schema(
operation_summary='Returns a paginated list of tasks according to query parameters (10 tasks per page)',
manual_parameters=[
openapi.Parameter('id',openapi.IN_QUERY,description="A unique number value identifying this task",type=openapi.TYPE_NUMBER),
# Voxel hack - allow filtering by project
openapi.Parameter('project_id',openapi.IN_QUERY,description="Find all tasks where project_id matches a parameter value",type=openapi.TYPE_NUMBER),
openapi.Parameter('projectId',openapi.IN_QUERY,description="Find all tasks where project_id matches a parameter value",type=openapi.TYPE_NUMBER),
openapi.Parameter('name', openapi.IN_QUERY, description="Find all tasks where name contains a parameter value", type=openapi.TYPE_STRING),
openapi.Parameter('owner', openapi.IN_QUERY, description="Find all tasks where owner name contains a parameter value", type=openapi.TYPE_STRING),
openapi.Parameter('mode', openapi.IN_QUERY, description="Find all tasks with a specific mode", type=openapi.TYPE_STRING, enum=['annotation', 'interpolation']),
openapi.Parameter('status', openapi.IN_QUERY, description="Find all tasks with a specific status", type=openapi.TYPE_STRING,enum=['annotation','validation','completed']),
openapi.Parameter('assignee', openapi.IN_QUERY, description="Find all tasks where assignee name contains a parameter value", type=openapi.TYPE_STRING)
],
filter_inspectors=[DjangoFilterInspector]))
@method_decorator(name='create', decorator=swagger_auto_schema(operation_summary='Method creates a new task in a database without any attached images and videos'))
@method_decorator(name='retrieve', decorator=swagger_auto_schema(operation_summary='Method returns details of a specific task'))
@method_decorator(name='update', decorator=swagger_auto_schema(operation_summary='Method updates a task by id'))
@method_decorator(name='destroy', decorator=swagger_auto_schema(operation_summary='Method deletes a specific task, all attached jobs, annotations, and data'))
@method_decorator(name='partial_update', decorator=swagger_auto_schema(operation_summary='Method does a partial update of chosen fields in a task'))
class TaskViewSet(auth.TaskGetQuerySetMixin, viewsets.ModelViewSet):
queryset = Task.objects.all().prefetch_related(
"label_set__attributespec_set",
"segment_set__job_set",
).order_by('-id')
serializer_class = TaskSerializer
search_fields = ("name", "owner__username", "mode", "status")
filterset_class = TaskFilter
ordering_fields = ("id", "name", "owner", "status", "assignee")
def get_permissions(self):
http_method = self.request.method
permissions = [IsAuthenticated]
if http_method in SAFE_METHODS:
permissions.append(auth.TaskAccessPermission)
elif http_method in ["POST"]:
permissions.append(auth.TaskCreatePermission)
elif self.action == 'annotations' or http_method in ["PATCH", "PUT"]:
permissions.append(auth.TaskChangePermission)
elif http_method in ["DELETE"]:
permissions.append(auth.TaskDeletePermission)
else:
permissions.append(auth.AdminRolePermission)
return [perm() for perm in permissions]
def _validate_task_limit(self, owner):
admin_perm = auth.AdminRolePermission()
is_admin = admin_perm.has_permission(self.request, self)
if not is_admin and settings.RESTRICTIONS['task_limit'] is not None and \
Task.objects.filter(owner=owner).count() >= settings.RESTRICTIONS['task_limit']:
raise serializers.ValidationError('The user has the maximum number of tasks')
def create(self, request):
action = self.request.query_params.get('action', None)
if action is None:
return super().create(request)
elif action == 'import':
self._validate_task_limit(owner=self.request.user)
if 'rq_id' in request.data:
rq_id = request.data['rq_id']
else:
rq_id = "{}@/api/v1/tasks/{}/import".format(request.user, uuid.uuid4())
queue = django_rq.get_queue("default")
rq_job = queue.fetch_job(rq_id)
if not rq_job:
serializer = TaskFileSerializer(data=request.data)
serializer.is_valid(raise_exception=True)
task_file = serializer.validated_data['task_file']
fd, filename = mkstemp(prefix='cvat_')
with open(filename, 'wb+') as f:
for chunk in task_file.chunks():
f.write(chunk)
rq_job = queue.enqueue_call(
func=import_task,
args=(filename, request.user.id),
job_id=rq_id,
meta={
'tmp_file': filename,
'tmp_file_descriptor': fd,
},
)
else:
if rq_job.is_finished:
task_id = rq_job.return_value
os.close(rq_job.meta['tmp_file_descriptor'])
os.remove(rq_job.meta['tmp_file'])
rq_job.delete()
return Response({'id': task_id}, status=status.HTTP_201_CREATED)
elif rq_job.is_failed:
os.close(rq_job.meta['tmp_file_descriptor'])
os.remove(rq_job.meta['tmp_file'])
exc_info = str(rq_job.exc_info)
rq_job.delete()
# RQ adds a prefix with exception class name
import_error_prefix = '{}.{}'.format(
CvatImportError.__module__, CvatImportError.__name__)
if exc_info.startswith(import_error_prefix):
exc_info = exc_info.replace(import_error_prefix + ': ', '')
return Response(data=exc_info,
status=status.HTTP_400_BAD_REQUEST)
else:
return Response(data=exc_info,
status=status.HTTP_500_INTERNAL_SERVER_ERROR)
return Response({'rq_id': rq_id}, status=status.HTTP_202_ACCEPTED)
else:
raise serializers.ValidationError(
"Unexpected action specified for the request")
def retrieve(self, request, pk=None):
db_task = self.get_object() # force to call check_object_permissions
action = self.request.query_params.get('action', None)
if action is None:
return super().retrieve(request, pk)
elif action in ('export', 'download'):
queue = django_rq.get_queue("default")
rq_id = "/api/v1/tasks/{}/export".format(pk)
rq_job = queue.fetch_job(rq_id)
if rq_job:
last_task_update_time = timezone.localtime(db_task.updated_date)
request_time = rq_job.meta.get('request_time', None)
if request_time is None or request_time < last_task_update_time:
rq_job.cancel()
rq_job.delete()
else:
if rq_job.is_finished:
file_path = rq_job.return_value
if action == "download" and osp.exists(file_path):
rq_job.delete()
timestamp = datetime.strftime(last_task_update_time,
"%Y_%m_%d_%H_%M_%S")
filename = "task_{}_backup_{}{}".format(
db_task.name, timestamp,
osp.splitext(file_path)[1])
return sendfile(request, file_path, attachment=True,
attachment_filename=filename.lower())
else:
if osp.exists(file_path):
return Response(status=status.HTTP_201_CREATED)
elif rq_job.is_failed:
exc_info = str(rq_job.exc_info)
rq_job.delete()
return Response(exc_info,
status=status.HTTP_500_INTERNAL_SERVER_ERROR)
else:
return Response(status=status.HTTP_202_ACCEPTED)
ttl = dm.views.TASK_CACHE_TTL.total_seconds()
queue.enqueue_call(
func=dm.views.backup_task,
args=(pk, 'task_dump.zip'),
job_id=rq_id,
meta={ 'request_time': timezone.localtime() },
result_ttl=ttl, failure_ttl=ttl)
return Response(status=status.HTTP_202_ACCEPTED)
else:
raise serializers.ValidationError(
"Unexpected action specified for the request")
def perform_create(self, serializer):
owner = self.request.data.get('owner', None)
if owner:
self._validate_task_limit(owner)
serializer.save()
else:
self._validate_task_limit(self.request.user)
serializer.save(owner=self.request.user)
def perform_destroy(self, instance):
task_dirname = instance.get_task_dirname()
super().perform_destroy(instance)
shutil.rmtree(task_dirname, ignore_errors=True)
if instance.data and not instance.data.tasks.all():
shutil.rmtree(instance.data.get_data_dirname(), ignore_errors=True)
instance.data.delete()
@swagger_auto_schema(method='get', operation_summary='Returns a list of jobs for a specific task',
responses={'200': JobSerializer(many=True)})
@action(detail=True, methods=['GET'], serializer_class=JobSerializer)
def jobs(self, request, pk):
self.get_object() # force to call check_object_permissions
queryset = Job.objects.filter(segment__task_id=pk)
serializer = JobSerializer(queryset, many=True,
context={"request": request})
return Response(serializer.data)
@swagger_auto_schema(method='post', operation_summary='Method permanently attaches images or video to a task',
request_body=DataSerializer,
)
@swagger_auto_schema(method='get', operation_summary='Method returns data for a specific task',
manual_parameters=[
openapi.Parameter('type', in_=openapi.IN_QUERY, required=True, type=openapi.TYPE_STRING,
enum=['chunk', 'frame', 'preview', 'context_image'],
description="Specifies the type of the requested data"),
openapi.Parameter('quality', in_=openapi.IN_QUERY, required=True, type=openapi.TYPE_STRING,
enum=['compressed', 'original'],
description="Specifies the quality level of the requested data, doesn't matter for 'preview' type"),
openapi.Parameter('number', in_=openapi.IN_QUERY, required=True, type=openapi.TYPE_NUMBER,
description="A unique number value identifying chunk or frame, doesn't matter for 'preview' type"),
]
)
@action(detail=True, methods=['POST', 'GET'])
def data(self, request, pk):
db_task = self.get_object() # call check_object_permissions as well
if request.method == 'POST':
if db_task.data:
return Response(data='Adding more data is not supported',
status=status.HTTP_400_BAD_REQUEST)
serializer = DataSerializer(data=request.data)
serializer.is_valid(raise_exception=True)
db_data = serializer.save()
db_task.data = db_data
db_task.save()
data = {k:v for k, v in serializer.data.items()}
data['use_zip_chunks'] = serializer.validated_data['use_zip_chunks']
data['use_cache'] = serializer.validated_data['use_cache']
data['copy_data'] = serializer.validated_data['copy_data']
if data['use_cache']:
db_task.data.storage_method = StorageMethodChoice.CACHE
db_task.data.save(update_fields=['storage_method'])
if data['server_files'] and not data.get('copy_data'):
db_task.data.storage = StorageChoice.SHARE
db_task.data.save(update_fields=['storage'])
if db_data.cloud_storage:
db_task.data.storage = StorageChoice.CLOUD_STORAGE
db_task.data.save(update_fields=['storage'])
# if the value of stop_frame is 0, then inside the function we cannot tell whether
# it was specified by the user or is its default value from the database
if 'stop_frame' not in serializer.validated_data:
data['stop_frame'] = None
task.create(db_task.id, data)
return Response(serializer.data, status=status.HTTP_202_ACCEPTED)
else:
data_type = request.query_params.get('type', None)
data_id = request.query_params.get('number', None)
data_quality = request.query_params.get('quality', 'compressed')
possible_data_type_values = ('chunk', 'frame', 'preview', 'context_image')
possible_quality_values = ('compressed', 'original')
try:
if not data_type or data_type not in possible_data_type_values:
raise ValidationError(detail='Data type not specified or has wrong value')
elif data_type == 'chunk' or data_type == 'frame':
if not data_id:
raise ValidationError(detail='Number is not specified')
elif data_quality not in possible_quality_values:
raise ValidationError(detail='Wrong quality value')
db_data = db_task.data
if not db_data:
raise NotFound(detail='Cannot find requested data for the task')
frame_provider = FrameProvider(db_task.data, db_task.dimension)
if data_type == 'chunk':
data_id = int(data_id)
data_quality = FrameProvider.Quality.COMPRESSED \
if data_quality == 'compressed' else FrameProvider.Quality.ORIGINAL
#TODO: av.FFmpegError processing
if settings.USE_CACHE and db_data.storage_method == StorageMethodChoice.CACHE:
buff, mime_type = frame_provider.get_chunk(data_id, data_quality)
return HttpResponse(buff.getvalue(), content_type=mime_type)
# Follow symbolic links if the chunk is a link to a real image; otherwise
# mimetype detection inside sendfile will work incorrectly.
path = os.path.realpath(frame_provider.get_chunk(data_id, data_quality))
return sendfile(request, path)
elif data_type == 'frame':
data_id = int(data_id)
data_quality = FrameProvider.Quality.COMPRESSED \
if data_quality == 'compressed' else FrameProvider.Quality.ORIGINAL
buf, mime = frame_provider.get_frame(data_id, data_quality)
return HttpResponse(buf.getvalue(), content_type=mime)
elif data_type == 'preview':
return sendfile(request, frame_provider.get_preview())
elif data_type == 'context_image':
data_id = int(data_id)
image = Image.objects.get(data_id=db_data.id, frame=data_id)
for i in image.related_files.all():
path = os.path.realpath(str(i.path))
image = cv2.imread(path)
success, result = cv2.imencode('.JPEG', image)
if not success:
raise Exception('Failed to encode image to ".jpeg" format')
return HttpResponse(io.BytesIO(result.tobytes()), content_type='image/jpeg')
return Response(data='No context image related to the frame',
status=status.HTTP_404_NOT_FOUND)
else:
return Response(data='unknown data type {}.'.format(data_type), status=status.HTTP_400_BAD_REQUEST)
except APIException as e:
return Response(data=e.get_full_details(), status=e.status_code)
except FileNotFoundError as ex:
msg = f"{ex.strerror} {ex.filename}"
slogger.task[pk].error(msg, exc_info=True)
return Response(data=msg, status=status.HTTP_404_NOT_FOUND)
except Exception as e:
msg = 'cannot get requested data type: {}, number: {}, quality: {}'.format(data_type, data_id, data_quality)
slogger.task[pk].error(msg, exc_info=True)
return Response(data=msg + '\n' + str(e), status=status.HTTP_400_BAD_REQUEST)
@swagger_auto_schema(method='get', operation_summary='Method allows downloading of task annotations',
manual_parameters=[
openapi.Parameter('format', openapi.IN_QUERY,
description="Desired output format name\nYou can get the list of supported formats at:\n/server/annotation/formats",
type=openapi.TYPE_STRING, required=False),
openapi.Parameter('filename', openapi.IN_QUERY,
description="Desired output file name",
type=openapi.TYPE_STRING, required=False),
openapi.Parameter('action', in_=openapi.IN_QUERY,
description='Used to start the downloading process after the annotation file has been created',
type=openapi.TYPE_STRING, required=False, enum=['download'])
],
responses={
'202': openapi.Response(description='Dump of annotations has been started'),
'201': openapi.Response(description='Annotations file is ready to download'),
'200': openapi.Response(description='Download of file started'),
'405': openapi.Response(description='Format is not available'),
}
)
@swagger_auto_schema(method='put', operation_summary='Method allows uploading of task annotations',
manual_parameters=[
openapi.Parameter('format', openapi.IN_QUERY,
description="Input format name\nYou can get the list of supported formats at:\n/server/annotation/formats",
type=openapi.TYPE_STRING, required=False),
],
responses={
'202': openapi.Response(description='Uploading has been started'),
'201': openapi.Response(description='Uploading has finished'),
'405': openapi.Response(description='Format is not available'),
}
)
@swagger_auto_schema(method='patch', operation_summary='Method performs a partial update of annotations in a specific task',
manual_parameters=[openapi.Parameter('action', in_=openapi.IN_QUERY, required=True, type=openapi.TYPE_STRING,
enum=['create', 'update', 'delete'])])
@swagger_auto_schema(method='delete', operation_summary='Method deletes all annotations for a specific task')
@action(detail=True, methods=['GET', 'DELETE', 'PUT', 'PATCH'],
serializer_class=LabeledDataSerializer)
def annotations(self, request, pk):
db_task = self.get_object() # force to call check_object_permissions
if request.method == 'GET':
format_name = request.query_params.get('format')
if format_name:
return _export_annotations(db_instance=db_task,
rq_id="/api/v1/tasks/{}/annotations/{}".format(pk, format_name),
request=request,
action=request.query_params.get("action", "").lower(),
callback=dm.views.export_task_annotations,
format_name=format_name,
filename=request.query_params.get("filename", "").lower(),
)
else:
data = dm.task.get_task_data(pk)
serializer = LabeledDataSerializer(data=data)
if serializer.is_valid(raise_exception=True):
return Response(serializer.data)
elif request.method == 'PUT':
format_name = request.query_params.get('format')
if format_name:
return _import_annotations(
request=request,
rq_id="{}@/api/v1/tasks/{}/annotations/upload".format(request.user, pk),
rq_func=dm.task.import_task_annotations,
pk=pk,
format_name=format_name,
)
else:
serializer = LabeledDataSerializer(data=request.data)
if serializer.is_valid(raise_exception=True):
data = dm.task.put_task_data(pk, serializer.data)
return Response(data)
elif request.method == 'DELETE':
dm.task.delete_task_data(pk)
return Response(status=status.HTTP_204_NO_CONTENT)
elif request.method == 'PATCH':
action = self.request.query_params.get("action", None)
if action not in dm.task.PatchAction.values():
raise serializers.ValidationError(
"Please specify a correct 'action' for the request")
serializer = LabeledDataSerializer(data=request.data)
if serializer.is_valid(raise_exception=True):
try:
data = dm.task.patch_task_data(pk, serializer.data, action)
except (AttributeError, IntegrityError) as e:
return Response(data=str(e), status=status.HTTP_400_BAD_REQUEST)
return Response(data)
@swagger_auto_schema(method='get', operation_summary='When a task is being created, the method returns information about the status of the creation process')
@action(detail=True, methods=['GET'], serializer_class=RqStatusSerializer)
def status(self, request, pk):
self.get_object() # force to call check_object_permissions
response = self._get_rq_response(queue="default",
job_id="/api/{}/tasks/{}".format(request.version, pk))
serializer = RqStatusSerializer(data=response)
if serializer.is_valid(raise_exception=True):
return Response(serializer.data)
@staticmethod
def _get_rq_response(queue, job_id):
queue = django_rq.get_queue(queue)
job = queue.fetch_job(job_id)
response = {}
if job is None or job.is_finished:
response = { "state": "Finished" }
elif job.is_queued:
response = { "state": "Queued" }
elif job.is_failed:
response = { "state": "Failed", "message": job.exc_info }
else:
response = { "state": "Started" }
if 'status' in job.meta:
response['message'] = job.meta['status']
return response
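# data_info below is exposed at GET /api/v1/tasks/<id>/data/meta and returns
# per-frame metadata (width, height, path, related context images) built from
# either the task's video record or its ordered image set.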
@staticmethod
@swagger_auto_schema(method='get', operation_summary='Method provides meta information about media files related to the task',
responses={'200': DataMetaSerializer()})
@action(detail=True, methods=['GET'], serializer_class=DataMetaSerializer,
url_path='data/meta')
def data_info(request, pk):
db_task = models.Task.objects.prefetch_related(
Prefetch('data', queryset=models.Data.objects.select_related('video').prefetch_related(
Prefetch('images', queryset=models.Image.objects.prefetch_related('related_files').order_by('frame'))
))
).get(pk=pk)
if hasattr(db_task.data, 'video'):
media = [db_task.data.video]
else:
media = list(db_task.data.images.all())
frame_meta = [{
'width': item.width,
'height': item.height,
'name': item.path,
'has_related_context': hasattr(item, 'related_files') and item.related_files.exists()
} for item in media]
db_data = db_task.data
db_data.frames = frame_meta
serializer = DataMetaSerializer(db_data)
return Response(serializer.data)
@swagger_auto_schema(method='get', operation_summary='Export task as a dataset in a specific format',
manual_parameters=[
openapi.Parameter('format', openapi.IN_QUERY,
description="Desired output format name\nYou can get the list of supported formats at:\n/server/annotation/formats",
type=openapi.TYPE_STRING, required=True),
openapi.Parameter('filename', openapi.IN_QUERY,
description="Desired output file name",
type=openapi.TYPE_STRING, required=False),
openapi.Parameter('action', in_=openapi.IN_QUERY,
description='Used to start the downloading process after the annotation file has been created',
type=openapi.TYPE_STRING, required=False, enum=['download'])
],
responses={'202': openapi.Response(description='Exporting has been started'),
'201': openapi.Response(description='Output file is ready for downloading'),
'200': openapi.Response(description='Download of file started'),
'405': openapi.Response(description='Format is not available'),
}
)
@action(detail=True, methods=['GET'], serializer_class=None,
url_path='dataset')
def dataset_export(self, request, pk):
db_task = self.get_object() # force to call check_object_permissions
format_name = request.query_params.get("format", "")
return _export_annotations(db_instance=db_task,
rq_id="/api/v1/tasks/{}/dataset/{}".format(pk, format_name),
request=request,
action=request.query_params.get("action", "").lower(),
callback=dm.views.export_task_as_dataset,
format_name=format_name,
filename=request.query_params.get("filename", "").lower(),
)
@method_decorator(name='retrieve', decorator=swagger_auto_schema(operation_summary='Method returns details of a job'))
@method_decorator(name='update', decorator=swagger_auto_schema(operation_summary='Method updates a job by id'))
@method_decorator(name='partial_update', decorator=swagger_auto_schema(
operation_summary='Method does a partial update of chosen fields in a job'))
class JobViewSet(viewsets.GenericViewSet,
mixins.RetrieveModelMixin, mixins.UpdateModelMixin):
queryset = Job.objects.all().order_by('id')
serializer_class = JobSerializer
def get_permissions(self):
http_method = self.request.method
permissions = [IsAuthenticated]
if http_method in SAFE_METHODS:
permissions.append(auth.JobAccessPermission)
elif http_method in ['PATCH', 'PUT', 'DELETE']:
permissions.append(auth.JobChangePermission)
else:
permissions.append(auth.AdminRolePermission)
return [perm() for perm in permissions]
@swagger_auto_schema(method='get', operation_summary='Method returns annotations for a specific job')
@swagger_auto_schema(method='put', operation_summary='Method performs an update of all annotations in a specific job')
@swagger_auto_schema(method='patch', manual_parameters=[
openapi.Parameter('action', in_=openapi.IN_QUERY, type=openapi.TYPE_STRING, required=True,
enum=['create', 'update', 'delete'])],
operation_summary='Method performs a partial update of annotations in a specific job')
@swagger_auto_schema(method='delete', operation_summary='Method deletes all annotations for a specific job')
@action(detail=True, methods=['GET', 'DELETE', 'PUT', 'PATCH'],
serializer_class=LabeledDataSerializer)
def annotations(self, request, pk):
self.get_object() # force to call check_object_permissions
if request.method == 'GET':
data = dm.task.get_job_data(pk)
return Response(data)
elif request.method == 'PUT':
format_name = request.query_params.get("format", "")
if format_name:
return _import_annotations(
request=request,
rq_id="{}@/api/v1/jobs/{}/annotations/upload".format(request.user, pk),
rq_func=dm.task.import_job_annotations,
pk=pk,
format_name=format_name
)
else:
serializer = LabeledDataSerializer(data=request.data)
if serializer.is_valid(raise_exception=True):
try:
data = dm.task.put_job_data(pk, serializer.data)
except (AttributeError, IntegrityError) as e:
return Response(data=str(e), status=status.HTTP_400_BAD_REQUEST)
return Response(data)
elif request.method == 'DELETE':
dm.task.delete_job_data(pk)
return Response(status=status.HTTP_204_NO_CONTENT)
elif request.method == 'PATCH':
action = self.request.query_params.get("action", None)
if action not in dm.task.PatchAction.values():
raise serializers.ValidationError(
"Please specify a correct 'action' for the request")
serializer = LabeledDataSerializer(data=request.data)
if serializer.is_valid(raise_exception=True):
try:
data = dm.task.patch_job_data(pk, serializer.data, action)
except (AttributeError, IntegrityError) as e:
return Response(data=str(e), status=status.HTTP_400_BAD_REQUEST)
return Response(data)
@swagger_auto_schema(method='get', operation_summary='Method returns list of reviews for the job',
responses={'200': ReviewSerializer(many=True)}
)
@action(detail=True, methods=['GET'], serializer_class=ReviewSerializer)
def reviews(self, request, pk):
db_job = self.get_object()
queryset = db_job.review_set
serializer = ReviewSerializer(queryset, context={'request': request}, many=True)
return Response(serializer.data)
@swagger_auto_schema(method='get', operation_summary='Method returns list of issues for the job',
responses={'200': CombinedIssueSerializer(many=True)}
)
@action(detail=True, methods=['GET'], serializer_class=CombinedIssueSerializer)
def issues(self, request, pk):
db_job = self.get_object()
queryset = db_job.issue_set
serializer = CombinedIssueSerializer(queryset, context={'request': request}, many=True)
return Response(serializer.data)
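# Submitting a review (handled by ReviewViewSet.create below) drives the job
# state machine: an ACCEPTED review marks the job completed, a REJECTED one
# sends it back to annotation, and REVIEW_FURTHER reassigns it to the reviewer
# supplied in the request.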
@method_decorator(name='create', decorator=swagger_auto_schema(operation_summary='Submit a review for a job'))
@method_decorator(name='destroy', decorator=swagger_auto_schema(operation_summary='Method removes a review from a job'))
class ReviewViewSet(viewsets.GenericViewSet, mixins.DestroyModelMixin, mixins.CreateModelMixin):
queryset = Review.objects.all().order_by('id')
def get_serializer_class(self):
if self.request.method == 'POST':
return CombinedReviewSerializer
else:
return ReviewSerializer
def get_permissions(self):
permissions = [IsAuthenticated]
if self.request.method == 'POST':
permissions.append(auth.JobReviewPermission)
else:
permissions.append(auth.AdminRolePermission)
return [perm() for perm in permissions]
def create(self, request, *args, **kwargs):
job_id = request.data['job']
db_job = get_object_or_404(Job, pk=job_id)
self.check_object_permissions(self.request, db_job)
if request.data['status'] == ReviewStatus.REVIEW_FURTHER:
if 'reviewer_id' not in request.data:
return Response('Must provide a new reviewer', status=status.HTTP_400_BAD_REQUEST)
reviewer_id = request.data['reviewer_id']
reviewer = get_object_or_404(User, pk=reviewer_id)
request.data.update({
'reviewer_id': request.user.id,
})
if db_job.assignee:
request.data.update({
'assignee_id': db_job.assignee.id,
})
issue_set = request.data['issue_set']
for issue in issue_set:
issue['job'] = db_job.id
issue['owner_id'] = request.user.id
comment_set = issue['comment_set']
for comment in comment_set:
comment['author_id'] = request.user.id
serializer = self.get_serializer(data=request.data, partial=True)
serializer.is_valid(raise_exception=True)
self.perform_create(serializer)
headers = self.get_success_headers(serializer.data)
if serializer.data['status'] == ReviewStatus.ACCEPTED:
db_job.status = StatusChoice.COMPLETED
db_job.save()
elif serializer.data['status'] == ReviewStatus.REJECTED:
db_job.status = StatusChoice.ANNOTATION
db_job.save()
else:
db_job.reviewer = reviewer
db_job.save()
return Response(serializer.data, status=status.HTTP_201_CREATED, headers=headers)
@method_decorator(name='destroy', decorator=swagger_auto_schema(operation_summary='Method removes an issue from a job'))
@method_decorator(name='partial_update', decorator=swagger_auto_schema(operation_summary='Method updates an issue. It is used to resolve/reopen an issue'))
class IssueViewSet(viewsets.GenericViewSet, mixins.DestroyModelMixin, mixins.UpdateModelMixin):
queryset = Issue.objects.all().order_by('id')
http_method_names = ['get', 'patch', 'delete', 'options']
def get_serializer_class(self):
return IssueSerializer
def partial_update(self, request, *args, **kwargs):
db_issue = self.get_object()
if 'resolver_id' in request.data and request.data['resolver_id'] and db_issue.resolver is None:
# resolve
db_issue.resolver = request.user
db_issue.resolved_date = datetime.now()
db_issue.save(update_fields=['resolver', 'resolved_date'])
elif 'resolver_id' in request.data and not request.data['resolver_id'] and db_issue.resolver is not None:
# reopen
db_issue.resolver = None
db_issue.resolved_date = None
db_issue.save(update_fields=['resolver', 'resolved_date'])
serializer = self.get_serializer(db_issue)
return Response(serializer.data)
def get_permissions(self):
http_method = self.request.method
permissions = [IsAuthenticated]
if http_method in SAFE_METHODS:
permissions.append(auth.IssueAccessPermission)
elif http_method in ['DELETE']:
permissions.append(auth.IssueDestroyPermission)
elif http_method in ['PATCH']:
permissions.append(auth.IssueChangePermission)
else:
permissions.append(auth.AdminRolePermission)
return [perm() for perm in permissions]
@swagger_auto_schema(method='get', operation_summary='The action returns all comments of a specific issue',
responses={'200': CommentSerializer(many=True)}
)
@action(detail=True, methods=['GET'], serializer_class=CommentSerializer)
def comments(self, request, pk):
db_issue = self.get_object()
queryset = db_issue.comment_set
serializer = CommentSerializer(queryset, context={'request': request}, many=True)
return Response(serializer.data)
@method_decorator(name='partial_update', decorator=swagger_auto_schema(operation_summary='Method updates comment in an issue'))
@method_decorator(name='destroy', decorator=swagger_auto_schema(operation_summary='Method removes a comment from an issue'))
class CommentViewSet(viewsets.GenericViewSet,
mixins.DestroyModelMixin, mixins.UpdateModelMixin, mixins.CreateModelMixin):
queryset = Comment.objects.all().order_by('id')
serializer_class = CommentSerializer
http_method_names = ['get', 'post', 'patch', 'delete', 'options']
def create(self, request, *args, **kwargs):
request.data.update({
'author_id': request.user.id,
})
issue_id = request.data['issue']
db_issue = get_object_or_404(Issue, pk=issue_id)
self.check_object_permissions(self.request, db_issue.job)
return super().create(request, *args, **kwargs)
def get_permissions(self):
http_method = self.request.method
permissions = [IsAuthenticated]
if http_method in ['PATCH', 'DELETE']:
permissions.append(auth.CommentChangePermission)
elif http_method in ['POST']:
permissions.append(auth.CommentCreatePermission)
else:
permissions.append(auth.AdminRolePermission)
return [perm() for perm in permissions]
class UserFilter(filters.FilterSet):
class Meta:
model = User
fields = ("id", "is_active")
@method_decorator(name='list', decorator=swagger_auto_schema(
manual_parameters=[
openapi.Parameter('id',openapi.IN_QUERY,description="A unique number value identifying this user",type=openapi.TYPE_NUMBER),
openapi.Parameter('is_active',openapi.IN_QUERY,description="Returns only active users",type=openapi.TYPE_BOOLEAN),
],
operation_summary='Method provides a paginated list of users registered on the server'))
@method_decorator(name='retrieve', decorator=swagger_auto_schema(
operation_summary='Method provides information of a specific user'))
@method_decorator(name='partial_update', decorator=swagger_auto_schema(
operation_summary='Method updates chosen fields of a user'))
@method_decorator(name='destroy', decorator=swagger_auto_schema(
operation_summary='Method deletes a specific user from the server'))
class UserViewSet(viewsets.GenericViewSet, mixins.ListModelMixin,
mixins.RetrieveModelMixin, mixins.UpdateModelMixin, mixins.DestroyModelMixin):
queryset = User.objects.prefetch_related('groups').all().order_by('id')
http_method_names = ['get', 'post', 'head', 'patch', 'delete']
search_fields = ('username', 'first_name', 'last_name')
filterset_class = UserFilter
def get_serializer_class(self):
user = self.request.user
if user.is_staff:
return UserSerializer
else:
is_self = int(self.kwargs.get("pk", 0)) == user.id or \
self.action == "self"
if is_self and self.request.method in SAFE_METHODS:
return UserSerializer
else:
return BasicUserSerializer
def get_permissions(self):
permissions = [IsAuthenticated]
user = self.request.user
if not self.request.method in SAFE_METHODS:
is_self = int(self.kwargs.get("pk", 0)) == user.id
if not is_self:
permissions.append(auth.AdminRolePermission)
return [perm() for perm in permissions]
@swagger_auto_schema(method='get', operation_summary='Method returns an instance of a user who is currently authorized')
@action(detail=False, methods=['GET'])
def self(self, request):
"""
Method returns an instance of a user who is currently authorized
"""
serializer_class = self.get_serializer_class()
serializer = serializer_class(request.user, context={ "request": request })
return Response(serializer.data)
class RedefineDescriptionField(FieldInspector):
# pylint: disable=no-self-use
def process_result(self, result, method_name, obj, **kwargs):
if isinstance(result, openapi.Schema):
if hasattr(result, 'title') and result.title == 'Specific attributes':
result.description = 'structure like key1=value1&key2=value2\n' \
'supported: range=aws_range'
return result
class CloudStorageFilter(filters.FilterSet):
display_name = filters.CharFilter(field_name='display_name', lookup_expr='icontains')
provider_type = filters.CharFilter(field_name='provider_type', lookup_expr='icontains')
resource = filters.CharFilter(field_name='resource', lookup_expr='icontains')
credentials_type = filters.CharFilter(field_name='credentials_type', lookup_expr='icontains')
description = filters.CharFilter(field_name='description', lookup_expr='icontains')
owner = filters.CharFilter(field_name='owner__username', lookup_expr='icontains')
class Meta:
model = models.CloudStorage
fields = ('id', 'display_name', 'provider_type', 'resource', 'credentials_type', 'description', 'owner')
@method_decorator(
name='retrieve',
decorator=swagger_auto_schema(
operation_summary='Method returns details of a specific cloud storage',
responses={
'200': openapi.Response(description='Details of a storage'),
},
tags=['cloud storages']
)
)
@method_decorator(name='list', decorator=swagger_auto_schema(
operation_summary='Returns a paginated list of storages according to query parameters',
manual_parameters=[
openapi.Parameter('provider_type', openapi.IN_QUERY, description="A supported provider of cloud storages",
type=openapi.TYPE_STRING, enum=CloudProviderChoice.list()),
openapi.Parameter('display_name', openapi.IN_QUERY, description="A display name of storage", type=openapi.TYPE_STRING),
openapi.Parameter('resource', openapi.IN_QUERY, description="A name of bucket or container", type=openapi.TYPE_STRING),
openapi.Parameter('owner', openapi.IN_QUERY, description="A resource owner", type=openapi.TYPE_STRING),
openapi.Parameter('credentials_type', openapi.IN_QUERY, description="A type of a granting access", type=openapi.TYPE_STRING, enum=CredentialsTypeChoice.list()),
],
responses={'200': BaseCloudStorageSerializer(many=True)},
tags=['cloud storages'],
field_inspectors=[RedefineDescriptionField]
)
)
@method_decorator(name='destroy', decorator=swagger_auto_schema(
operation_summary='Method deletes a specific cloud storage',
tags=['cloud storages']
)
)
@method_decorator(name='partial_update', decorator=swagger_auto_schema(
operation_summary='Method does a partial update of chosen fields in a cloud storage instance',
tags=['cloud storages'],
field_inspectors=[RedefineDescriptionField]
)
)
class CloudStorageViewSet(auth.CloudStorageGetQuerySetMixin, viewsets.ModelViewSet):
http_method_names = ['get', 'post', 'patch', 'delete']
queryset = CloudStorageModel.objects.all().prefetch_related('data').order_by('-id')
search_fields = ('provider_type', 'display_name', 'resource', 'credentials_type', 'owner__username', 'description')
filterset_class = CloudStorageFilter
def get_permissions(self):
http_method = self.request.method
permissions = [IsAuthenticated]
if http_method in SAFE_METHODS:
permissions.append(auth.CloudStorageAccessPermission)
elif http_method in ("POST", "PATCH", "DELETE"):
permissions.append(auth.CloudStorageChangePermission)
else:
permissions.append(auth.AdminRolePermission)
return [perm() for perm in permissions]
def get_serializer_class(self):
if self.request.method in ("POST", "PATCH"):
return CloudStorageSerializer
else:
return BaseCloudStorageSerializer
def get_queryset(self):
queryset = super().get_queryset()
provider_type = self.request.query_params.get('provider_type', None)
if provider_type:
if provider_type in CloudProviderChoice.list():
return queryset.filter(provider_type=provider_type)
raise ValidationError('Unsupported type of cloud provider')
return queryset
def perform_create(self, serializer):
serializer.save(owner=self.request.user)
def perform_destroy(self, instance):
cloud_storage_dirname = instance.get_storage_dirname()
super().perform_destroy(instance)
shutil.rmtree(cloud_storage_dirname, ignore_errors=True)
@method_decorator(name='create', decorator=swagger_auto_schema(
operation_summary='Method creates a cloud storage with specified characteristics',
responses={
'201': openapi.Response(description='A storage has been created')
},
tags=['cloud storages'],
field_inspectors=[RedefineDescriptionField],
)
)
def create(self, request, *args, **kwargs):
try:
response = super().create(request, *args, **kwargs)
except IntegrityError:
response = HttpResponseBadRequest('Same storage already exists')
except ValidationError as exceptions:
msg_body = ""
for ex in exceptions.args:
for field, ex_msg in ex.items():
msg_body += ': '.join([field, ex_msg if isinstance(ex_msg, str) else str(ex_msg[0])])
msg_body += '\n'
return HttpResponseBadRequest(msg_body)
except APIException as ex:
return Response(data=ex.get_full_details(), status=ex.status_code)
except Exception as ex:
response = HttpResponseBadRequest(str(ex))
return response
@swagger_auto_schema(
method='get',
operation_summary='Method returns a manifest content',
manual_parameters=[
openapi.Parameter('manifest_path', openapi.IN_QUERY,
description="Path to the manifest file in a cloud storage",
type=openapi.TYPE_STRING)
],
responses={
'200': openapi.Response(description='A manifest content'),
},
tags=['cloud storages']
)
@action(detail=True, methods=['GET'], url_path='content')
def content(self, request, pk):
try:
db_storage = CloudStorageModel.objects.get(pk=pk)
credentials = Credentials()
credentials.convert_from_db({
'type': db_storage.credentials_type,
'value': db_storage.credentials,
})
details = {
'resource': db_storage.resource,
'credentials': credentials,
'specific_attributes': db_storage.get_specific_attributes()
}
storage = get_cloud_storage_instance(cloud_provider=db_storage.provider_type, **details)
if not db_storage.manifests.count():
raise Exception('There is no manifest file')
manifest_path = request.query_params.get('manifest_path', 'manifest.jsonl')
file_status = storage.get_file_status(manifest_path)
if file_status == Status.NOT_FOUND:
raise FileNotFoundError(errno.ENOENT,
"Not found on the cloud storage {}".format(db_storage.display_name), manifest_path)
elif file_status == Status.FORBIDDEN:
raise PermissionError(errno.EACCES,
"Access to the file on the '{}' cloud storage is denied".format(db_storage.display_name), manifest_path)
full_manifest_path = os.path.join(db_storage.get_storage_dirname(), manifest_path)
if not os.path.exists(full_manifest_path) or \
datetime.utcfromtimestamp(os.path.getmtime(full_manifest_path)).replace(tzinfo=pytz.UTC) < storage.get_file_last_modified(manifest_path):
storage.download_file(manifest_path, full_manifest_path)
manifest = ImageManifestManager(full_manifest_path, db_storage.get_storage_dirname())
# need to update index
manifest.set_index()
manifest_files = manifest.data
return Response(data=manifest_files, content_type="text/plain")
except CloudStorageModel.DoesNotExist:
message = f"Storage {pk} does not exist"
slogger.glob.error(message)
return HttpResponseNotFound(message)
except FileNotFoundError as ex:
msg = f"{ex.strerror} {ex.filename}"
slogger.cloud_storage[pk].info(msg)
return Response(data=msg, status=status.HTTP_404_NOT_FOUND)
except Exception as ex:
# check that cloud storage was not deleted
storage_status = storage.get_status()
if storage_status == Status.FORBIDDEN:
msg = 'The resource {} is no longer available. Access forbidden.'.format(storage.name)
elif storage_status == Status.NOT_FOUND:
msg = 'The resource {} was not found. It may have been deleted.'.format(storage.name)
else:
msg = str(ex)
return HttpResponseBadRequest(msg)
@swagger_auto_schema(
method='get',
operation_summary='Method returns a preview image from a cloud storage',
responses={
'200': openapi.Response(description='Preview'),
},
tags=['cloud storages']
)
@action(detail=True, methods=['GET'], url_path='preview')
def preview(self, request, pk):
try:
db_storage = CloudStorageModel.objects.get(pk=pk)
if not os.path.exists(db_storage.get_preview_path()):
credentials = Credentials()
credentials.convert_from_db({
'type': db_storage.credentials_type,
'value': db_storage.credentials,
})
details = {
'resource': db_storage.resource,
'credentials': credentials,
'specific_attributes': db_storage.get_specific_attributes()
}
storage = get_cloud_storage_instance(cloud_provider=db_storage.provider_type, **details)
if not db_storage.manifests.count():
raise Exception('Cannot get the cloud storage preview. There is no manifest file')
preview_path = None
for manifest_model in db_storage.manifests.all():
full_manifest_path = os.path.join(db_storage.get_storage_dirname(), manifest_model.filename)
if not os.path.exists(full_manifest_path) or \
datetime.utcfromtimestamp(os.path.getmtime(full_manifest_path)).replace(tzinfo=pytz.UTC) < storage.get_file_last_modified(manifest_model.filename):
storage.download_file(manifest_model.filename, full_manifest_path)
manifest = ImageManifestManager(
os.path.join(db_storage.get_storage_dirname(), manifest_model.filename),
db_storage.get_storage_dirname()
)
# need to update index
manifest.set_index()
if not len(manifest):
continue
preview_info = manifest[0]
preview_path = ''.join([preview_info['name'], preview_info['extension']])
break
if not preview_path:
msg = 'Cloud storage {} does not contain any images'.format(pk)
slogger.cloud_storage[pk].info(msg)
return HttpResponseBadRequest(msg)
file_status = storage.get_file_status(preview_path)
if file_status == Status.NOT_FOUND:
raise FileNotFoundError(errno.ENOENT,
"Not found on the cloud storage {}".format(db_storage.display_name), preview_path)
elif file_status == Status.FORBIDDEN:
raise PermissionError(errno.EACCES,
"Access to the file on the '{}' cloud storage is denied".format(db_storage.display_name), preview_path)
with NamedTemporaryFile() as temp_image:
storage.download_file(preview_path, temp_image.name)
reader = ImageListReader([temp_image.name])
preview = reader.get_preview()
preview.save(db_storage.get_preview_path())
content_type = mimetypes.guess_type(db_storage.get_preview_path())[0]
return HttpResponse(open(db_storage.get_preview_path(), 'rb').read(), content_type)
except CloudStorageModel.DoesNotExist:
message = f"Storage {pk} does not exist"
slogger.glob.error(message)
return HttpResponseNotFound(message)
except Exception as ex:
# check that cloud storage was not deleted
storage_status = storage.get_status()
if storage_status == Status.FORBIDDEN:
msg = 'The resource {} is no longer available. Access forbidden.'.format(storage.name)
elif storage_status == Status.NOT_FOUND:
msg = 'The resource {} was not found. It may have been deleted.'.format(storage.name)
else:
msg = str(ex)
return HttpResponseBadRequest(msg)
@swagger_auto_schema(
method='get',
operation_summary='Method returns a cloud storage status',
responses={
'200': openapi.Response(description='Status'),
},
tags=['cloud storages']
)
@action(detail=True, methods=['GET'], url_path='status')
def status(self, request, pk):
try:
db_storage = CloudStorageModel.objects.get(pk=pk)
credentials = Credentials()
credentials.convert_from_db({
'type': db_storage.credentials_type,
'value': db_storage.credentials,
})
details = {
'resource': db_storage.resource,
'credentials': credentials,
'specific_attributes': db_storage.get_specific_attributes()
}
storage = get_cloud_storage_instance(cloud_provider=db_storage.provider_type, **details)
storage_status = storage.get_status()
return HttpResponse(storage_status)
except CloudStorageModel.DoesNotExist:
message = f"Storage {pk} does not exist"
slogger.glob.error(message)
return HttpResponseNotFound(message)
except Exception as ex:
msg = str(ex)
return HttpResponseBadRequest(msg)
def rq_handler(job, exc_type, exc_value, tb):
job.exc_info = "".join(
traceback.format_exception_only(exc_type, exc_value))
job.save()
if "tasks" in job.id.split("/"):
return task.rq_handler(job, exc_type, exc_value, tb)
return True
# TODO: Method should be reimplemented as a separate view
# @swagger_auto_schema(method='put', manual_parameters=[openapi.Parameter('format', in_=openapi.IN_QUERY,
# description='A name of a loader\nYou can get annotation loaders from this API:\n/server/annotation/formats',
# required=True, type=openapi.TYPE_STRING)],
# operation_summary='Method allows to upload annotations',
# responses={'202': openapi.Response(description='Load of annotations has been started'),
# '201': openapi.Response(description='Annotations have been uploaded')},
# tags=['tasks'])
# @api_view(['PUT'])
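# Shared helper for the PUT .../annotations?format=... endpoints: it validates
# the requested import format, writes the uploaded file to a temporary
# location, scans it, and enqueues rq_func on the "default" RQ queue; repeated
# requests with the same rq_id poll the job and return 201 on success.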
def _import_annotations(request, rq_id, rq_func, pk, format_name):
format_desc = {f.DISPLAY_NAME: f
for f in dm.views.get_import_formats()}.get(format_name)
if format_desc is None:
raise serializers.ValidationError(
"Unknown input format '{}'".format(format_name))
elif not format_desc.ENABLED:
return Response(status=status.HTTP_405_METHOD_NOT_ALLOWED)
queue = django_rq.get_queue("default")
rq_job = queue.fetch_job(rq_id)
if not rq_job:
serializer = AnnotationFileSerializer(data=request.data)
if serializer.is_valid(raise_exception=True):
anno_file = serializer.validated_data['annotation_file']
fd, filename = mkstemp(prefix='cvat_{}'.format(pk))
with open(filename, 'wb+') as f:
for chunk in anno_file.chunks():
f.write(chunk)
av_scan_paths(filename)
rq_job = queue.enqueue_call(
func=rq_func,
args=(pk, filename, format_name),
job_id=rq_id
)
rq_job.meta['tmp_file'] = filename
rq_job.meta['tmp_file_descriptor'] = fd
rq_job.save_meta()
else:
if rq_job.is_finished:
os.close(rq_job.meta['tmp_file_descriptor'])
os.remove(rq_job.meta['tmp_file'])
rq_job.delete()
return Response(status=status.HTTP_201_CREATED)
elif rq_job.is_failed:
os.close(rq_job.meta['tmp_file_descriptor'])
os.remove(rq_job.meta['tmp_file'])
exc_info = str(rq_job.exc_info)
rq_job.delete()
# RQ adds a prefix with exception class name
import_error_prefix = '{}.{}'.format(
CvatImportError.__module__, CvatImportError.__name__)
if exc_info.startswith(import_error_prefix):
exc_info = exc_info.replace(import_error_prefix + ': ', '')
return Response(data=exc_info,
status=status.HTTP_400_BAD_REQUEST)
else:
return Response(data=exc_info,
status=status.HTTP_500_INTERNAL_SERVER_ERROR)
return Response(status=status.HTTP_202_ACCEPTED)
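# Shared helper for the GET export endpoints (annotation and dataset dumps for
# tasks and projects). The rq_id acts as a cache key: a finished job whose
# request_time predates the last update of the instance is cancelled and
# re-enqueued, so stale dumps are never served to the client.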
def _export_annotations(db_instance, rq_id, request, format_name, action, callback, filename):
if action not in {"", "download"}:
raise serializers.ValidationError(
"Unexpected action specified for the request")
format_desc = {f.DISPLAY_NAME: f
for f in dm.views.get_export_formats()}.get(format_name)
if format_desc is None:
raise serializers.ValidationError(
"Unknown format specified for the request")
elif not format_desc.ENABLED:
return Response(status=status.HTTP_405_METHOD_NOT_ALLOWED)
queue = django_rq.get_queue("default")
rq_job = queue.fetch_job(rq_id)
if rq_job:
last_instance_update_time = timezone.localtime(db_instance.updated_date)
if isinstance(db_instance, Project):
tasks_update = list(map(lambda db_task: timezone.localtime(db_task.updated_date), db_instance.tasks.all()))
last_instance_update_time = max(tasks_update + [last_instance_update_time])
request_time = rq_job.meta.get('request_time', None)
if request_time is None or request_time < last_instance_update_time:
rq_job.cancel()
rq_job.delete()
else:
if rq_job.is_finished:
file_path = rq_job.return_value
if action == "download" and osp.exists(file_path):
rq_job.delete()
timestamp = datetime.strftime(last_instance_update_time,
"%Y_%m_%d_%H_%M_%S")
filename = filename or \
"{}_{}-{}-{}{}".format(
"project" if isinstance(db_instance, models.Project) else "task",
db_instance.name, timestamp,
format_name, osp.splitext(file_path)[1]
)
return sendfile(request, file_path, attachment=True,
attachment_filename=filename.lower())
else:
if osp.exists(file_path):
return Response(status=status.HTTP_201_CREATED)
elif rq_job.is_failed:
exc_info = str(rq_job.exc_info)
rq_job.delete()
return Response(exc_info,
status=status.HTTP_500_INTERNAL_SERVER_ERROR)
else:
return Response(status=status.HTTP_202_ACCEPTED)
try:
if request.scheme:
server_address = request.scheme + '://'
server_address += request.get_host()
except Exception:
server_address = None
ttl = (dm.views.PROJECT_CACHE_TTL if isinstance(db_instance, Project) else dm.views.TASK_CACHE_TTL).total_seconds()
queue.enqueue_call(func=callback,
args=(db_instance.id, format_name, server_address), job_id=rq_id,
meta={ 'request_time': timezone.localtime() },
result_ttl=ttl, failure_ttl=ttl)
return Response(status=status.HTTP_202_ACCEPTED)
| []
| []
| [
"CVAT_ANALYTICS",
"CVAT_SERVERLESS"
]
| [] | ["CVAT_ANALYTICS", "CVAT_SERVERLESS"] | python | 2 | 0 | |
vendor/github.com/aliyun/alibaba-cloud-sdk-go/integration/utils.go | package integration
import (
"fmt"
"os"
"testing"
)
const InstanceNamePrefix = "SdkIntegrationTestInstance"
type Config struct {
AccessKeyId string
AccessKeySecret string
PublicKeyId string
PrivateKey string
RoleArn string
ChildAK string
ChildSecret string
}
func getConfigFromEnv() *Config {
config := &Config{
AccessKeyId: os.Getenv("ACCESS_KEY_ID"),
AccessKeySecret: os.Getenv("ACCESS_KEY_SECRET"),
PublicKeyId: os.Getenv("PUBLIC_KEY_ID"),
PrivateKey: os.Getenv("PRIVATE_KEY"),
RoleArn: os.Getenv("ROLE_ARN"),
ChildAK: os.Getenv("CHILD_AK"),
ChildSecret: os.Getenv("CHILD_SECRET"),
}
if config.AccessKeyId == "" {
panic("Get ACCESS_KEY_ID from environment variables failed")
} else {
return config
}
}
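// getConfigFromEnv only hard-fails on a missing ACCESS_KEY_ID; the remaining
// fields default to empty strings. A rough sketch of the environment expected
// before running the integration tests (values are placeholders only):
//   export ACCESS_KEY_ID=your-access-key-id
//   export ACCESS_KEY_SECRET=your-access-key-secret
//   export DEMO_ECS_INSTANCE_ID=i-example123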
func getEcsDemoInstanceId() string {
return os.Getenv("DEMO_ECS_INSTANCE_ID")
}
func assertErrorNil(t *testing.T, err error, message string) {
if err != nil {
fmt.Fprintf(os.Stderr, message+": %v\n", err)
}
}
| [
"\"ACCESS_KEY_ID\"",
"\"ACCESS_KEY_SECRET\"",
"\"PUBLIC_KEY_ID\"",
"\"PRIVATE_KEY\"",
"\"ROLE_ARN\"",
"\"CHILD_AK\"",
"\"CHILD_SECRET\"",
"\"DEMO_ECS_INSTANCE_ID\""
]
| []
| [
"CHILD_AK",
"PUBLIC_KEY_ID",
"ROLE_ARN",
"PRIVATE_KEY",
"DEMO_ECS_INSTANCE_ID",
"ACCESS_KEY_SECRET",
"ACCESS_KEY_ID",
"CHILD_SECRET"
]
| [] | ["CHILD_AK", "PUBLIC_KEY_ID", "ROLE_ARN", "PRIVATE_KEY", "DEMO_ECS_INSTANCE_ID", "ACCESS_KEY_SECRET", "ACCESS_KEY_ID", "CHILD_SECRET"] | go | 8 | 0 | |
tasks/task01.py | import tensorflow as tf
from tensorflow_probability import distributions as tfd
from tensorflow import keras
import numpy as np
import os
import argparse
import datetime
import time
import sys
sys.path.insert(0, './src')
sys.path.insert(0, './tasks')
import utils
import iwae1
import iwae2
import plot_task01
parser = argparse.ArgumentParser()
parser.add_argument("--stochastic_layers", type=int, default=1, choices=[1, 2], help="number of stochastic layers in the model")
parser.add_argument("--n_samples", type=int, default=5, help="number of importance samples")
parser.add_argument("--batch_size", type=int, default=20, help="batch size")
parser.add_argument("--epochs", type=int, default=-1,
help="numper of epochs, if set to -1 number of epochs "
"will be set based on the learning rate scheme from the paper")
parser.add_argument("--objective", type=str, default="iwae_elbo", choices=["vae_elbo", "iwae_elbo", "iwae_eq14", "vae_elbo_kl"])
parser.add_argument("--gpu", type=str, default='0', help="Choose GPU")
args = parser.parse_args()
print(args)
# ---- string describing the experiment, to use in tensorboard and plots
string = "task01_{0}_{1}_{2}".format(args.objective, args.stochastic_layers, args.n_samples)
# ---- set the visible GPU devices
os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu
# ---- dynamic GPU memory allocation
gpus = tf.config.list_physical_devices('GPU')
if gpus:
tf.config.experimental.set_memory_growth(gpus[0], True)
# ---- set random seeds
np.random.seed(123)
tf.random.set_seed(123)
# ---- number of passes over the data, see bottom of page 6 in [1]
if args.epochs == -1:
epochs = 0
learning_rate_dict = {}
for i in range(8):
learning_rate = 0.001 * 10**(-i/7)
learning_rate_dict[epochs] = learning_rate
epochs += 3 ** i
else:
epochs = args.epochs
learning_rate_dict = {}
learning_rate_dict[0] = 0.0001
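# With the paper's scheme the i-th stage lasts 3**i epochs at a learning rate
# of 0.001 * 10**(-i/7) for i = 0..7, i.e. sum(3**i for i in range(8)) = 3280
# epochs in total, annealing the rate from 1e-3 down to 1e-4.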
# ---- load data
(Xtrain, ytrain), (Xtest, ytest) = keras.datasets.mnist.load_data()
Ntrain = Xtrain.shape[0]
Ntest = Xtest.shape[0]
# ---- reshape to vectors
Xtrain = Xtrain.reshape(Ntrain, -1) / 255
Xtest = Xtest.reshape(Ntest, -1) / 255
# ---- experiment settings
objective = args.objective
n_samples = args.n_samples
batch_size = args.batch_size
steps_pr_epoch = Ntrain // batch_size
total_steps = steps_pr_epoch * epochs
# ---- prepare tensorboard
current_time = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
train_log_dir = "/tmp/iwae/{0}/".format(string) + current_time + "/train"
train_summary_writer = tf.summary.create_file_writer(train_log_dir)
test_log_dir = "/tmp/iwae/{0}/".format(string) + current_time + "/test"
test_summary_writer = tf.summary.create_file_writer(test_log_dir)
# ---- instantiate the model, optimizer and metrics
if args.stochastic_layers == 1:
n_latent = [2]
n_hidden = [200]
model = iwae1.IWAE(n_hidden[0], n_latent[0])
else:
n_latent = [2, 2]
n_hidden = [200, 100]
model = iwae2.IWAE(n_hidden, n_latent)
optimizer = keras.optimizers.Adam(learning_rate_dict[0], epsilon=1e-4)
print("Initial learning rate: ", optimizer.learning_rate.numpy())
# ---- prepare plotting of samples during training
# use the same samples from the prior throughout training
pz = tfd.Normal(0, 1)
z = pz.sample([100, n_latent[-1]])
plt_epochs = list(2**np.arange(12))
plt_epochs.insert(0, 0)
plt_epochs.append(epochs-1)
# ---- binarize the test data
# we'll only do this once, while the training data is binarized at the
# start of each epoch
Xtest = utils.bernoullisample(Xtest)
# ---- do the training
start = time.time()
best = float(-np.inf)
for epoch in range(epochs):
# ---- binarize the training data at the start of each epoch
Xtrain_binarized = utils.bernoullisample(Xtrain)
train_dataset = (tf.data.Dataset.from_tensor_slices(Xtrain_binarized)
.shuffle(Ntrain).batch(batch_size))
# ---- plot samples from the prior at this epoch
if epoch in plt_epochs:
model.generate_and_save_images(z, epoch, string)
model.generate_and_save_posteriors(Xtest, ytest, 10, epoch, string)
# ---- check if the learning rate needs to be updated
if args.epochs == -1 and epoch in learning_rate_dict:
new_learning_rate = learning_rate_dict[epoch]
old_learning_rate = optimizer.learning_rate.numpy()
print("Changing learning rate from {0} to {1}".format(old_learning_rate, new_learning_rate))
optimizer.learning_rate.assign(new_learning_rate)
for _step, x_batch in enumerate(train_dataset):
step = _step + steps_pr_epoch * epoch
# ---- warm-up
beta = 1.0
# beta = np.min([step / 200000, 1.0]).astype(np.float32)
# ---- one training step
res = model.train_step(x_batch, n_samples, beta, optimizer, objective=objective)
if step % 200 == 0:
# ---- write training stats to tensorboard
with train_summary_writer.as_default():
model.write_to_tensorboard(res, step)
# ---- monitor the test-set
test_res = model.val_step(Xtest, n_samples, beta)
# ---- write test stats to tensorboard
with test_summary_writer.as_default():
model.write_to_tensorboard(test_res, step)
took = time.time() - start
start = time.time()
print("epoch {0}/{1}, step {2}/{3}, train ELBO: {4:.2f}, val ELBO: {5:.2f}, time: {6:.2f}"
.format(epoch, epochs, step, total_steps, res[objective].numpy(), test_res[objective], took))
# ---- save final weights
model.save_weights('/tmp/iwae/{0}/final_weights'.format(string))
# ---- load the final weights?
# model.load_weights('/tmp/iwae/{0}/final_weights'.format(string))
# ---- test-set llh estimate using 5000 samples
test_elbo_metric = utils.MyMetric()
L = 5000
# ---- since we are using 5000 importance samples we have to loop over each element of the test-set
for i, x in enumerate(Xtest):
res = model(x[None, :].astype(np.float32), L)
test_elbo_metric.update_state(res['iwae_elbo'][None, None])
if i % 200 == 0:
print("{0}/{1}".format(i, Ntest))
test_set_llh = test_elbo_metric.result()
test_elbo_metric.reset_states()
print("Test-set {0} sample log likelihood estimate: {1:.4f}".format(L, test_set_llh))
# ---- plot variational and true posteriors
plot_task01.plot(model, Xtest, string)
| []
| []
| [
"CUDA_VISIBLE_DEVICES"
]
| [] | ["CUDA_VISIBLE_DEVICES"] | python | 1 | 0 | |
drivers/node/ibm/ibm.go | package ibm
import (
"os"
"time"
"github.com/sirupsen/logrus"
"github.com/libopenstorage/cloudops"
iks "github.com/libopenstorage/cloudops/ibm"
"github.com/portworx/torpedo/drivers/node"
"github.com/portworx/torpedo/drivers/node/ssh"
)
const (
// DriverName is the name of the ibm driver
DriverName = "ibm"
)
type ibm struct {
ssh.SSH
ops cloudops.Ops
instanceGroup string
}
func (i *ibm) String() string {
return DriverName
}
func (i *ibm) Init(nodeOpts node.InitOptions) error {
i.SSH.Init(nodeOpts)
instanceGroup := os.Getenv("INSTANCE_GROUP")
if len(instanceGroup) != 0 {
i.instanceGroup = instanceGroup
} else {
i.instanceGroup = "default"
}
ops, err := iks.NewClient()
if err != nil {
return err
}
i.ops = ops
return nil
}
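// SetASGClusterSize and GetASGClusterSize below operate on the per-zone node
// count of the instance group resolved in Init (the INSTANCE_GROUP environment
// variable, falling back to "default").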
func (i *ibm) SetASGClusterSize(perZoneCount int64, timeout time.Duration) error {
// IBM SDK requires per zone cluster size
err := i.ops.SetInstanceGroupSize(i.instanceGroup, perZoneCount, timeout)
if err != nil {
logrus.Errorf("failed to set size of node pool %s. Error: %v", i.instanceGroup, err)
return err
}
return nil
}
func (i *ibm) GetASGClusterSize() (int64, error) {
nodeCount, err := i.ops.GetInstanceGroupSize(i.instanceGroup)
if err != nil {
logrus.Errorf("failed to get size of node pool %s. Error: %v", i.instanceGroup, err)
return 0, err
}
return nodeCount, nil
}
func (i *ibm) GetZones() ([]string, error) {
asgInfo, err := i.ops.InspectInstanceGroupForInstance(i.ops.InstanceID())
if err != nil {
return []string{}, err
}
return asgInfo.Zones, nil
}
func init() {
i := &ibm{
SSH: *ssh.New(),
}
node.Register(DriverName, i)
}
| ["\"INSTANCE_GROUP\""] | [] | ["INSTANCE_GROUP"] | [] | ["INSTANCE_GROUP"] | go | 1 | 0 | |
appengine/pusher-chat/src/main/java/com/example/appengine/pusher/PusherService.java | /*
* Copyright 2017 Google Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.example.appengine.pusher;
import com.pusher.rest.Pusher;
// [START pusher_server_initialize]
public abstract class PusherService {
public static final String APP_KEY = System.getenv("PUSHER_APP_KEY");
public static final String CLUSTER = System.getenv("PUSHER_CLUSTER");
private static final String APP_ID = System.getenv("PUSHER_APP_ID");
private static final String APP_SECRET = System.getenv("PUSHER_APP_SECRET");
private static Pusher instance;
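// Lazily creates and caches a Pusher client configured from the PUSHER_* environment variables above.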
static Pusher getDefaultInstance() {
if (instance != null) {
return instance;
} // Instantiate a pusher
Pusher pusher = new Pusher(APP_ID, APP_KEY, APP_SECRET);
pusher.setCluster(CLUSTER); // required, if not default mt1 (us-east-1)
pusher.setEncrypted(true); // optional, ensure subscriber also matches these settings
instance = pusher;
return pusher;
}
}
// [END pusher_server_initialize]
| ["\"PUSHER_APP_KEY\"", "\"PUSHER_CLUSTER\"", "\"PUSHER_APP_ID\"", "\"PUSHER_APP_SECRET\""] | [] | ["PUSHER_APP_SECRET", "PUSHER_APP_KEY", "PUSHER_APP_ID", "PUSHER_CLUSTER"] | [] | ["PUSHER_APP_SECRET", "PUSHER_APP_KEY", "PUSHER_APP_ID", "PUSHER_CLUSTER"] | java | 4 | 0 |
polygerrit-ui/server.go | // Copyright (C) 2015 The Android Open Source Project
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import (
"archive/zip"
"bufio"
"bytes"
"compress/gzip"
"encoding/json"
"errors"
"flag"
"io"
"io/ioutil"
"log"
"net"
"net/http"
"net/url"
"os"
"path/filepath"
"regexp"
"strings"
"golang.org/x/tools/godoc/vfs/httpfs"
"golang.org/x/tools/godoc/vfs/zipfs"
)
var (
plugins = flag.String("plugins", "", "comma separated plugin paths to serve")
port = flag.String("port", ":8081", "Port to serve HTTP requests on")
host = flag.String("host", "gerrit-review.googlesource.com", "Host to proxy requests to")
scheme = flag.String("scheme", "https", "URL scheme")
cdnPattern = regexp.MustCompile("https://cdn.googlesource.com/polygerrit_ui/[0-9.]*")
)
func main() {
flag.Parse()
fontsArchive, err := openDataArchive("fonts.zip")
if err != nil {
log.Fatal(err)
}
componentsArchive, err := openDataArchive("app/test_components.zip")
if err != nil {
log.Fatal(err)
}
workspace := os.Getenv("BUILD_WORKSPACE_DIRECTORY")
if err := os.Chdir(filepath.Join(workspace, "polygerrit-ui")); err != nil {
log.Fatal(err)
}
http.Handle("/", http.FileServer(http.Dir("app")))
http.Handle("/bower_components/",
http.FileServer(httpfs.New(zipfs.New(componentsArchive, "bower_components"))))
http.Handle("/fonts/",
http.FileServer(httpfs.New(zipfs.New(fontsArchive, "fonts"))))
http.HandleFunc("/index.html", handleIndex)
http.HandleFunc("/changes/", handleProxy)
http.HandleFunc("/accounts/", handleProxy)
http.HandleFunc("/config/", handleProxy)
http.HandleFunc("/projects/", handleProxy)
http.HandleFunc("/static/", handleProxy)
http.HandleFunc("/accounts/self/detail", handleAccountDetail)
if len(*plugins) > 0 {
http.Handle("/plugins/", http.StripPrefix("/plugins/",
http.FileServer(http.Dir("../plugins"))))
log.Println("Local plugins from", "../plugins")
} else {
http.HandleFunc("/plugins/", handleProxy)
}
log.Println("Serving on port", *port)
log.Fatal(http.ListenAndServe(*port, &server{}))
}
func openDataArchive(path string) (*zip.ReadCloser, error) {
absBinPath, err := resourceBasePath()
if err != nil {
return nil, err
}
return zip.OpenReader(absBinPath + ".runfiles/gerrit/polygerrit-ui/" + path)
}
func resourceBasePath() (string, error) {
return filepath.Abs(os.Args[0])
}
func handleIndex(writer http.ResponseWriter, originalRequest *http.Request) {
fakeRequest := &http.Request{
URL: &url.URL{
Path: "/",
RawQuery: originalRequest.URL.RawQuery,
},
}
handleProxy(writer, fakeRequest)
}
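// handleProxy forwards the request to the configured upstream host and streams the (possibly patched) response back, dropping Content-Length since the body may be rewritten.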
func handleProxy(writer http.ResponseWriter, originalRequest *http.Request) {
patchedRequest := &http.Request{
Method: "GET",
URL: &url.URL{
Scheme: *scheme,
Host: *host,
Opaque: originalRequest.URL.EscapedPath(),
RawQuery: originalRequest.URL.RawQuery,
},
}
response, err := http.DefaultClient.Do(patchedRequest)
if err != nil {
http.Error(writer, err.Error(), http.StatusInternalServerError)
return
}
defer response.Body.Close()
for name, values := range response.Header {
for _, value := range values {
if name != "Content-Length" {
writer.Header().Add(name, value)
}
}
}
writer.WriteHeader(response.StatusCode)
if _, err := io.Copy(writer, patchResponse(originalRequest, response)); err != nil {
log.Println("Error copying response to ResponseWriter:", err)
return
}
}
func getJsonPropByPath(json map[string]interface{}, path []string) interface{} {
prop, path := path[0], path[1:]
if json[prop] == nil {
return nil
}
switch json[prop].(type) {
case map[string]interface{}: // map
return getJsonPropByPath(json[prop].(map[string]interface{}), path)
case []interface{}: // array
return json[prop].([]interface{})
default:
return json[prop].(interface{})
}
}
func setJsonPropByPath(json map[string]interface{}, path []string, value interface{}) {
prop, path := path[0], path[1:]
if json[prop] == nil {
return // path not found
}
if len(path) > 0 {
setJsonPropByPath(json[prop].(map[string]interface{}), path, value)
} else {
json[prop] = value
}
}
func patchResponse(req *http.Request, res *http.Response) io.Reader {
switch req.URL.EscapedPath() {
case "/":
return replaceCdn(res.Body)
case "/config/server/info":
return injectLocalPlugins(res.Body)
default:
return res.Body
}
}
func replaceCdn(reader io.Reader) io.Reader {
buf := new(bytes.Buffer)
buf.ReadFrom(reader)
original := buf.String()
replaced := cdnPattern.ReplaceAllString(original, "")
return strings.NewReader(replaced)
}
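// injectLocalPlugins appends the locally served plugin paths to the js/html resource lists in the /config/server/info response.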
func injectLocalPlugins(reader io.Reader) io.Reader {
if len(*plugins) == 0 {
return reader
}
// Skip escape prefix
io.CopyN(ioutil.Discard, reader, 5)
dec := json.NewDecoder(reader)
var response map[string]interface{}
err := dec.Decode(&response)
if err != nil {
log.Fatal(err)
}
// Configuration path in the JSON server response
jsPluginsPath := []string{"plugin", "js_resource_paths"}
htmlPluginsPath := []string{"plugin", "html_resource_paths"}
htmlResources := getJsonPropByPath(response, htmlPluginsPath).([]interface{})
jsResources := getJsonPropByPath(response, jsPluginsPath).([]interface{})
for _, p := range strings.Split(*plugins, ",") {
if strings.HasSuffix(p, ".html") {
htmlResources = append(htmlResources, p)
}
if strings.HasSuffix(p, ".js") {
jsResources = append(jsResources, p)
}
}
setJsonPropByPath(response, jsPluginsPath, jsResources)
setJsonPropByPath(response, htmlPluginsPath, htmlResources)
reader, writer := io.Pipe()
go func() {
defer writer.Close()
io.WriteString(writer, ")]}'") // Write escape prefix
err := json.NewEncoder(writer).Encode(&response)
if err != nil {
log.Fatal(err)
}
}()
return reader
}
func handleAccountDetail(w http.ResponseWriter, r *http.Request) {
http.Error(w, http.StatusText(http.StatusForbidden), http.StatusForbidden)
}
type gzipResponseWriter struct {
io.WriteCloser
http.ResponseWriter
}
func newGzipResponseWriter(w http.ResponseWriter) *gzipResponseWriter {
gz := gzip.NewWriter(w)
return &gzipResponseWriter{WriteCloser: gz, ResponseWriter: w}
}
func (w gzipResponseWriter) Write(b []byte) (int, error) {
return w.WriteCloser.Write(b)
}
func (w gzipResponseWriter) Hijack() (net.Conn, *bufio.ReadWriter, error) {
h, ok := w.ResponseWriter.(http.Hijacker)
if !ok {
return nil, nil, errors.New("gzipResponseWriter: ResponseWriter does not satisfy http.Hijacker interface")
}
return h.Hijack()
}
type server struct{}
// Any path prefixes that should resolve to index.html.
var (
fePaths = []string{"/q/", "/c/", "/p/", "/x/", "/dashboard/", "/admin/"}
issueNumRE = regexp.MustCompile(`^\/\d+\/?$`)
)
func (_ *server) ServeHTTP(w http.ResponseWriter, r *http.Request) {
log.Printf("%s %s %s %s\n", r.Proto, r.Method, r.RemoteAddr, r.URL)
for _, prefix := range fePaths {
if strings.HasPrefix(r.URL.Path, prefix) || r.URL.Path == "/" {
r.URL.Path = "/index.html"
log.Println("Redirecting to /index.html")
break
} else if match := issueNumRE.Find([]byte(r.URL.Path)); match != nil {
r.URL.Path = "/index.html"
log.Println("Redirecting to /index.html")
break
}
}
if !strings.Contains(r.Header.Get("Accept-Encoding"), "gzip") {
http.DefaultServeMux.ServeHTTP(w, r)
return
}
w.Header().Set("Content-Encoding", "gzip")
gzw := newGzipResponseWriter(w)
defer gzw.Close()
http.DefaultServeMux.ServeHTTP(gzw, r)
}
| ["\"BUILD_WORKSPACE_DIRECTORY\""] | [] | ["BUILD_WORKSPACE_DIRECTORY"] | [] | ["BUILD_WORKSPACE_DIRECTORY"] | go | 1 | 0 | |
back/main.go | package main
import (
"flag"
"fmt"
"karto/api"
"karto/clusterlistener"
"karto/types"
"os"
"path/filepath"
)
const version = "1.5.0"
func main() {
versionFlag, k8sConfigPath := parseCmd()
if versionFlag {
fmt.Printf("Karto v%s\n", version)
os.Exit(0)
}
container := dependencyInjection()
analysisScheduler := container.AnalysisScheduler
analysisResultsChannel := make(chan types.AnalysisResult)
clusterStateChannel := make(chan types.ClusterState)
go clusterlistener.Listen(k8sConfigPath, clusterStateChannel)
go analysisScheduler.AnalyzeOnClusterStateChange(clusterStateChannel, analysisResultsChannel)
api.Expose(":8000", analysisResultsChannel)
}
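// parseCmd parses command-line flags and derives the default kubeconfig path from HOME (or USERPROFILE on Windows).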
func parseCmd() (bool, string) {
versionFlag := flag.Bool("version", false, "prints Karto's current version")
home := os.Getenv("HOME")
if home == "" {
home = os.Getenv("USERPROFILE")
}
var k8sConfigPath *string
if home != "" {
k8sConfigPath = flag.String("kubeconfig", filepath.Join(home, ".kube", "config"),
"(optional) absolute path to the kubeconfig file")
} else {
k8sConfigPath = flag.String("kubeconfig", "", "absolute path to the kubeconfig file")
}
flag.Parse()
return *versionFlag, *k8sConfigPath
}
| ["\"HOME\"", "\"USERPROFILE\""] | [] | ["HOME", "USERPROFILE"] | [] | ["HOME", "USERPROFILE"] | go | 2 | 0 | |
localibrary2/wsgi.py | """
WSGI config for localibrary2 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "localibrary2.settings")
application = get_wsgi_application()
| [] | [] | [] | [] | [] | python | 0 | 0 | |
geophylo.py | VERSION_NOTICE = None
VERSION_NOTE = None
import cgi
from google.appengine.api import users
from google.appengine.ext import webapp
from google.appengine.ext.webapp.util import run_wsgi_app
from google.appengine.ext import db
import os, sys, string, Cookie, sha, time, random, cgi, urllib
import datetime, StringIO, pickle, urllib2
import feedparser
import zipfile
import wsgiref.handlers
from google.appengine.api import memcache, urlfetch
from google.appengine.ext.webapp import template
from django.utils import feedgenerator, simplejson
from django.template import Context, Template
import logging
from offsets import *
from tutorials import *
from tree_parse import *
from phyloxml import *
from forum import *
# HTTP codes
HTTP_NOT_ACCEPTABLE = 406
HTTP_NOT_FOUND = 404
RSS_MEMCACHED_KEY = "rss"
class usageStats(db.Model):
ntrees = db.IntegerProperty()
ntaxa = db.IntegerProperty()
nflyto = db.IntegerProperty()
netlink = db.IntegerProperty()
mykey = db.IntegerProperty()
date = db.DateTimeProperty(auto_now_add=True)
class kmlStore(db.Model):
kmlId = db.StringProperty()
kmlText = db.TextProperty()
kmzBlob = db.BlobProperty()
kmlName = db.StringProperty()
authorName = db.StringProperty(default="Geophylo Engine")
isPermanent = db.BooleanProperty(default=False)
isPublic = db.BooleanProperty(default=False)
last_access_date = db.DateTimeProperty(auto_now_add=True)
last_update_date = db.DateTimeProperty(auto_now_add=True)
nSeed = db.IntegerProperty(default=0)
downloadCt = db.IntegerProperty(default=1)
version = db.StringProperty(default="1-0")
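# Compress a KML string into an in-memory KMZ (zip) archive containing a single doc.kml entry.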
def ZipFiles(data):
zipstream=StringIO.StringIO()
file = zipfile.ZipFile(file=zipstream,compression=zipfile.ZIP_DEFLATED,mode="w")
file.writestr("doc.kml",data.encode("utf-8"))
file.close()
zipstream.seek(0)
return zipstream.getvalue()
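# Extract uploaded tree/coordinate data, transparently handling both zipped and plain uploads.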
def UnzipFiles(data):
if str(data.filename)[-3:]=="zip":
data = data.file
try:
tmp = zipfile.ZipFile(data, 'r')
names = []
for fn in tmp.namelist():
names.append(fn)
data = tmp.read(names[0])
except:
try:
data = open(data,'r').read()
except:
return 'error'
else:
data = data.file.read()
return data
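# Build an HTML snippet listing the three most recent forum topics for the sidebar.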
def forum_activity():
feeds = []
siteroot = 'http://geophylo.appspot.com'
forum = Forum.gql("WHERE url = :1", 'forum').get()
topics = Topic.gql("WHERE forum = :1 AND is_deleted = False ORDER BY created_on DESC", forum).fetch(3)
for topic in topics:
title = topic.subject
link = siteroot + "topic?id=" + str(topic.key().id())
first_post = Post.gql("WHERE topic = :1 ORDER BY created_on", topic).get()
msg = first_post.message
# TODO: a hack: using a full template to format message body.
# There must be a way to do it using straight django APIs
name = topic.created_by
feeds.append("<a href="+str(link)+">"+str(title)[0:60]+"</a></br>")
feeds.append(str(name)+" <i> "+str(msg)[0:110]+"...</i></br>")
return feeds
class LibraryPage(webapp.RequestHandler):
def get(self):
limit = 12
try:
offset = int(cgi.escape(self.request.get('next')))
if offset<0:
offset = 0
except:
offset = 0
usage_line = ''
stats = db.GqlQuery("SELECT * FROM usageStats WHERE mykey = 0 LIMIT 10")
version = os.environ['CURRENT_VERSION_ID'].split('.')
version = str(version[0].replace('-','.'))
for info in stats:
if info.ntrees:
usage_line += str(info.ntrees)+' trees | '
if info.ntaxa:
usage_line += str(info.ntaxa)+' taxa | '
if info.nflyto:
usage_line += str(info.nflyto)+' flytos | '
if info.netlink:
usage_line += str(info.netlink)+' netlinks'
"""mod/run this when adding new field to track
info.netlink = 1
db.put(info)"""
mydomain = "http://geophylo.appspot.com"
query = db.GqlQuery('SELECT * FROM kmlStore WHERE isPublic = True ORDER BY last_update_date DESC')
public_trees = query.fetch(limit,offset)
entries = []
for row in public_trees:
entry = {}
entry['author'] = row.authorName
entry['name'] = row.kmlName
pubkey = row.key()
storeKey = str(row.nSeed)+"T"+str(pubkey)
entry['link'] = "http://%s.latest.geophylo.appspot.com/pubkey-%s/networklink.kml" % (version.replace('.','-'),storeKey)
entry['update'] = str(row.last_update_date)
entry['update'] = entry['update'].split(' ')[0]
entries.append(entry)
if len(entries)==0:
ct = offset
while ct<limit+offset:
entry = {}
entry['author'] = 'Dr. Author'
entry['name'] = 'Tigon %s' % ct
pubkey = 'ABCddddABC'
entry['link'] = "http://%s.latest.geophylo.appspot.com/pubkey-%s/networklink.kml" % (version.replace('.','-'),pubkey)
entry['update'] = '2009-06-24 22:03:43.049066'
entry['update'] = entry['update'].split(' ')[0]
entries.append(entry)
ct+=1
template_values = {}
template_values['version'] = version.replace('.','-')
template_values['last'] = 0 if offset-limit < 0 else offset-limit
template_values['next'] = offset+10
template_values['entries'] = entries
template_values['usage_line'] = usage_line
template_values['feeds'] = ''
template_values['notice'] = VERSION_NOTICE
path = os.path.join(os.path.dirname(__file__), 'templates/header.html')
template_values['header'] = template.render(path, template_values)
path = os.path.join(os.path.dirname(__file__), 'templates/library.html')
template_values['content'] = template.render(path, template_values)
path = os.path.join(os.path.dirname(__file__), 'templates/main.html')
self.response.out.write(template.render(path, template_values))
class AboutPage(webapp.RequestHandler):
def get(self):
usage_line = ''
stats = db.GqlQuery("SELECT * FROM usageStats WHERE mykey = 0 LIMIT 10")
version = os.environ['CURRENT_VERSION_ID'].split('.')
version = str(version[0].replace('-','.'))
for info in stats:
if info.ntrees:
usage_line += str(info.ntrees)+' trees | '
if info.ntaxa:
usage_line += str(info.ntaxa)+' taxa | '
if info.nflyto:
usage_line += str(info.nflyto)+' flytos | '
if info.netlink:
usage_line += str(info.netlink)+' netlinks'
"""mod/run this when adding new field to track
info.netlink = 1
db.put(info)"""
mydomain = "http://geophylo.appspot.com"
feeds = forum_activity()
template_values = {}
template_values['version'] = version.replace('.','-')
template_values['usage_line'] = usage_line
template_values['feeds'] = feeds
template_values['notice'] = VERSION_NOTICE
path = os.path.join(os.path.dirname(__file__), 'templates/header.html')
template_values['header'] = template.render(path, template_values)
path = os.path.join(os.path.dirname(__file__), 'templates/recent_activity.html')
template_values['recent_activity'] = template.render(path, template_values)
path = os.path.join(os.path.dirname(__file__), 'templates/about.html')
template_values['content'] = template.render(path, template_values)
path = os.path.join(os.path.dirname(__file__), 'templates/main.html')
self.response.out.write(template.render(path, template_values))
class MainPage(webapp.RequestHandler):
def get(self):
usage_line = ''
stats = db.GqlQuery("SELECT * FROM usageStats WHERE mykey = 0 LIMIT 10")
author_key_value = ''.join(random.choice(string.letters) for i in xrange(30))
for info in stats:
if info.ntrees:
usage_line += str(info.ntrees)+' trees | '
if info.ntaxa:
usage_line += str(info.ntaxa)+' taxa | '
if info.nflyto:
usage_line += str(info.nflyto)+' flytos | '
if info.netlink:
usage_line += str(info.netlink)+' netlinks'
"""mod/run this when adding new field to track
info.netlink = 1
db.put(info)"""
mydomain = "http://geophylo.appspot.com"
feeds = forum_activity()
"""
curss = GenericFeed("http://geophylo.appspot.com/forum/rss","Discussion Forum")
ct = 0
feeds = []
while ct<3:
i = curss.feed[ct]
feeds.append("<a href="+str(i.link)+">"+str(i.title)[0:60]+"</a></br>")
feeds.append("<i>"+str(i.content)[0:110]+"...</i></br>")
ct += 1
"""
version = os.environ['CURRENT_VERSION_ID'].split('.')
version = str(version[0].replace('-','.'))
template_values = {}
template_values['version'] = version.replace('.','-')
template_values['usage_line'] = usage_line
template_values['feeds'] = feeds
template_values['author_key_value'] = author_key_value
template_values['notice'] = VERSION_NOTICE
path = os.path.join(os.path.dirname(__file__), 'templates/header.html')
template_values['header'] = template.render(path, template_values)
path = os.path.join(os.path.dirname(__file__), 'templates/recent_activity.html')
template_values['recent_activity'] = template.render(path, template_values)
path = os.path.join(os.path.dirname(__file__), 'templates/engine.html')
template_values['content'] = template.render(path, template_values)
path = os.path.join(os.path.dirname(__file__), 'templates/main.html')
self.response.out.write(template.render(path, template_values))
class PhyloJSON(webapp.RequestHandler):
def post(self):
if self.request.get("method") == 'test':
phyloxml = open('Baeolophus_np.xml','r').read()
elif self.request.get("phyloxml") != '' and self.request.get("phyloxml") is not None:
phyloxml = UnzipFiles(self.request.POST.get('phyloxml'))
else:
phyloxml = open('Baeolophus_np.xml','r').read()
#set defaults
branch_color = "FFFFFFFF"
branch_width = 1.5
icon = "http://geophylo.appspot.com/static_files/icons/a99.png"
proximity = 2
alt_grow = 15000
title = "GeoPhyloEngine"
tree = PhyloXMLtoTree(phyloxml,title,alt_grow=alt_grow,proximity=proximity,branch_color=branch_color,branch_width=branch_width,icon=icon)
tree.load()
out = ''
output = []
for a,b in tree.objtree.tree.items():
if a != 0:
output.append(b.json())
tree = ''
data = {}
data['data'] = output
#self.response.headers['Content-Type'] = 'application/json; charset=utf-8'
self.response.out.write(simplejson.dumps(data).replace('\\/','/'))
def get(self):
phyloxml = open('Baeolophus_np.xml','r').read()
#set defaults
branch_color = "FFFFFFFF"
branch_width = 1.5
icon = "http://geophylo.appspot.com/static_files/icons/a99.png"
proximity = 2
alt_grow = 15000
title = "GeoPhyloEngine"
tree = PhyloXMLtoTree(phyloxml,title,alt_grow=alt_grow,proximity=proximity,branch_color=branch_color,branch_width=branch_width,icon=icon)
tree.load()
out = ''
output = []
for a,b in tree.objtree.tree.items():
if a != 0:
output.append(b.json())
tree = ''
if self.request.get("callback") != '':
output = {'items': output}
output = simplejson.dumps(output).replace('\\/','/')
func = str(self.request.get("callback"))
output = func+'('+output+')'
else:
output = simplejson.dumps(output, sort_keys=True, indent=4).replace('\\/','/')
self.response.headers['Content-Type'] = 'text/javascript; charset=utf-8'
#self.response.headers['Content-Type'] = 'application/json; charset=utf-8'
self.response.out.write(output)
#self.response.out.write(simplejson.dumps(data))
class TestPhyloXML(webapp.RequestHandler):
def get(self):
#phyloxml = open('amphi_frost.xml','r').read()
phyloxml = open('Baeolophus_np.xml','r').read()
branch_color = "FFFFFFFF"
branch_width = 1.5
icon = "http://geophylo.appspot.com/static_files/icons/a99.png"
proximity = 2
alt_grow = 15000
title = "GeoPhyloEngine"
#tree = PhyloXMLtoTree(phyloxml,alt_grow=alt_grow,proximity=proximity,branch_color=branch_color,branch_width=branch_width,icon=icon)
tree = PhyloXMLtoTree(phyloxml,title,alt_grow=alt_grow,proximity=proximity,branch_color=branch_color,branch_width=branch_width,icon=icon)
tree.load()
desc = os.path.join(os.path.dirname(__file__), 'templates/leaf_description.html')
output = ''
for a,b in tree.objtree.tree.items():
if a!=0:
b.buildkml()
if b.node_id == '41536':
output = template.render(desc, b.kml)
self.response.out.write(output)
"""
tree,kml = gen_kml(tree)
kml = ZipFiles(kml)
self.response.headers['Content-Type'] = "application/vnd.google-earth.kml+xml"
self.response.out.write(kml)
"""
class GenerateKML(webapp.RequestHandler):
def post(self):
#advanced fields
#get branch color
try:
branch_color = cgi.escape(self.request.get('branch_color'))
except:
branch_color = "FFFFFFFF" #default branch color
if branch_color == '':
branch_color = "FFFFFFFF"
try:
author_name = cgi.escape(self.request.get('author_name'))
except:
author_name = "Geophylo Engine" #default branch color
if branch_color == '':
author_name = "Geophylo Engine"
#get branch color
try:
branch_width = int(cgi.escape(self.request.get('branch_width')))
except:
branch_width = 1.5 #default branch color
if branch_width == '':
branch_width = 1.5
if branch_width < 1 or 20 < branch_width:
branch_width = 1.5
#get internal node icon url
try:
icon = cgi.escape(self.request.get('node_url'))
except:
icon = "http://geophylo.appspot.com/static_files/icons/a99.png"
if icon == '':
icon = "http://geophylo.appspot.com/static_files/icons/a99.png"
#get altitude growth factor
try:
alt_grow = float(cgi.escape(self.request.get('alt_grow')))
except:
alt_grow = 10000
if alt_grow == '':
alt_grow = 10000
#get the decimal point to round lat/long to to generate autospread
try:
proximity = cgi.escape(self.request.get('proximity'))
except:
proximity = 2
try:
proximity = int(proximity)
if proximity < 1:
proximity = 1
except:
proximity = 2
#find out if the user wants to create a permalink
#the default is false
#the default timelength is false (meaning one month)
permalink = False #so don't store
permanent = False #if we do store, default to one month
public = False
try:
permalink = cgi.escape(self.request.get('permalink'))
except:
permalink = False
if permalink == '':
permalink = False
elif permalink == 'yes':
#if we are permalinking the kml, see if the user wants it forever
new_private_key_value = self.request.get('new_private_key_value')
if new_private_key_value != '':
try:
new_private_key_value = str(new_private_key_value)
except:
new_private_key_value = ''
if self.request.get('storage_time') == "permanent":
permanent = True
try:
if self.request.get('public') == 'yes':
public = True
except:
public = False
#get the title of the KML that the user wants
try:
title = cgi.escape(self.request.get('kml_title'))
except:
title = "GeoPhyloEngine"
if title == '':
title = "GeoPhyloEngine"
#If the user has a public/private key, we will see if the kml exists
#get private key
try:
private_key = cgi.escape(self.request.get('private_key'))
update_kml = True
except:
private_key = ''
update_kml = False
if private_key == '':
update_kml = False
#get public key
try:
public_key = cgi.escape(self.request.get('public_key'))
public_key = public_key.replace('pubkey-','')
except:
public_key = ''
update_kml = False
if public_key == '':
update_kml = False
version = os.environ['CURRENT_VERSION_ID'].split('.')
version = str(version[0])
mydomain = "http://geophylo.appspot.com"
if self.request.get("phyloxml") == '':
tree = self.request.get('tree')
coords = self.request.get('coords')
kmlMeta = build_kml(tree,coords,mydomain,branch_color,branch_width,icon,alt_grow,proximity,title)
kml = kmlMeta.kml
taxa = kmlMeta.taxa
err = kmlMeta.err
type = kmlMeta.type
else:
phyloxml = UnzipFiles(self.request.POST.get('phyloxml'))
phylo = PhyloXMLtoTree(phyloxml,title,alt_grow=alt_grow,proximity=proximity,branch_color=branch_color,branch_width=branch_width,icon=icon)
phylo.load()
phylo,kml = gen_kml(phylo)
taxa = len(phylo.objtree.tree)
err = None
type = None
if (kml == "" or taxa == 0) and err:
self.response.out.write(type)
else:
#to save storage and BW, zip the KML
kml = ZipFiles(kml)
stats = db.GqlQuery("SELECT * FROM usageStats WHERE mykey = 0 LIMIT 1")
for info in stats:
info.ntrees += 1
info.ntaxa += taxa
db.put(info)
if update_kml:
found = False;
seed = string.find(public_key,'T')
storeKey = public_key[(seed+1):]
seed = int(public_key[:seed])
try:
q = db.get(db.Key(storeKey))
found = True
except:
self.response.out.write("""Invalid private or public key""")
self.response.out.write(public_key)
if found:
if cmp(str(q.kmlId),str(private_key))==0 and seed == q.nSeed:
q.kmzBlob = kml
q.kmlName = title
q.authorName = author_name
q.last_update_date = datetime.datetime.now()
q.nSeed = q.nSeed
q.version = version.replace('.','-')
if public is True:
q.isPublic = True
if permanent is True:
q.isPermanent = permanent
db.put(q)
template_values = {}
template_values['title']=title
template_values['pubkey']=public_key
template_values['author']=author_name
template_values['version']=version.replace('.','-')
self.response.headers['Content-Type'] = "application/vnd.google-earth.kml+xml"
path = os.path.join(os.path.dirname(__file__), 'templates/network_link.kml')
self.response.out.write(template.render(path, template_values))
else:
self.response.out.write("""Invalid private or public key""")
elif permalink:
#if permalink, then store the kml in the datastore and send the user a network link kml
#store in datastore
import random
docDesc = "<Document>\n<description><![CDATA[&descx]]></description>"
kml = kml.replace("<Document>",docDesc)
kml_entry = kmlStore(kmlId = new_private_key_value,
kmzBlob = kml,
kmlName = title,
authorName = author_name,
isPermanent = permanent,
isPublic = public,
version = version.replace('.','-'),
nSeed = int(taxa))
kml_entry.put()
key = kml_entry.key()
key = str(taxa)+"T"+str(key)
template_values = {}
template_values['title']=title
template_values['pubkey']=key
template_values['author']=author_name
template_values['version']=version.replace('.','-')
self.response.headers['Content-Type'] = "application/vnd.google-earth.kml+xml"
path = os.path.join(os.path.dirname(__file__), 'templates/network_link.kml')
self.response.out.write(template.render(path, template_values))
else:
#if not permalink, then just send the user a oneoff kml
self.response.headers['Content-Type'] = 'application/vnd.google-earth.kml+xml'
self.response.out.write(str(kml))
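# Serve a stored KML/KMZ document by its public key and increment its download counter.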
class NetworkLink(webapp.RequestHandler):
def get(self):
stats = db.GqlQuery("SELECT * FROM usageStats WHERE mykey = 0 LIMIT 1")
for info in stats:
info.netlink = info.netlink + 1
db.put(info)
url = self.request.path_info
assert '/' == url[0]
path = url[1:]
if '/' in path:
(kmlKey, networklink) = path.split("/", 1)
else:
kmlKey = path
kmlKey = str(kmlKey).replace('pubkey-','')
seed = string.find(kmlKey,'T')
storeKey = kmlKey[(seed+1):]
seed = int(kmlKey[:seed])
q = db.get(db.Key(storeKey))
if q.nSeed == seed or q.nSeed == None:
if q.kmzBlob is None:
kml = q.kmlText
else:
kml = q.kmzBlob
name = q.kmlName
lad = q.last_access_date
lud = q.last_update_date
q.last_access_date = datetime.datetime.now()
q.downloadCt = q.downloadCt + 1
db.put(q)
self.response.headers['Content-Type'] = "application/vnd.google-earth.kml+xml"
self.response.out.write(kml)
class FlyToCoords(webapp.RequestHandler):
def get(self):
lat = float(cgi.escape(self.request.get('lat')))
lon = float(cgi.escape(self.request.get('lon')))
alt = float(cgi.escape(self.request.get('alt')))
if alt<5000.0:
range = 2000
else:
range = 10000
stats = db.GqlQuery("SELECT * FROM usageStats WHERE mykey = 0 LIMIT 1")
for info in stats:
info.nflyto += 1
db.put(info)
template_values = {}
template_values['lon'] = str(lon)
template_values['lat'] = str(lat)
template_values['alt'] = str(int(alt))
template_values['range'] = str(range)
self.response.headers['Content-Type'] = "application/vnd.google-earth.kml+xml"
#self.response.out.write(str(kml))
path = os.path.join(os.path.dirname(__file__), 'templates/fly_to.kml')
self.response.out.write(template.render(path, template_values))
class PrintTree(webapp.RequestHandler):
def get(self):
usage_line = ''
stats = db.GqlQuery("SELECT * FROM usageStats WHERE mykey = 0 LIMIT 10")
author_key_value = ''.join(random.choice(string.letters) for i in xrange(30))
for info in stats:
if info.ntrees:
usage_line += str(info.ntrees)+' trees | '
if info.ntaxa:
usage_line += str(info.ntaxa)+' taxa | '
if info.nflyto:
usage_line += str(info.nflyto)+' flytos | '
if info.netlink:
usage_line += str(info.netlink)+' netlinks'
"""mod/run this when adding new field to track
info.netlink = 1
db.put(info)"""
mydomain = "http://geophylo.appspot.com"
curss = GenericFeed("http://geophylo.appspot.com/forum/rss","Discussion Forum")
#result = urlfetch.fetch("http://geophylo.appspot.com/talk/rss")
ct = 0
feeds = []
for i in curss.feed:
feeds.append("<a href="+str(i.link)+">"+str(i.title)[0:60]+"</a></br>")
feeds.append("<i>"+str(i.content)[0:110]+"...</i></br>")
ct += 1
if ct==3:
break
version = os.environ['CURRENT_VERSION_ID'].split('.')
version = str(version[0].replace('-','.'))
template_values = {}
template_values['version'] = version.replace('.','-')
template_values['usage_line'] = usage_line
template_values['feeds'] = feeds
template_values['notice'] = VERSION_NOTICE
path = os.path.join(os.path.dirname(__file__), 'templates/header.html')
template_values['header'] = template.render(path, template_values)
path = os.path.join(os.path.dirname(__file__), 'templates/printtree.html')
template_values['content'] = template.render(path, template_values)
path = os.path.join(os.path.dirname(__file__), 'templates/main.html')
self.response.out.write(template.render(path, template_values))
class PrintCoords(webapp.RequestHandler):
def get(self):
usage_line = ''
stats = db.GqlQuery("SELECT * FROM usageStats WHERE mykey = 0 LIMIT 10")
author_key_value = ''.join(random.choice(string.letters) for i in xrange(30))
for info in stats:
if info.ntrees:
usage_line += str(info.ntrees)+' trees | '
if info.ntaxa:
usage_line += str(info.ntaxa)+' taxa | '
if info.nflyto:
usage_line += str(info.nflyto)+' flytos | '
if info.netlink:
usage_line += str(info.netlink)+' netlinks'
"""mod/run this when adding new field to track
info.netlink = 1
db.put(info)"""
mydomain = "http://geophylo.appspot.com"
curss = GenericFeed("http://geophylo.appspot.com/forum/rss","Discussion Forum")
#result = urlfetch.fetch("http://geophylo.appspot.com/talk/rss")
ct = 0
feeds = []
for i in curss.feed:
feeds.append("<a href="+str(i.link)+">"+str(i.title)[0:60]+"</a></br>")
feeds.append("<i>"+str(i.content)[0:110]+"...</i></br>")
ct += 1
if ct==3:
break
version = os.environ['CURRENT_VERSION_ID'].split('.')
version = str(version[0].replace('-','.'))
template_values = {}
template_values['version'] = version.replace('.','-')
template_values['usage_line'] = usage_line
template_values['feeds'] = feeds
template_values['notice'] = VERSION_NOTICE
path = os.path.join(os.path.dirname(__file__), 'templates/header.html')
template_values['header'] = template.render(path, template_values)
path = os.path.join(os.path.dirname(__file__), 'templates/printcoords.html')
template_values['content'] = template.render(path, template_values)
path = os.path.join(os.path.dirname(__file__), 'templates/main.html')
self.response.out.write(template.render(path, template_values))
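# Cron task: purge non-permanent KML entries that have not been updated in the last month.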
class weeklyCron(webapp.RequestHandler):
def get(self):
one_month_ago = datetime.datetime.now() - datetime.timedelta(days=31)
"SELECT * FROM kmlStore WHERE last_update_date < %s AND isPermanent = %s" % (one_month_ago,False)
que = db.GqlQuery(query)
res = que.fetch(999)
db.delete(res)
application = webapp.WSGIApplication(
[('/', MainPage),
('/library', LibraryPage),
('/testphyloxml', TestPhyloXML),
('/phylojson',PhyloJSON),
('/about', AboutPage),
('/weeklycron', weeklyCron),
('/output.kml', GenerateKML),
('/phyloxml.kml', TestPhyloXML),
('/flyto.kml', FlyToCoords),
('/tree', PrintTree),
('/coords', PrintCoords),
('/[^/]+/networklink.kml', NetworkLink),
('/tutorial01', Tutorial01),
('/tutorial02', Tutorial02),
('/tutorial03', Tutorial03),
('/Forums', ForumList),
('/manageforums', ManageForums),
('/[^/]+/postdel', PostDelUndel),
('/[^/]+/postundel', PostDelUndel),
('/[^/]+/post', PostForm),
('/[^/]+/topic', TopicForm),
('/[^/]+/email', EmailForm),
('/[^/]+/rss', RssFeed),
('/[^/]+/rssall', RssAllFeed),
('/[^/]+/importfruitshow', ImportFruitshow),
('/[^/]+/?', TopicList)],
debug=False)
def main():
run_wsgi_app(application)
if __name__ == "__main__":
main()
| [] | [] | ["CURRENT_VERSION_ID"] | [] | ["CURRENT_VERSION_ID"] | python | 1 | 0 | |
models/daotest/models_test.go | package daotest
import (
"os"
"testing"
"reflect"
"fmt"
"bytes"
"encoding/gob"
"encoding/hex"
"github.com/MetaLife-Protocol/SuperNode/codefortest"
"github.com/MetaLife-Protocol/SuperNode/log"
"github.com/MetaLife-Protocol/SuperNode/models"
"github.com/MetaLife-Protocol/SuperNode/utils"
"github.com/ethereum/go-ethereum/common"
"github.com/stretchr/testify/assert"
)
func init() {
log.Root().SetHandler(log.LvlFilterHandler(log.LvlTrace, utils.MyStreamHandler(os.Stderr)))
}
func TestToken(t *testing.T) {
dao := codefortest.NewTestDB("")
defer dao.CloseDB()
var cbtokens []common.Address
funcb := func(token common.Address) bool {
cbtokens = append(cbtokens, token)
return false
}
ts, err := dao.GetAllTokens()
if len(ts) > 0 {
t.Error("should not found")
}
if len(ts) != 0 {
t.Error("should be empty")
}
var am = make(models.AddressMap)
t1 := utils.NewRandomAddress()
am[t1] = utils.NewRandomAddress()
dao.RegisterNewTokenCallback(funcb)
err = dao.AddToken(t1, am[t1])
if err != nil {
t.Error(err)
}
am2, _ := dao.GetAllTokens()
assert.EqualValues(t, am, am2)
t2 := utils.NewRandomAddress()
am[t2] = utils.NewRandomAddress()
err = dao.AddToken(t2, am[t2])
if err != nil {
t.Error(err)
}
if len(cbtokens) != 2 && cbtokens[0] != t1 {
t.Error("add token error")
}
am2, _ = dao.GetAllTokens()
assert.EqualValues(t, am, am2)
}
func TestGob(t *testing.T) {
s1 := common.HexToAddress(os.Getenv("TOKEN_NETWORK"))
var buf bytes.Buffer
enc := gob.NewEncoder(&buf)
err := enc.Encode(s1)
if err != nil {
t.Error(err)
return
}
encodedData := buf.Bytes()
fmt.Printf("first\n%s", hex.Dump(encodedData))
dec := gob.NewDecoder(bytes.NewBuffer(encodedData))
var sb common.Address
err = dec.Decode(&sb)
if err != nil {
t.Error(err)
return
}
if !reflect.DeepEqual(s1, sb) {
t.Error("not equal")
}
var buf2 bytes.Buffer
enc2 := gob.NewEncoder(&buf2)
enc2.Encode(&sb)
encodedData2 := buf2.Bytes()
fmt.Printf("second\n%s", hex.Dump(encodedData2))
if !reflect.DeepEqual(encodedData, encodedData2) {
t.Error("not equal")
}
}
func TestGobAddressMap(t *testing.T) {
am := make(models.AddressMap)
k1 := utils.NewRandomAddress()
am[k1] = utils.NewRandomAddress()
am[utils.NewRandomAddress()] = utils.NewRandomAddress()
var buf bytes.Buffer
enc := gob.NewEncoder(&buf)
err := enc.Encode(am)
if err != nil {
t.Error(err)
return
}
encodedData := buf.Bytes()
dec := gob.NewDecoder(bytes.NewBuffer(encodedData))
var am2 models.AddressMap
err = dec.Decode(&am2)
if err != nil {
t.Error(err)
return
}
if !reflect.DeepEqual(am, am2) {
t.Error("not equal")
return
}
var buf2 bytes.Buffer
enc2 := gob.NewEncoder(&buf2)
enc2.Encode(&am2)
encodedData2 := buf2.Bytes()
//should not use bytes equal, because map's random visit mechanism
//if !reflect.DeepEqual(encodedData, encodedData2) {
if len(encodedData) != len(encodedData2) {
t.Errorf("not equal,encodedata=%s,encodedata2=%s", utils.StringInterface(encodedData, 2), utils.StringInterface(encodedData2, 2))
panic("err")
}
}
func TestGob2(t *testing.T) {
for i := 0; i < 50; i++ {
TestGobAddressMap(t)
}
}
func TestWithdraw(t *testing.T) {
dao := codefortest.NewTestDB("")
defer dao.CloseDB()
channel := utils.NewRandomHash()
secret := utils.ShaSecret(channel[:])
r := dao.IsThisLockHasUnlocked(channel, secret)
if r == true {
t.Error("should be false")
return
}
dao.UnlockThisLock(channel, secret)
r = dao.IsThisLockHasUnlocked(channel, secret)
if r == false {
t.Error("should be true")
return
}
r = dao.IsThisLockHasUnlocked(utils.NewRandomHash(), secret)
if r == true {
t.Error("shoulde be false")
return
}
}
func TestModelDB_IsThisLockRemoved(t *testing.T) {
dao := codefortest.NewTestDB("")
defer dao.CloseDB()
channel := utils.NewRandomHash()
secret := utils.ShaSecret(channel[:])
sender := utils.NewRandomAddress()
r := dao.IsThisLockRemoved(channel, sender, secret)
if r {
t.Error("should be false")
return
}
dao.RemoveLock(channel, sender, secret)
r = dao.IsThisLockRemoved(channel, sender, secret)
if !r {
t.Error("should be true")
return
}
}
| ["\"TOKEN_NETWORK\""] | [] | ["TOKEN_NETWORK"] | [] | ["TOKEN_NETWORK"] | go | 1 | 0 | |
app/tests/v2/base_test.py | import os
import unittest
from app import createapp
from ...api.v2.models import create_tables, drop_tables, create_admin
# URLs for the user endpoints under test
REGISTER_URL = 'api/v1/v2/user'
SINGLE_USER_URL = 'api/v1/v2/[email protected]'
DELETE_USER_URL = 'api/v1/v2/[email protected]'
USERS_URL = 'api/v1/v2/users'
class BaseTest(unittest.TestCase):
'''initializes data and env for the test modules'''
def setUp(self):
'''sets up the test client'''
self.app = createapp(os.getenv('APP_SETTINGS'))
self.client = self.app.test_client()
self.app_context = self.app.app_context()
with self.app.app_context():
drop_tables()
create_tables()
# variables for the user endpoints;
# including but not limited to: logging in and signing up
self.valid_entry = {
"firstname": "solo",
"lastname": "compredo",
"othername": "nyakahia",
"email": "[email protected]",
"phoneNumber": "0723143761",
"username": "capriereceor",
"password": "despacito"
}
def tearDown(self):
'''removes all the testing data created'''
drop_tables()
return "all clear"
| [] | [] | ["APP_SETTINGS"] | [] | ["APP_SETTINGS"] | python | 1 | 0 | |
app.py | from flask import Flask, request, jsonify, abort
import logging
import ssl
import json, os, sys, glob
#######################
#### Utilities ########
#######################
app = Flask(__name__, static_folder='./build/', static_url_path="/")
#app = Flask(__name__, static_folder='./public/')
if __name__ == '__main__':
print(app.url_map)
#context = ssl.SSLContext(ssl.PROTOCOL_TLSv1_2)
#context.load_cert_chain('server.crt', 'server.key')
print('start')
files = glob.glob('./*')
print(files)
files = glob.glob('./build/*')
print(files)
port = int(os.environ.get("PORT", 38888))
#app.run(debug=True, use_reloader=False, host='0.0.0.0', port=port, ssl_context=context, threaded=True)
app.run(host='0.0.0.0', port=port)
print('......')
| [] | [] | ["PORT"] | [] | ["PORT"] | python | 1 | 0 | |
main.go | package main
import (
"errors"
"fmt"
"os"
"github.com/aliyun/aliyun-oss-go-sdk/oss"
)
func main() {
if err := root(os.Args[1:]); err != nil {
fmt.Println(err)
os.Exit(1)
}
}
var (
endpoint = os.Getenv("OSS_ENDPOINT")
accessID = os.Getenv("OSS_AK")
accessKey = os.Getenv("OSS_SK")
bucketName = os.Getenv("OSS_BUCKET")
)
type Runner interface {
Init([]string) error
Run() error
Name() string
}
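// root dispatches execution to the sub-command named by the first CLI argument.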
func root(args []string) error {
if len(args) < 1 {
return errors.New("You must pass a sub-command")
}
cmds := []Runner{
NewCreateCmd(),
NewListCmd(),
NewDelCmd(),
NewInfoCmd(),
NewSignCmd(),
}
subcommand := os.Args[1]
for _, cmd := range cmds {
if cmd.Name() == subcommand {
cmd.Init(os.Args[2:])
return cmd.Run()
}
}
return fmt.Errorf("Unknown subcommand: %s", subcommand)
}
// GetBucket creates the test bucket
func GetBucket(bucketName string) (*oss.Bucket, error) {
// New client
client, err := oss.New(endpoint, accessID, accessKey)
if err != nil {
return nil, err
}
// Get bucket
bucket, err := client.Bucket(bucketName)
if err != nil {
return nil, err
}
return bucket, nil
}
// HandleError is the error handling method in the sample code
func HandleError(err error) {
fmt.Println("occurred error:", err)
os.Exit(-1)
}
| ["\"OSS_ENDPOINT\"", "\"OSS_AK\"", "\"OSS_SK\"", "\"OSS_BUCKET\""] | [] | ["OSS_AK", "OSS_SK", "OSS_ENDPOINT", "OSS_BUCKET"] | [] | ["OSS_AK", "OSS_SK", "OSS_ENDPOINT", "OSS_BUCKET"] | go | 4 | 0 | |
function/synctickers.py | ## https://github.com/dsmorgan/yacgb
import ccxt
import time
import datetime
from datetime import timezone
import os
import logging
from yacgb.awshelper import yacgb_aws_ps
from yacgb.ohlcv_sync import save_candles, candle_limits
from model.market import Market, market_init
from model.ohlcv import OHLCV, ohlcv_init
logger=logging.getLogger()
logger.setLevel(logging.INFO)
logger.info("CCXT version: %s" % ccxt.__version__)
#AWS parameter store usage is optional, and can be overridden with environment variables
psconf=yacgb_aws_ps(with_decryption=False)
# Create tables in dynamodb if they don't exist already
market_init()
ohlcv_init()
# load the configured exchange from ccxt, load_markets is needed to initialize
myexch={}
for e in psconf.exch:
myexch[e] = eval ('ccxt.%s ()' % e)
myexch[e].setSandboxMode(psconf.exch_sandbox[e])
myexch[e].enableRateLimit = False
myexch[e].load_markets()
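# Entry point: refresh exchange configuration from the parameter store and sync OHLCV candles for every configured exchange:market pair.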
def lambda_handler(event, context):
global myexch
global psconf
psconf.collect()
for d in psconf.del_exch:
logger.info("config change, deleting exchange config: %s" % d)
del(myexch[d])
for a in psconf.new_exch:
logger.info("config change, new exchange config: %s" % a)
myexch[a] = eval ('ccxt.%s ()' % a)
myexch[a].setSandboxMode(psconf.exch_sandbox[a])
myexch[a].enableRateLimit = False
myexch[a].load_markets()
logger.info("exchange:market %s" % str(psconf.market_list))
for x in psconf.market_list:
nowdt = datetime.datetime.now(timezone.utc)
thisminute = nowdt.replace(second=0, microsecond=0)
exchange = x.split(':', 1)[0]
market_symbol = x.split(':', 1)[1]
logger.debug("syncing %s %s last:%s" %(exchange, market_symbol, str(nowdt)))
####
# If this is the 1st time we've attempted to get this exchange + market_symbol, then save a table entry w/ some details
#TODO refactor these out
try:
exchange_item = Market.get(exchange, market_symbol)
except Market.DoesNotExist:
#fetch new market info
market = myexch[exchange].market(market_symbol)
exchange_item = Market(exchange, market_symbol,
precision_base=market['precision']['amount'],
precision_quote=market['precision']['price'],
maker=market['maker'],
taker=market['taker'],
limits_amount_max=market['limits']['amount']['max'],
limits_amount_min=market['limits']['amount']['min'],
limits_cost_max=market['limits']['cost']['max'],
limits_cost_min=market['limits']['cost']['min'],
limits_price_max=market['limits']['price']['max'],
limits_price_min=market['limits']['price']['min'])
exchange_item.save()
logger.info('Created new Market entry [' + exchange + '] [' + market_symbol + ']')
#Determine the number of candles to request and save, for each duration
cl = candle_limits(exchange_item.last_timestamp, thisminute)
if exchange_item.last_timestamp == None:
#store the current timestamp(s) in START
exchange_item.start_timestamp = int(nowdt.timestamp())
exchange_item.start = str(nowdt)
#store the current minute timestamp(s) in LAST
exchange_item.last_timestamp = int(thisminute.timestamp())
#This uses nowdt instead of thisminute, because we need the seconds granularity to know how old this last record is
exchange_item.last = str(nowdt)
timeframes = ['1m', '1h', '1d']
for timeframe in timeframes:
# Get x (1m, 1h, 1d) timeframe OHLCV candles data, grouped per y (hour, day, month) in a table entry
key = exchange+'_'+market_symbol+'_'+timeframe
candles = myexch[exchange].fetchOHLCV(market_symbol, timeframe, limit=cl.limit(timeframe))
save_candles(exchange, market_symbol, timeframe, nowdt, candles)
# This needs to happen after the OHLCV entries have all been collected, to save the last_timestamp
exchange_item.save()
return ("OK")
if __name__ == "__main__":
logfile = 'synctickers.log'
logging.basicConfig(level=logging.INFO, format='%(asctime)s.%(msecs)03d - %(name)s - %(levelname)s - %(message)s',
datefmt='%Y%m%d %H:%M:%S', filename=logfile)
if (os.environ.get('DYNAMODB_HOST') == None):
print ('DYNAMODB_HOST not set')
exit()
print ('DYNAMODB_HOST=' + os.environ.get('DYNAMODB_HOST'))
print ('logging output to ' + logfile)
error_count=0
while True:
try:
logging.info(lambda_handler(None, None))
except Exception:
logging.exception("Fatal error in main loop")
error_count+=1
sleep_time = 60 - (time.time()-11) % 60
logging.info("sleeping %f error count %d" %(sleep_time, error_count))
time.sleep(sleep_time)
| [] | [] | ["DYNAMODB_HOST"] | [] | ["DYNAMODB_HOST"] | python | 1 | 0 | |
main.go | package main
import (
"database/sql"
"flag"
"fmt"
"log"
"net"
"os"
"strings"
"text/tabwriter"
"github.com/go-sql-driver/mysql"
"golang.org/x/crypto/ssh"
"golang.org/x/crypto/ssh/agent"
)
type viaSSHDialer struct {
client *ssh.Client
}
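// Dial opens a TCP connection to addr through the established SSH client, letting the MySQL driver tunnel over the jump host.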
func (vsd *viaSSHDialer) Dial(addr string) (net.Conn, error) {
return vsd.client.Dial("tcp", addr)
}
func main() {
sshHost := flag.String("ssh-host", "", "defines SSH jump host name for MySQL connection") // SSH Server Hostname/IP
sshPort := flag.Int("ssh-port", 22, "defines SSH jump host port for MySQL connection") // SSH Port
sshUser := flag.String("ssh-user", "", "defines SSH username for jump host") // SSH Username
sshPass := flag.String("ssh-password", "", "defines password for jump host") // Empty string for no password
dbUser := flag.String("db-user", "", "DB user name") // DB username
dbPass := flag.String("db-password", "", "DB password") // DB Password
dbHost := flag.String("db-host", "127.0.0.1:3306", "DB host name (including port)") // DB Hostname/IP
dbName := flag.String("db-name", "", "DB name") // Database name
dbQuery := flag.String("db-query", "", "DB query to run")
flag.Parse()
var agentClient agent.Agent
// Establish a connection to the local ssh-agent
if conn, err := net.Dial("unix", os.Getenv("SSH_AUTH_SOCK")); err == nil {
defer conn.Close()
// Create a new instance of the ssh agent
agentClient = agent.NewClient(conn)
}
// The client configuration with configuration option to use the ssh-agent
sshConfig := &ssh.ClientConfig{
User: *sshUser,
Auth: []ssh.AuthMethod{},
HostKeyCallback: func(hostname string, remote net.Addr, key ssh.PublicKey) error {
return nil
},
}
// When the agentClient connection succeeded, add them as AuthMethod
if agentClient != nil {
sshConfig.Auth = append(sshConfig.Auth, ssh.PublicKeysCallback(agentClient.Signers))
}
// When there's a non empty password add the password AuthMethod
if *sshPass != "" {
sshConfig.Auth = append(sshConfig.Auth, ssh.PasswordCallback(func() (string, error) {
return *sshPass, nil
}))
}
// Connect to the SSH Server
sshsrv := fmt.Sprintf("%s:%d", *sshHost, *sshPort)
sshcon, err := ssh.Dial("tcp", sshsrv, sshConfig)
if err != nil {
log.Fatalf("error when connecting SSH server %s: %v\n", sshsrv, err)
}
defer sshcon.Close()
// Now we register the ViaSSHDialer with the ssh connection as a parameter
mysql.RegisterDial("mysql+tcp", (&viaSSHDialer{sshcon}).Dial)
// And now we can use our new driver with the regular mysql connection string tunneled through the SSH connection
dbconn := fmt.Sprintf("%s:%s@mysql+tcp(%s)/%s", *dbUser, *dbPass, *dbHost, *dbName)
db, err := sql.Open("mysql", dbconn)
if err != nil {
log.Fatalf("error when connecting to DB server '%s': %v\n", *dbHost, err)
}
fmt.Printf("Successfully connected to the db %s\n", *dbHost)
rows, err := db.Query(*dbQuery)
if err != nil {
log.Fatalf("error when running query\n'%s'\n%v\n", *dbQuery, err)
}
cols, err := rows.Columns()
if err != nil {
log.Fatalf("error when getting list of columns: %v\n", err)
}
w := tabwriter.NewWriter(os.Stdout, 0, 0, 1, ' ', tabwriter.Debug|tabwriter.TabIndent)
h := strings.Join(cols, "\t")
fmt.Fprintln(w, h)
w.Flush()
fmt.Fprintln(os.Stdout, strings.Repeat("-", len(h)))
vals := make([]vscanner, len(cols))
pointers := make([]interface{}, len(cols))
args := make([]interface{}, len(cols))
valf := ""
for i := 0; i < len(vals); i++ {
vals[i] = vscanner("")
pointers[i] = &vals[i]
valf += "%v"
if i < len(vals)-1 {
valf += "\t"
continue
}
valf += "\n"
}
for rows.Next() {
rows.Scan(pointers...)
for i := range pointers {
args[i] = *pointers[i].(*vscanner)
}
fmt.Fprintf(w, valf, args...)
}
w.Flush()
if err := rows.Close(); err != nil {
log.Fatalf("error when closing dataset: %v\n", err)
}
if err := db.Close(); err != nil {
log.Fatalf("error when closing database connection: %v\n", err)
}
}
type vscanner string
func (v *vscanner) Scan(src interface{}) error {
var source string
switch src.(type) {
case string:
source = src.(string)
case []byte:
source = string(src.([]byte))
default:
return fmt.Errorf("unknown type %T", src)
}
*v = vscanner(source)
return nil
}
| ["\"SSH_AUTH_SOCK\""] | [] | ["SSH_AUTH_SOCK"] | [] | ["SSH_AUTH_SOCK"] | go | 1 | 0 | |
viper.go | // Copyright © 2014 Steve Francia <[email protected]>.
//
// Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
// Viper is an application configuration system.
// It believes that applications can be configured a variety of ways
// via flags, ENVIRONMENT variables, configuration files retrieved
// from the file system, or a remote key/value store.
// Each item takes precedence over the item below it:
// overrides
// flag
// env
// config
// key/value store
// default
package viper
import (
"bytes"
"encoding/csv"
"encoding/json"
"errors"
"fmt"
"io"
"log"
"os"
"path/filepath"
"reflect"
"strconv"
"strings"
"sync"
"time"
"github.com/fsnotify/fsnotify"
"github.com/hashicorp/hcl"
"github.com/hashicorp/hcl/hcl/printer"
"github.com/magiconair/properties"
"github.com/mitchellh/mapstructure"
"github.com/pelletier/go-toml"
"github.com/spf13/afero"
"github.com/spf13/cast"
jww "github.com/spf13/jwalterweatherman"
"github.com/spf13/pflag"
"github.com/subosito/gotenv"
"gopkg.in/ini.v1"
"gopkg.in/yaml.v2"
)
// toLower lowercases src only when case-insensitive key matching is enabled (added by kalrey).
func (v *Viper) toLower(src string) string {
if v.insensitivise {
return strings.ToLower(src)
} else {
return src
}
}
// ConfigMarshalError happens when failing to marshal the configuration.
type ConfigMarshalError struct {
err error
}
// Error returns the formatted configuration error.
func (e ConfigMarshalError) Error() string {
return fmt.Sprintf("While marshaling config: %s", e.err.Error())
}
var v *Viper
type RemoteResponse struct {
Value []byte
Error error
}
func init() {
v = New()
}
type remoteConfigFactory interface {
Get(rp RemoteProvider) (io.Reader, error)
Watch(rp RemoteProvider) (io.Reader, error)
WatchChannel(rp RemoteProvider) (<-chan *RemoteResponse, chan bool)
}
// RemoteConfig is optional, see the remote package
var RemoteConfig remoteConfigFactory
// UnsupportedConfigError denotes encountering an unsupported
// configuration filetype.
type UnsupportedConfigError string
// Error returns the formatted configuration error.
func (str UnsupportedConfigError) Error() string {
return fmt.Sprintf("Unsupported Config Type %q", string(str))
}
// UnsupportedRemoteProviderError denotes encountering an unsupported remote
// provider. Currently only etcd and Consul are supported.
type UnsupportedRemoteProviderError string
// Error returns the formatted remote provider error.
func (str UnsupportedRemoteProviderError) Error() string {
return fmt.Sprintf("Unsupported Remote Provider Type %q", string(str))
}
// RemoteConfigError denotes encountering an error while trying to
// pull the configuration from the remote provider.
type RemoteConfigError string
// Error returns the formatted remote provider error
func (rce RemoteConfigError) Error() string {
return fmt.Sprintf("Remote Configurations Error: %s", string(rce))
}
// ConfigFileNotFoundError denotes failing to find configuration file.
type ConfigFileNotFoundError struct {
name, locations string
}
// Error returns the formatted configuration error.
func (fnfe ConfigFileNotFoundError) Error() string {
return fmt.Sprintf("Config File %q Not Found in %q", fnfe.name, fnfe.locations)
}
// ConfigFileAlreadyExistsError denotes failure to write new configuration file.
type ConfigFileAlreadyExistsError string
// Error returns the formatted error when configuration already exists.
func (faee ConfigFileAlreadyExistsError) Error() string {
return fmt.Sprintf("Config File %q Already Exists", string(faee))
}
// A DecoderConfigOption can be passed to viper.Unmarshal to configure
// mapstructure.DecoderConfig options
type DecoderConfigOption func(*mapstructure.DecoderConfig)
// DecodeHook returns a DecoderConfigOption which overrides the default
// DecoderConfig.DecodeHook value, the default is:
//
// mapstructure.ComposeDecodeHookFunc(
// mapstructure.StringToTimeDurationHookFunc(),
// mapstructure.StringToSliceHookFunc(","),
// )
func DecodeHook(hook mapstructure.DecodeHookFunc) DecoderConfigOption {
return func(c *mapstructure.DecoderConfig) {
c.DecodeHook = hook
}
}
// Viper is a prioritized configuration registry. It
// maintains a set of configuration sources, fetches
// values to populate those, and provides them according
// to the source's priority.
// The priority of the sources is the following:
// 1. overrides
// 2. flags
// 3. env. variables
// 4. config file
// 5. key/value store
// 6. defaults
//
// For example, if values from the following sources were loaded:
//
// Defaults : {
// "secret": "",
// "user": "default",
// "endpoint": "https://localhost"
// }
// Config : {
// "user": "root"
// "secret": "defaultsecret"
// }
// Env : {
// "secret": "somesecretkey"
// }
//
// The resulting config will have the following values:
//
// {
// "secret": "somesecretkey",
// "user": "root",
// "endpoint": "https://localhost"
// }
//
// Note: Vipers are not safe for concurrent Get() and Set() operations.
type Viper struct {
// Delimiter that separates a list of keys
// used to access a nested value in one go
keyDelim string
// A set of paths to look for the config file in
configPaths []string
// The filesystem to read config from.
fs afero.Fs
// A set of remote providers to search for the configuration
remoteProviders []*defaultRemoteProvider
// Name of file to look for inside the path
configName string
configFile string
configType string
configPermissions os.FileMode
envPrefix string
automaticEnvApplied bool
envKeyReplacer StringReplacer
allowEmptyEnv bool
config map[string]interface{}
override map[string]interface{}
defaults map[string]interface{}
kvstore map[string]interface{}
pflags map[string]FlagValue
env map[string][]string
aliases map[string]string
typeByDefValue bool
// Store read properties on the object so that we can write back in order with comments.
// This will only be used if the configuration read is a properties file.
properties *properties.Properties
onConfigChange func(fsnotify.Event)
//add by kalrey
insensitivise bool
}
// New returns an initialized Viper instance.
func New() *Viper {
v := new(Viper)
v.keyDelim = "."
v.configName = "config"
v.configPermissions = os.FileMode(0644)
v.fs = afero.NewOsFs()
v.config = make(map[string]interface{})
v.override = make(map[string]interface{})
v.defaults = make(map[string]interface{})
v.kvstore = make(map[string]interface{})
v.pflags = make(map[string]FlagValue)
v.env = make(map[string][]string)
v.aliases = make(map[string]string)
v.typeByDefValue = false
v.insensitivise = true
return v
}
// Option configures Viper using the functional options paradigm popularized by Rob Pike and Dave Cheney.
// If you're unfamiliar with this style,
// see https://commandcenter.blogspot.com/2014/01/self-referential-functions-and-design.html and
// https://dave.cheney.net/2014/10/17/functional-options-for-friendly-apis.
type Option interface {
apply(v *Viper)
}
type optionFunc func(v *Viper)
func (fn optionFunc) apply(v *Viper) {
fn(v)
}
//add by kalrey
func Insensitivise(insensitivise bool) {
v.Insensitivise(insensitivise)
}
//add by kalrey
func (v *Viper) Insensitivise(insensitivise bool) {
v.insensitivise = insensitivise
}
// KeyDelimiter sets the delimiter used for determining key parts.
// By default its value is ".".
func KeyDelimiter(d string) Option {
return optionFunc(func(v *Viper) {
v.keyDelim = d
})
}
// StringReplacer applies a set of replacements to a string.
type StringReplacer interface {
// Replace returns a copy of s with all replacements performed.
Replace(s string) string
}
// EnvKeyReplacer sets a replacer used for mapping environment variables to internal keys.
func EnvKeyReplacer(r StringReplacer) Option {
return optionFunc(func(v *Viper) {
v.envKeyReplacer = r
})
}
// NewWithOptions creates a new Viper instance.
func NewWithOptions(opts ...Option) *Viper {
v := New()
for _, opt := range opts {
opt.apply(v)
}
return v
}
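
// Usage sketch (illustrative): building an instance with functional options.
//
//     v := NewWithOptions(
//         KeyDelimiter("::"),
//         EnvKeyReplacer(strings.NewReplacer(".", "_")),
//     )
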
// Reset is intended for testing; it resets all settings to their defaults.
// It is in the public interface for the viper package so applications
// can use it in their testing as well.
func Reset() {
v = New()
SupportedExts = []string{"json", "toml", "yaml", "yml", "properties", "props", "prop", "hcl", "dotenv", "env", "ini"}
SupportedRemoteProviders = []string{"etcd", "consul", "firestore"}
}
type defaultRemoteProvider struct {
provider string
endpoint string
path string
secretKeyring string
}
func (rp defaultRemoteProvider) Provider() string {
return rp.provider
}
func (rp defaultRemoteProvider) Endpoint() string {
return rp.endpoint
}
func (rp defaultRemoteProvider) Path() string {
return rp.path
}
func (rp defaultRemoteProvider) SecretKeyring() string {
return rp.secretKeyring
}
// RemoteProvider stores the configuration necessary
// to connect to a remote key/value store.
// Optional secretKeyring to unencrypt encrypted values
// can be provided.
type RemoteProvider interface {
Provider() string
Endpoint() string
Path() string
SecretKeyring() string
}
// SupportedExts are universally supported extensions.
var SupportedExts = []string{"json", "toml", "yaml", "yml", "properties", "props", "prop", "hcl", "dotenv", "env", "ini"}
// SupportedRemoteProviders are universally supported remote providers.
var SupportedRemoteProviders = []string{"etcd", "consul", "firestore"}
func OnConfigChange(run func(in fsnotify.Event)) { v.OnConfigChange(run) }
func (v *Viper) OnConfigChange(run func(in fsnotify.Event)) {
v.onConfigChange = run
}
func WatchConfig() { v.WatchConfig() }
func (v *Viper) WatchConfig() {
initWG := sync.WaitGroup{}
initWG.Add(1)
go func() {
watcher, err := newWatcher()
if err != nil {
log.Fatal(err)
}
defer watcher.Close()
// we have to watch the entire directory to pick up renames/atomic saves in a cross-platform way
filename, err := v.getConfigFile()
if err != nil {
log.Printf("error: %v\n", err)
initWG.Done()
return
}
configFile := filepath.Clean(filename)
configDir, _ := filepath.Split(configFile)
realConfigFile, _ := filepath.EvalSymlinks(filename)
eventsWG := sync.WaitGroup{}
eventsWG.Add(1)
go func() {
for {
select {
case event, ok := <-watcher.Events:
if !ok { // 'Events' channel is closed
eventsWG.Done()
return
}
currentConfigFile, _ := filepath.EvalSymlinks(filename)
// we only care about the config file with the following cases:
// 1 - if the config file was modified or created
// 2 - if the real path to the config file changed (eg: k8s ConfigMap replacement)
const writeOrCreateMask = fsnotify.Write | fsnotify.Create
if (filepath.Clean(event.Name) == configFile &&
event.Op&writeOrCreateMask != 0) ||
(currentConfigFile != "" && currentConfigFile != realConfigFile) {
realConfigFile = currentConfigFile
err := v.ReadInConfig()
if err != nil {
log.Printf("error reading config file: %v\n", err)
}
if v.onConfigChange != nil {
v.onConfigChange(event)
}
} else if filepath.Clean(event.Name) == configFile &&
event.Op&fsnotify.Remove != 0 {
eventsWG.Done()
return
}
case err, ok := <-watcher.Errors:
if ok { // 'Errors' channel is not closed
log.Printf("watcher error: %v\n", err)
}
eventsWG.Done()
return
}
}
}()
watcher.Add(configDir)
initWG.Done() // done initializing the watch in this go routine, so the parent routine can move on...
eventsWG.Wait() // now, wait for event loop to end in this go-routine...
}()
initWG.Wait() // make sure that the go routine above fully ended before returning
}
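
// Usage sketch (illustrative): register the change callback before starting the
// watcher so no events are missed.
//
//     v.OnConfigChange(func(e fsnotify.Event) {
//         log.Println("config file changed:", e.Name)
//     })
//     v.WatchConfig()
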
// SetConfigFile explicitly defines the path, name and extension of the config file.
// Viper will use this and not check any of the config paths.
func SetConfigFile(in string) { v.SetConfigFile(in) }
func (v *Viper) SetConfigFile(in string) {
if in != "" {
v.configFile = in
}
}
// SetEnvPrefix defines a prefix that ENVIRONMENT variables will use.
// E.g. if your prefix is "spf", the env registry will look for env
// variables that start with "SPF_".
func SetEnvPrefix(in string) { v.SetEnvPrefix(in) }
func (v *Viper) SetEnvPrefix(in string) {
if in != "" {
v.envPrefix = in
}
}
func (v *Viper) mergeWithEnvPrefix(in string) string {
if v.envPrefix != "" {
return strings.ToUpper(v.envPrefix + "_" + in)
}
return strings.ToUpper(in)
}
// AllowEmptyEnv tells Viper to consider set,
// but empty environment variables as valid values instead of falling back.
// For backward compatibility reasons this is false by default.
func AllowEmptyEnv(allowEmptyEnv bool) { v.AllowEmptyEnv(allowEmptyEnv) }
func (v *Viper) AllowEmptyEnv(allowEmptyEnv bool) {
v.allowEmptyEnv = allowEmptyEnv
}
// TODO: should the getEnv logic be moved into find()? Key rewriting could be
// generalized for many use cases, e.g. Get("someKey") -> some_key
// (camel case to snake case for JSON keys, perhaps).
// getEnv is a wrapper around os.Getenv which replaces characters in the original
// key. This allows env vars which have different keys than the config object
// keys.
func (v *Viper) getEnv(key string) (string, bool) {
if v.envKeyReplacer != nil {
key = v.envKeyReplacer.Replace(key)
}
val, ok := os.LookupEnv(key)
return val, ok && (v.allowEmptyEnv || val != "")
}
// ConfigFileUsed returns the file used to populate the config registry.
func ConfigFileUsed() string { return v.ConfigFileUsed() }
func (v *Viper) ConfigFileUsed() string { return v.configFile }
// AddConfigPath adds a path for Viper to search for the config file in.
// Can be called multiple times to define multiple search paths.
func AddConfigPath(in string) { v.AddConfigPath(in) }
func (v *Viper) AddConfigPath(in string) {
if in != "" {
absin := absPathify(in)
jww.INFO.Println("adding", absin, "to paths to search")
if !stringInSlice(absin, v.configPaths) {
v.configPaths = append(v.configPaths, absin)
}
}
}
// AddRemoteProvider adds a remote configuration source.
// Remote Providers are searched in the order they are added.
// provider is a string value: "etcd", "consul" or "firestore" are currently supported.
// endpoint is the url. etcd requires http://ip:port, consul requires ip:port.
// path is the path in the k/v store to retrieve configuration.
// To retrieve a config file called myapp.json from /configs/myapp.json
// you should set path to /configs and set config name (SetConfigName()) to
// "myapp".
func AddRemoteProvider(provider, endpoint, path string) error {
return v.AddRemoteProvider(provider, endpoint, path)
}
func (v *Viper) AddRemoteProvider(provider, endpoint, path string) error {
if !stringInSlice(provider, SupportedRemoteProviders) {
return UnsupportedRemoteProviderError(provider)
}
if provider != "" && endpoint != "" {
jww.INFO.Printf("adding %s:%s to remote provider list", provider, endpoint)
rp := &defaultRemoteProvider{
endpoint: endpoint,
provider: provider,
path: path,
}
if !v.providerPathExists(rp) {
v.remoteProviders = append(v.remoteProviders, rp)
}
}
return nil
}
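
// Usage sketch (illustrative; the endpoint and path are placeholders, and remote
// support additionally requires a blank import of the viper/remote package):
//
//     err := v.AddRemoteProvider("etcd", "http://127.0.0.1:4001", "/config/myapp.json")
//     v.SetConfigType("json") // the remote payload carries no file extension
//     err = v.ReadRemoteConfig()
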
// AddSecureRemoteProvider adds a remote configuration source.
// Secure Remote Providers are searched in the order they are added.
// provider is a string value: "etcd", "consul" or "firestore" are currently supported.
// endpoint is the url. etcd requires http://ip:port, consul requires ip:port.
// secretkeyring is the filepath to your openpgp secret keyring, e.g. /etc/secrets/myring.gpg.
// path is the path in the k/v store to retrieve configuration.
// To retrieve a config file called myapp.json from /configs/myapp.json
// you should set path to /configs and set config name (SetConfigName()) to
// "myapp".
// Secure Remote Providers are implemented with github.com/bketelsen/crypt
func AddSecureRemoteProvider(provider, endpoint, path, secretkeyring string) error {
return v.AddSecureRemoteProvider(provider, endpoint, path, secretkeyring)
}
func (v *Viper) AddSecureRemoteProvider(provider, endpoint, path, secretkeyring string) error {
if !stringInSlice(provider, SupportedRemoteProviders) {
return UnsupportedRemoteProviderError(provider)
}
if provider != "" && endpoint != "" {
jww.INFO.Printf("adding %s:%s to remote provider list", provider, endpoint)
rp := &defaultRemoteProvider{
endpoint: endpoint,
provider: provider,
path: path,
secretKeyring: secretkeyring,
}
if !v.providerPathExists(rp) {
v.remoteProviders = append(v.remoteProviders, rp)
}
}
return nil
}
func (v *Viper) providerPathExists(p *defaultRemoteProvider) bool {
for _, y := range v.remoteProviders {
if reflect.DeepEqual(y, p) {
return true
}
}
return false
}
// searchMap recursively searches for a value for path in source map.
// Returns nil if not found.
// Note: This assumes that the path entries and map keys are lower cased.
func (v *Viper) searchMap(source map[string]interface{}, path []string) interface{} {
if len(path) == 0 {
return source
}
next, ok := source[path[0]]
if ok {
// Fast path
if len(path) == 1 {
return next
}
// Nested case
switch next.(type) {
case map[interface{}]interface{}:
return v.searchMap(cast.ToStringMap(next), path[1:])
case map[string]interface{}:
// Type assertion is safe here since it is only reached
// if the type of `next` is the same as the type being asserted
return v.searchMap(next.(map[string]interface{}), path[1:])
default:
// got a value but nested key expected, return "nil" for not found
return nil
}
}
return nil
}
// searchIndexableWithPathPrefixes recursively searches for a value for path in source map/slice.
//
// While searchMap() considers each path element as a single map key or slice index, this
// function searches for, and prioritizes, merged path elements.
// e.g., if in the source, "foo" is defined with a sub-key "bar", and "foo.bar"
// is also defined, this latter value is returned for path ["foo", "bar"].
//
// This should be useful only at config level (other maps may not contain dots
// in their keys).
//
// Note: This assumes that the path entries and map keys are lower cased.
func (v *Viper) searchIndexableWithPathPrefixes(source interface{}, path []string) interface{} {
if len(path) == 0 {
return source
}
// search for path prefixes, starting from the longest one
for i := len(path); i > 0; i-- {
prefixKey := v.toLower(strings.Join(path[0:i], v.keyDelim))
var val interface{}
switch sourceIndexable := source.(type) {
case []interface{}:
val = v.searchSliceWithPathPrefixes(sourceIndexable, prefixKey, i, path)
case map[string]interface{}:
val = v.searchMapWithPathPrefixes(sourceIndexable, prefixKey, i, path)
}
if val != nil {
return val
}
}
// not found
return nil
}
// searchSliceWithPathPrefixes searches for a value for path in sourceSlice
//
// This function is part of the searchIndexableWithPathPrefixes recurring search and
// should not be called directly from functions other than searchIndexableWithPathPrefixes.
func (v *Viper) searchSliceWithPathPrefixes(
sourceSlice []interface{},
prefixKey string,
pathIndex int,
path []string,
) interface{} {
// if the prefixKey is not a number or it is out of bounds of the slice
index, err := strconv.Atoi(prefixKey)
if err != nil || len(sourceSlice) <= index {
return nil
}
next := sourceSlice[index]
// Fast path
if pathIndex == len(path) {
return next
}
switch n := next.(type) {
case map[interface{}]interface{}:
return v.searchIndexableWithPathPrefixes(cast.ToStringMap(n), path[pathIndex:])
case map[string]interface{}, []interface{}:
return v.searchIndexableWithPathPrefixes(n, path[pathIndex:])
default:
// got a value but nested key expected, do nothing and look for next prefix
}
// not found
return nil
}
// searchMapWithPathPrefixes searches for a value for path in sourceMap
//
// This function is part of the searchIndexableWithPathPrefixes recurring search and
// should not be called directly from functions other than searchIndexableWithPathPrefixes.
func (v *Viper) searchMapWithPathPrefixes(
sourceMap map[string]interface{},
prefixKey string,
pathIndex int,
path []string,
) interface{} {
next, ok := sourceMap[prefixKey]
if !ok {
return nil
}
// Fast path
if pathIndex == len(path) {
return next
}
// Nested case
switch n := next.(type) {
case map[interface{}]interface{}:
return v.searchIndexableWithPathPrefixes(cast.ToStringMap(n), path[pathIndex:])
case map[string]interface{}, []interface{}:
return v.searchIndexableWithPathPrefixes(n, path[pathIndex:])
default:
// got a value but nested key expected, do nothing and look for next prefix
}
// not found
return nil
}
// isPathShadowedInDeepMap makes sure the given path is not shadowed somewhere
// on its path in the map.
// e.g., if "foo.bar" has a value in the given map, it “shadows”
// "foo.bar.baz" in a lower-priority map
func (v *Viper) isPathShadowedInDeepMap(path []string, m map[string]interface{}) string {
var parentVal interface{}
for i := 1; i < len(path); i++ {
parentVal = v.searchMap(m, path[0:i])
if parentVal == nil {
// not found, no need to add more path elements
return ""
}
switch parentVal.(type) {
case map[interface{}]interface{}:
continue
case map[string]interface{}:
continue
default:
// parentVal is a regular value which shadows "path"
return strings.Join(path[0:i], v.keyDelim)
}
}
return ""
}
// isPathShadowedInFlatMap makes sure the given path is not shadowed somewhere
// in a sub-path of the map.
// e.g., if "foo.bar" has a value in the given map, it “shadows”
// "foo.bar.baz" in a lower-priority map
func (v *Viper) isPathShadowedInFlatMap(path []string, mi interface{}) string {
// unify input map
var m map[string]interface{}
switch mi.(type) {
case map[string]string, map[string]FlagValue:
m = cast.ToStringMap(mi)
default:
return ""
}
// scan paths
var parentKey string
for i := 1; i < len(path); i++ {
parentKey = strings.Join(path[0:i], v.keyDelim)
if _, ok := m[parentKey]; ok {
return parentKey
}
}
return ""
}
// isPathShadowedInAutoEnv makes sure the given path is not shadowed somewhere
// in the environment, when automatic env is on.
// e.g., if "foo.bar" has a value in the environment, it “shadows”
// "foo.bar.baz" in a lower-priority map
func (v *Viper) isPathShadowedInAutoEnv(path []string) string {
var parentKey string
for i := 1; i < len(path); i++ {
parentKey = strings.Join(path[0:i], v.keyDelim)
if _, ok := v.getEnv(v.mergeWithEnvPrefix(parentKey)); ok {
return parentKey
}
}
return ""
}
// SetTypeByDefaultValue enables or disables the inference of a key value's
// type when the Get function is used based upon a key's default value as
// opposed to the value returned based on the normal fetch logic.
//
// For example, if a key has a default value of []string{} and the same key
// is set via an environment variable to "a b c", a call to the Get function
// would return a string slice for the key if the key's type is inferred by
// the default value and the Get function would return:
//
// []string {"a", "b", "c"}
//
// Otherwise the Get function would return:
//
// "a b c"
func SetTypeByDefaultValue(enable bool) { v.SetTypeByDefaultValue(enable) }
func (v *Viper) SetTypeByDefaultValue(enable bool) {
v.typeByDefValue = enable
}
// GetViper gets the global Viper instance.
func GetViper() *Viper {
return v
}
// Get can retrieve any value given the key to use.
// Get is case-insensitive for a key.
// Get has the behavior of returning the value associated with the first
// place from where it is set. Viper will check in the following order:
// override, flag, env, config file, key/value store, default
//
// Get returns an interface. For a specific value use one of the Get____ methods.
func Get(key string) interface{} { return v.Get(key) }
func (v *Viper) Get(key string) interface{} {
lcaseKey := v.toLower(key)
val := v.find(lcaseKey, true)
if val == nil {
return nil
}
if v.typeByDefValue {
// TODO(bep) this branch isn't covered by a single test.
valType := val
path := strings.Split(lcaseKey, v.keyDelim)
defVal := v.searchMap(v.defaults, path)
if defVal != nil {
valType = defVal
}
switch valType.(type) {
case bool:
return cast.ToBool(val)
case string:
return cast.ToString(val)
case int32, int16, int8, int:
return cast.ToInt(val)
case uint:
return cast.ToUint(val)
case uint32:
return cast.ToUint32(val)
case uint64:
return cast.ToUint64(val)
case int64:
return cast.ToInt64(val)
case float64, float32:
return cast.ToFloat64(val)
case time.Time:
return cast.ToTime(val)
case time.Duration:
return cast.ToDuration(val)
case []string:
return cast.ToStringSlice(val)
case []int:
return cast.ToIntSlice(val)
}
}
return val
}
// Sub returns new Viper instance representing a sub tree of this instance.
// Sub is case-insensitive for a key.
func Sub(key string) *Viper { return v.Sub(key) }
func (v *Viper) Sub(key string) *Viper {
subv := New()
data := v.Get(key)
if data == nil {
return nil
}
if reflect.TypeOf(data).Kind() == reflect.Map {
subv.config = cast.ToStringMap(data)
return subv
}
return nil
}
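
// Usage sketch (illustrative; assumes the loaded config contains a nested "cache"
// map with a "max-items" key):
//
//     cacheCfg := v.Sub("cache")
//     if cacheCfg != nil {
//         maxItems := cacheCfg.GetInt("max-items")
//         _ = maxItems
//     }
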
// GetString returns the value associated with the key as a string.
func GetString(key string) string { return v.GetString(key) }
func (v *Viper) GetString(key string) string {
return cast.ToString(v.Get(key))
}
// GetBool returns the value associated with the key as a boolean.
func GetBool(key string) bool { return v.GetBool(key) }
func (v *Viper) GetBool(key string) bool {
return cast.ToBool(v.Get(key))
}
// GetInt returns the value associated with the key as an integer.
func GetInt(key string) int { return v.GetInt(key) }
func (v *Viper) GetInt(key string) int {
return cast.ToInt(v.Get(key))
}
// GetInt32 returns the value associated with the key as an integer.
func GetInt32(key string) int32 { return v.GetInt32(key) }
func (v *Viper) GetInt32(key string) int32 {
return cast.ToInt32(v.Get(key))
}
// GetInt64 returns the value associated with the key as an integer.
func GetInt64(key string) int64 { return v.GetInt64(key) }
func (v *Viper) GetInt64(key string) int64 {
return cast.ToInt64(v.Get(key))
}
// GetUint returns the value associated with the key as an unsigned integer.
func GetUint(key string) uint { return v.GetUint(key) }
func (v *Viper) GetUint(key string) uint {
return cast.ToUint(v.Get(key))
}
// GetUint32 returns the value associated with the key as an unsigned integer.
func GetUint32(key string) uint32 { return v.GetUint32(key) }
func (v *Viper) GetUint32(key string) uint32 {
return cast.ToUint32(v.Get(key))
}
// GetUint64 returns the value associated with the key as an unsigned integer.
func GetUint64(key string) uint64 { return v.GetUint64(key) }
func (v *Viper) GetUint64(key string) uint64 {
return cast.ToUint64(v.Get(key))
}
// GetFloat64 returns the value associated with the key as a float64.
func GetFloat64(key string) float64 { return v.GetFloat64(key) }
func (v *Viper) GetFloat64(key string) float64 {
return cast.ToFloat64(v.Get(key))
}
// GetTime returns the value associated with the key as time.
func GetTime(key string) time.Time { return v.GetTime(key) }
func (v *Viper) GetTime(key string) time.Time {
return cast.ToTime(v.Get(key))
}
// GetDuration returns the value associated with the key as a duration.
func GetDuration(key string) time.Duration { return v.GetDuration(key) }
func (v *Viper) GetDuration(key string) time.Duration {
return cast.ToDuration(v.Get(key))
}
// GetIntSlice returns the value associated with the key as a slice of int values.
func GetIntSlice(key string) []int { return v.GetIntSlice(key) }
func (v *Viper) GetIntSlice(key string) []int {
return cast.ToIntSlice(v.Get(key))
}
// GetStringSlice returns the value associated with the key as a slice of strings.
func GetStringSlice(key string) []string { return v.GetStringSlice(key) }
func (v *Viper) GetStringSlice(key string) []string {
return cast.ToStringSlice(v.Get(key))
}
// GetStringMap returns the value associated with the key as a map of interfaces.
func GetStringMap(key string) map[string]interface{} { return v.GetStringMap(key) }
func (v *Viper) GetStringMap(key string) map[string]interface{} {
return cast.ToStringMap(v.Get(key))
}
// GetStringMapString returns the value associated with the key as a map of strings.
func GetStringMapString(key string) map[string]string { return v.GetStringMapString(key) }
func (v *Viper) GetStringMapString(key string) map[string]string {
return cast.ToStringMapString(v.Get(key))
}
// GetStringMapStringSlice returns the value associated with the key as a map to a slice of strings.
func GetStringMapStringSlice(key string) map[string][]string { return v.GetStringMapStringSlice(key) }
func (v *Viper) GetStringMapStringSlice(key string) map[string][]string {
return cast.ToStringMapStringSlice(v.Get(key))
}
// GetSizeInBytes returns the size of the value associated with the given key
// in bytes.
func GetSizeInBytes(key string) uint { return v.GetSizeInBytes(key) }
func (v *Viper) GetSizeInBytes(key string) uint {
sizeStr := cast.ToString(v.Get(key))
return parseSizeInBytes(sizeStr)
}
// UnmarshalKey takes a single key and unmarshals it into a Struct.
func UnmarshalKey(key string, rawVal interface{}, opts ...DecoderConfigOption) error {
return v.UnmarshalKey(key, rawVal, opts...)
}
func (v *Viper) UnmarshalKey(key string, rawVal interface{}, opts ...DecoderConfigOption) error {
return decode(v.Get(key), defaultDecoderConfig(rawVal, opts...))
}
// Unmarshal unmarshals the config into a Struct. Make sure that the tags
// on the fields of the structure are properly set.
func Unmarshal(rawVal interface{}, opts ...DecoderConfigOption) error {
return v.Unmarshal(rawVal, opts...)
}
func (v *Viper) Unmarshal(rawVal interface{}, opts ...DecoderConfigOption) error {
return decode(v.AllSettings(), defaultDecoderConfig(rawVal, opts...))
}
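
// Usage sketch (illustrative; the struct and keys are hypothetical):
//
//     type serverConfig struct {
//         Port    int
//         Timeout time.Duration
//     }
//     var sc serverConfig
//     if err := v.Unmarshal(&sc); err != nil {
//         log.Fatalf("unable to decode config: %v", err)
//     }
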
// defaultDecoderConfig returns the default mapstructure.DecoderConfig with support
// for time.Duration values & string slices.
func defaultDecoderConfig(output interface{}, opts ...DecoderConfigOption) *mapstructure.DecoderConfig {
c := &mapstructure.DecoderConfig{
Metadata: nil,
Result: output,
WeaklyTypedInput: true,
DecodeHook: mapstructure.ComposeDecodeHookFunc(
mapstructure.StringToTimeDurationHookFunc(),
mapstructure.StringToSliceHookFunc(","),
),
}
for _, opt := range opts {
opt(c)
}
return c
}
// A wrapper around mapstructure.Decode that mimics the WeakDecode functionality
func decode(input interface{}, config *mapstructure.DecoderConfig) error {
decoder, err := mapstructure.NewDecoder(config)
if err != nil {
return err
}
return decoder.Decode(input)
}
// UnmarshalExact unmarshals the config into a Struct, erroring if a field is nonexistent
// in the destination struct.
func UnmarshalExact(rawVal interface{}, opts ...DecoderConfigOption) error {
return v.UnmarshalExact(rawVal, opts...)
}
func (v *Viper) UnmarshalExact(rawVal interface{}, opts ...DecoderConfigOption) error {
config := defaultDecoderConfig(rawVal, opts...)
config.ErrorUnused = true
return decode(v.AllSettings(), config)
}
// BindPFlags binds a full flag set to the configuration, using each flag's long
// name as the config key.
func BindPFlags(flags *pflag.FlagSet) error { return v.BindPFlags(flags) }
func (v *Viper) BindPFlags(flags *pflag.FlagSet) error {
return v.BindFlagValues(pflagValueSet{flags})
}
// BindPFlag binds a specific key to a pflag (as used by cobra).
// Example (where serverCmd is a Cobra instance):
//
// serverCmd.Flags().Int("port", 1138, "Port to run Application server on")
// Viper.BindPFlag("port", serverCmd.Flags().Lookup("port"))
//
func BindPFlag(key string, flag *pflag.Flag) error { return v.BindPFlag(key, flag) }
func (v *Viper) BindPFlag(key string, flag *pflag.Flag) error {
if flag == nil {
return fmt.Errorf("flag for %q is nil", key)
}
return v.BindFlagValue(key, pflagValue{flag})
}
// BindFlagValues binds a full FlagValue set to the configuration, using each flag's long
// name as the config key.
func BindFlagValues(flags FlagValueSet) error { return v.BindFlagValues(flags) }
func (v *Viper) BindFlagValues(flags FlagValueSet) (err error) {
flags.VisitAll(func(flag FlagValue) {
if err = v.BindFlagValue(flag.Name(), flag); err != nil {
return
}
})
return nil
}
// BindFlagValue binds a specific key to a FlagValue.
func BindFlagValue(key string, flag FlagValue) error { return v.BindFlagValue(key, flag) }
func (v *Viper) BindFlagValue(key string, flag FlagValue) error {
if flag == nil {
return fmt.Errorf("flag for %q is nil", key)
}
v.pflags[v.toLower(key)] = flag
return nil
}
// BindEnv binds a Viper key to an ENV variable.
// ENV variables are case sensitive.
// If only a key is provided, it will use the env key matching the key, uppercased.
// If more arguments are provided, they will represent the env variable names that
// should bind to this key and will be taken in the specified order.
// EnvPrefix will be used when set and an explicit env name is not provided.
func BindEnv(input ...string) error { return v.BindEnv(input...) }
func (v *Viper) BindEnv(input ...string) error {
if len(input) == 0 {
return fmt.Errorf("missing key to bind to")
}
key := v.toLower(input[0])
if len(input) == 1 {
v.env[key] = append(v.env[key], v.mergeWithEnvPrefix(key))
} else {
v.env[key] = append(v.env[key], input[1:]...)
}
return nil
}
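
// Usage sketch (illustrative; the key names are hypothetical): with a prefix set,
// a bare BindEnv("port") binds to SPF_PORT, while an explicit env name is used as
// given.
//
//     v.SetEnvPrefix("spf")
//     _ = v.BindEnv("port")                 // reads SPF_PORT
//     _ = v.BindEnv("secret", "APP_SECRET") // reads APP_SECRET verbatim
//     v.AutomaticEnv()
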
// Given a key, find the value.
//
// Viper will check to see if an alias exists first.
// Viper will then check in the following order:
// flag, env, config file, key/value store.
// Lastly, if no value was found and flagDefault is true, and if the key
// corresponds to a flag, the flag's default value is returned.
//
// Note: this assumes a lower-cased key given.
func (v *Viper) find(lcaseKey string, flagDefault bool) interface{} {
var (
val interface{}
exists bool
path = strings.Split(lcaseKey, v.keyDelim)
nested = len(path) > 1
)
// compute the path through the nested maps to the nested value
if nested && v.isPathShadowedInDeepMap(path, castMapStringToMapInterface(v.aliases)) != "" {
return nil
}
// if the requested key is an alias, then return the proper key
lcaseKey = v.realKey(lcaseKey)
path = strings.Split(lcaseKey, v.keyDelim)
nested = len(path) > 1
// Set() override first
val = v.searchMap(v.override, path)
if val != nil {
return val
}
if nested && v.isPathShadowedInDeepMap(path, v.override) != "" {
return nil
}
// PFlag override next
flag, exists := v.pflags[lcaseKey]
if exists && flag.HasChanged() {
switch flag.ValueType() {
case "int", "int8", "int16", "int32", "int64":
return cast.ToInt(flag.ValueString())
case "bool":
return cast.ToBool(flag.ValueString())
case "stringSlice":
s := strings.TrimPrefix(flag.ValueString(), "[")
s = strings.TrimSuffix(s, "]")
res, _ := readAsCSV(s)
return res
case "intSlice":
s := strings.TrimPrefix(flag.ValueString(), "[")
s = strings.TrimSuffix(s, "]")
res, _ := readAsCSV(s)
return cast.ToIntSlice(res)
case "stringToString":
return stringToStringConv(flag.ValueString())
default:
return flag.ValueString()
}
}
if nested && v.isPathShadowedInFlatMap(path, v.pflags) != "" {
return nil
}
// Env override next
if v.automaticEnvApplied {
// even if it hasn't been registered, if automaticEnv is used,
// check any Get request
if val, ok := v.getEnv(v.mergeWithEnvPrefix(lcaseKey)); ok {
return val
}
if nested && v.isPathShadowedInAutoEnv(path) != "" {
return nil
}
}
envkeys, exists := v.env[lcaseKey]
if exists {
for _, envkey := range envkeys {
if val, ok := v.getEnv(envkey); ok {
return val
}
}
}
if nested && v.isPathShadowedInFlatMap(path, v.env) != "" {
return nil
}
// Config file next
val = v.searchIndexableWithPathPrefixes(v.config, path)
if val != nil {
return val
}
if nested && v.isPathShadowedInDeepMap(path, v.config) != "" {
return nil
}
// K/V store next
val = v.searchMap(v.kvstore, path)
if val != nil {
return val
}
if nested && v.isPathShadowedInDeepMap(path, v.kvstore) != "" {
return nil
}
// Default next
val = v.searchMap(v.defaults, path)
if val != nil {
return val
}
if nested && v.isPathShadowedInDeepMap(path, v.defaults) != "" {
return nil
}
if flagDefault {
// last chance: if no value is found and a flag does exist for the key,
// get the flag's default value even if the flag's value has not been set.
if flag, exists := v.pflags[lcaseKey]; exists {
switch flag.ValueType() {
case "int", "int8", "int16", "int32", "int64":
return cast.ToInt(flag.ValueString())
case "bool":
return cast.ToBool(flag.ValueString())
case "stringSlice":
s := strings.TrimPrefix(flag.ValueString(), "[")
s = strings.TrimSuffix(s, "]")
res, _ := readAsCSV(s)
return res
case "intSlice":
s := strings.TrimPrefix(flag.ValueString(), "[")
s = strings.TrimSuffix(s, "]")
res, _ := readAsCSV(s)
return cast.ToIntSlice(res)
case "stringToString":
return stringToStringConv(flag.ValueString())
default:
return flag.ValueString()
}
}
// last item, no need to check shadowing
}
return nil
}
func readAsCSV(val string) ([]string, error) {
if val == "" {
return []string{}, nil
}
stringReader := strings.NewReader(val)
csvReader := csv.NewReader(stringReader)
return csvReader.Read()
}
// mostly copied from pflag's implementation of this operation here https://github.com/spf13/pflag/blob/master/string_to_string.go#L79
// alterations are: errors are swallowed, map[string]interface{} is returned in order to enable cast.ToStringMap
func stringToStringConv(val string) interface{} {
val = strings.Trim(val, "[]")
// An empty string would cause an empty map
if len(val) == 0 {
return map[string]interface{}{}
}
r := csv.NewReader(strings.NewReader(val))
ss, err := r.Read()
if err != nil {
return nil
}
out := make(map[string]interface{}, len(ss))
for _, pair := range ss {
kv := strings.SplitN(pair, "=", 2)
if len(kv) != 2 {
return nil
}
out[kv[0]] = kv[1]
}
return out
}
// IsSet checks to see if the key has been set in any of the data locations.
// IsSet is case-insensitive for a key.
func IsSet(key string) bool { return v.IsSet(key) }
func (v *Viper) IsSet(key string) bool {
lcaseKey := v.toLower(key)
val := v.find(lcaseKey, false)
return val != nil
}
// AutomaticEnv makes Viper check if environment variables match any of the existing keys
// (config, default or flags). If matching env vars are found, they are loaded into Viper.
func AutomaticEnv() { v.AutomaticEnv() }
func (v *Viper) AutomaticEnv() {
v.automaticEnvApplied = true
}
// SetEnvKeyReplacer sets the strings.Replacer on the viper object
// Useful for mapping an environmental variable to a key that does
// not match it.
func SetEnvKeyReplacer(r *strings.Replacer) { v.SetEnvKeyReplacer(r) }
func (v *Viper) SetEnvKeyReplacer(r *strings.Replacer) {
v.envKeyReplacer = r
}
// RegisterAlias creates an alias that provides another accessor for the same key.
// This enables one to change a name without breaking the application.
func RegisterAlias(alias string, key string) { v.RegisterAlias(alias, key) }
func (v *Viper) RegisterAlias(alias string, key string) {
v.registerAlias(alias, v.toLower(key))
}
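
// Usage sketch (illustrative; the key names are hypothetical): after registering
// an alias, both names resolve to the same value.
//
//     v.RegisterAlias("loud", "verbose")
//     v.Set("verbose", true)
//     _ = v.GetBool("loud") // true
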
func (v *Viper) registerAlias(alias string, key string) {
alias = v.toLower(alias)
if alias != key && alias != v.realKey(key) {
_, exists := v.aliases[alias]
if !exists {
// if we alias something that exists in one of the maps to another
// name, we'll never be able to get that value using the original
// name, so move the config value to the new realkey.
if val, ok := v.config[alias]; ok {
delete(v.config, alias)
v.config[key] = val
}
if val, ok := v.kvstore[alias]; ok {
delete(v.kvstore, alias)
v.kvstore[key] = val
}
if val, ok := v.defaults[alias]; ok {
delete(v.defaults, alias)
v.defaults[key] = val
}
if val, ok := v.override[alias]; ok {
delete(v.override, alias)
v.override[key] = val
}
v.aliases[alias] = key
}
} else {
jww.WARN.Println("Creating circular reference alias", alias, key, v.realKey(key))
}
}
func (v *Viper) realKey(key string) string {
newkey, exists := v.aliases[key]
if exists {
jww.DEBUG.Println("Alias", key, "to", newkey)
return v.realKey(newkey)
}
return key
}
// InConfig checks to see if the given key (or an alias) is in the config file.
func InConfig(key string) bool { return v.InConfig(key) }
func (v *Viper) InConfig(key string) bool {
// if the requested key is an alias, then return the proper key
key = v.realKey(key)
_, exists := v.config[key]
return exists
}
// SetDefault sets the default value for this key.
// SetDefault is case-insensitive for a key.
// The default is only used when no value is provided by the user via flag, config or ENV.
func SetDefault(key string, value interface{}) { v.SetDefault(key, value) }
func (v *Viper) SetDefault(key string, value interface{}) {
// If alias passed in, then set the proper default
key = v.realKey(v.toLower(key))
value = toCaseInsensitiveValue(value)
path := strings.Split(key, v.keyDelim)
lastKey := v.toLower(path[len(path)-1])
deepestMap := deepSearch(v.defaults, path[0:len(path)-1])
// set innermost value
deepestMap[lastKey] = value
}
// Set sets the value for the key in the override register.
// Set is case-insensitive for a key.
// Will be used instead of values obtained via
// flags, config file, ENV, default, or key/value store.
func Set(key string, value interface{}) { v.Set(key, value) }
func (v *Viper) Set(key string, value interface{}) {
// If alias passed in, then set the proper override
key = v.realKey(v.toLower(key))
value = toCaseInsensitiveValue(value)
path := strings.Split(key, v.keyDelim)
lastKey := v.toLower(path[len(path)-1])
deepestMap := deepSearch(v.override, path[0:len(path)-1])
// set innermost value
deepestMap[lastKey] = value
}
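
// Usage sketch (illustrative; the keys are hypothetical): defaults sit at the
// bottom of the priority order, Set() overrides sit at the top.
//
//     v.SetDefault("taxonomies", map[string]string{"tag": "tags"})
//     v.Set("verbose", true) // wins over flags, env, config file and defaults
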
// ReadInConfig will discover and load the configuration file from disk
// and key/value stores, searching in one of the defined paths.
func ReadInConfig() error { return v.ReadInConfig() }
func (v *Viper) ReadInConfig() error {
jww.INFO.Println("Attempting to read in config file")
filename, err := v.getConfigFile()
if err != nil {
return err
}
if !stringInSlice(v.getConfigType(), SupportedExts) {
return UnsupportedConfigError(v.getConfigType())
}
jww.DEBUG.Println("Reading file: ", filename)
file, err := afero.ReadFile(v.fs, filename)
if err != nil {
return err
}
config := make(map[string]interface{})
err = v.unmarshalReader(bytes.NewReader(file), config)
if err != nil {
return err
}
v.config = config
return nil
}
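
// Usage sketch (illustrative; paths and file name are hypothetical): a typical
// startup sequence that treats a missing file differently from a parse error.
//
//     v.SetConfigName("config")
//     v.AddConfigPath("/etc/appname/")
//     v.AddConfigPath(".")
//     if err := v.ReadInConfig(); err != nil {
//         if _, ok := err.(ConfigFileNotFoundError); ok {
//             // no config file found; rely on defaults, flags and env
//         } else {
//             log.Fatalf("fatal error reading config: %v", err)
//         }
//     }
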
// MergeInConfig merges a new configuration with an existing config.
func MergeInConfig() error { return v.MergeInConfig() }
func (v *Viper) MergeInConfig() error {
jww.INFO.Println("Attempting to merge in config file")
filename, err := v.getConfigFile()
if err != nil {
return err
}
if !stringInSlice(v.getConfigType(), SupportedExts) {
return UnsupportedConfigError(v.getConfigType())
}
file, err := afero.ReadFile(v.fs, filename)
if err != nil {
return err
}
return v.MergeConfig(bytes.NewReader(file))
}
// ReadConfig will read a configuration file, setting existing keys to nil if the
// key does not exist in the file.
func ReadConfig(in io.Reader) error { return v.ReadConfig(in) }
func (v *Viper) ReadConfig(in io.Reader) error {
v.config = make(map[string]interface{})
return v.unmarshalReader(in, v.config)
}
// MergeConfig merges a new configuration with an existing config.
func MergeConfig(in io.Reader) error { return v.MergeConfig(in) }
func (v *Viper) MergeConfig(in io.Reader) error {
cfg := make(map[string]interface{})
if err := v.unmarshalReader(in, cfg); err != nil {
return err
}
return v.MergeConfigMap(cfg)
}
// MergeConfigMap merges the configuration from the map given with an existing config.
// Note that the map given may be modified.
func MergeConfigMap(cfg map[string]interface{}) error { return v.MergeConfigMap(cfg) }
func (v *Viper) MergeConfigMap(cfg map[string]interface{}) error {
if v.config == nil {
v.config = make(map[string]interface{})
}
insensitiviseMap(cfg)
mergeMaps(cfg, v.config, nil)
return nil
}
// WriteConfig writes the current configuration to a file.
func WriteConfig() error { return v.WriteConfig() }
func (v *Viper) WriteConfig() error {
filename, err := v.getConfigFile()
if err != nil {
return err
}
return v.writeConfig(filename, true)
}
// SafeWriteConfig writes current configuration to file only if the file does not exist.
func SafeWriteConfig() error { return v.SafeWriteConfig() }
func (v *Viper) SafeWriteConfig() error {
if len(v.configPaths) < 1 {
return errors.New("missing configuration for 'configPath'")
}
return v.SafeWriteConfigAs(filepath.Join(v.configPaths[0], v.configName+"."+v.configType))
}
// WriteConfigAs writes current configuration to a given filename.
func WriteConfigAs(filename string) error { return v.WriteConfigAs(filename) }
func (v *Viper) WriteConfigAs(filename string) error {
return v.writeConfig(filename, true)
}
// SafeWriteConfigAs writes current configuration to a given filename if it does not exist.
func SafeWriteConfigAs(filename string) error { return v.SafeWriteConfigAs(filename) }
func (v *Viper) SafeWriteConfigAs(filename string) error {
alreadyExists, err := afero.Exists(v.fs, filename)
if alreadyExists && err == nil {
return ConfigFileAlreadyExistsError(filename)
}
return v.writeConfig(filename, false)
}
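
// Usage sketch (illustrative; the target path is hypothetical): WriteConfigAs
// overwrites, SafeWriteConfigAs refuses to clobber an existing file.
//
//     _ = v.WriteConfigAs("/tmp/app-config.yaml")
//     if err := v.SafeWriteConfigAs("/tmp/app-config.yaml"); err != nil {
//         // second call returns ConfigFileAlreadyExistsError
//     }
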
func (v *Viper) writeConfig(filename string, force bool) error {
jww.INFO.Println("Attempting to write configuration to file.")
var configType string
ext := filepath.Ext(filename)
if ext != "" {
configType = ext[1:]
} else {
configType = v.configType
}
if configType == "" {
return fmt.Errorf("config type could not be determined for %s", filename)
}
if !stringInSlice(configType, SupportedExts) {
return UnsupportedConfigError(configType)
}
if v.config == nil {
v.config = make(map[string]interface{})
}
flags := os.O_CREATE | os.O_TRUNC | os.O_WRONLY
if !force {
flags |= os.O_EXCL
}
f, err := v.fs.OpenFile(filename, flags, v.configPermissions)
if err != nil {
return err
}
defer f.Close()
if err := v.marshalWriter(f, configType); err != nil {
return err
}
return f.Sync()
}
// Unmarshal a Reader into a map.
// Should probably be an unexported function.
func unmarshalReader(in io.Reader, c map[string]interface{}) error {
return v.unmarshalReader(in, c)
}
func (v *Viper) unmarshalReader(in io.Reader, c map[string]interface{}) error {
buf := new(bytes.Buffer)
buf.ReadFrom(in)
switch v.toLower(v.getConfigType()) {
case "yaml", "yml":
if err := yaml.Unmarshal(buf.Bytes(), &c); err != nil {
return ConfigParseError{err}
}
case "json":
if err := json.Unmarshal(buf.Bytes(), &c); err != nil {
return ConfigParseError{err}
}
case "hcl":
obj, err := hcl.Parse(buf.String())
if err != nil {
return ConfigParseError{err}
}
if err = hcl.DecodeObject(&c, obj); err != nil {
return ConfigParseError{err}
}
case "toml":
tree, err := toml.LoadReader(buf)
if err != nil {
return ConfigParseError{err}
}
tmap := tree.ToMap()
for k, v := range tmap {
c[k] = v
}
case "dotenv", "env":
env, err := gotenv.StrictParse(buf)
if err != nil {
return ConfigParseError{err}
}
for k, v := range env {
c[k] = v
}
case "properties", "props", "prop":
v.properties = properties.NewProperties()
var err error
if v.properties, err = properties.Load(buf.Bytes(), properties.UTF8); err != nil {
return ConfigParseError{err}
}
for _, key := range v.properties.Keys() {
value, _ := v.properties.Get(key)
// recursively build nested maps
path := strings.Split(key, ".")
lastKey := v.toLower(path[len(path)-1])
deepestMap := deepSearch(c, path[0:len(path)-1])
// set innermost value
deepestMap[lastKey] = value
}
case "ini":
cfg := ini.Empty()
err := cfg.Append(buf.Bytes())
if err != nil {
return ConfigParseError{err}
}
sections := cfg.Sections()
for i := 0; i < len(sections); i++ {
section := sections[i]
keys := section.Keys()
for j := 0; j < len(keys); j++ {
key := keys[j]
value := cfg.Section(section.Name()).Key(key.Name()).String()
c[section.Name()+"."+key.Name()] = value
}
}
}
if v.insensitivise {
insensitiviseMap(c)
}
return nil
}
// Marshal a map into a Writer.
func (v *Viper) marshalWriter(f afero.File, configType string) error {
c := v.AllSettings()
switch configType {
case "json":
b, err := json.MarshalIndent(c, "", " ")
if err != nil {
return ConfigMarshalError{err}
}
_, err = f.WriteString(string(b))
if err != nil {
return ConfigMarshalError{err}
}
case "hcl":
b, err := json.Marshal(c)
if err != nil {
return ConfigMarshalError{err}
}
ast, err := hcl.Parse(string(b))
if err != nil {
return ConfigMarshalError{err}
}
err = printer.Fprint(f, ast.Node)
if err != nil {
return ConfigMarshalError{err}
}
case "prop", "props", "properties":
if v.properties == nil {
v.properties = properties.NewProperties()
}
p := v.properties
for _, key := range v.AllKeys() {
_, _, err := p.Set(key, v.GetString(key))
if err != nil {
return ConfigMarshalError{err}
}
}
_, err := p.WriteComment(f, "#", properties.UTF8)
if err != nil {
return ConfigMarshalError{err}
}
case "dotenv", "env":
lines := []string{}
for _, key := range v.AllKeys() {
envName := strings.ToUpper(strings.Replace(key, ".", "_", -1))
val := v.Get(key)
lines = append(lines, fmt.Sprintf("%v=%v", envName, val))
}
s := strings.Join(lines, "\n")
if _, err := f.WriteString(s); err != nil {
return ConfigMarshalError{err}
}
case "toml":
t, err := toml.TreeFromMap(c)
if err != nil {
return ConfigMarshalError{err}
}
s := t.String()
if _, err := f.WriteString(s); err != nil {
return ConfigMarshalError{err}
}
case "yaml", "yml":
b, err := yaml.Marshal(c)
if err != nil {
return ConfigMarshalError{err}
}
if _, err = f.WriteString(string(b)); err != nil {
return ConfigMarshalError{err}
}
case "ini":
keys := v.AllKeys()
cfg := ini.Empty()
ini.PrettyFormat = false
for i := 0; i < len(keys); i++ {
key := keys[i]
lastSep := strings.LastIndex(key, ".")
sectionName := key[:(lastSep)]
keyName := key[(lastSep + 1):]
if sectionName == "default" {
sectionName = ""
}
cfg.Section(sectionName).Key(keyName).SetValue(v.GetString(key))
}
cfg.WriteTo(f)
}
return nil
}
//removed by kalrey
// func keyExists(k string, m map[string]interface{}) string {
// lk := v.toLower(k)
// for mk := range m {
// lmk := v.toLower(mk)
// if lmk == lk {
// return mk
// }
// }
// return ""
// }
//add by kalrey
func keyExists(k string, m map[string]interface{}) string {
for mk := range m {
if mk == k {
return mk
}
}
return ""
}
func castToMapStringInterface(
src map[interface{}]interface{}) map[string]interface{} {
tgt := map[string]interface{}{}
for k, v := range src {
tgt[fmt.Sprintf("%v", k)] = v
}
return tgt
}
func castMapStringSliceToMapInterface(src map[string][]string) map[string]interface{} {
tgt := map[string]interface{}{}
for k, v := range src {
tgt[k] = v
}
return tgt
}
func castMapStringToMapInterface(src map[string]string) map[string]interface{} {
tgt := map[string]interface{}{}
for k, v := range src {
tgt[k] = v
}
return tgt
}
func castMapFlagToMapInterface(src map[string]FlagValue) map[string]interface{} {
tgt := map[string]interface{}{}
for k, v := range src {
tgt[k] = v
}
return tgt
}
// mergeMaps merges two maps. The `itgt` parameter is for handling go-yaml's
// insistence on parsing nested structures as `map[interface{}]interface{}`
// instead of using a `string` as the key for nested structures beyond one level
// deep. Both map types are supported as there is a go-yaml fork that uses
// `map[string]interface{}` instead.
func mergeMaps(
src, tgt map[string]interface{}, itgt map[interface{}]interface{}) {
for sk, sv := range src {
tk := keyExists(sk, tgt)
if tk == "" {
jww.TRACE.Printf("tk=\"\", tgt[%s]=%v", sk, sv)
tgt[sk] = sv
if itgt != nil {
itgt[sk] = sv
}
continue
}
tv, ok := tgt[tk]
if !ok {
jww.TRACE.Printf("tgt[%s] != ok, tgt[%s]=%v", tk, sk, sv)
tgt[sk] = sv
if itgt != nil {
itgt[sk] = sv
}
continue
}
svType := reflect.TypeOf(sv)
tvType := reflect.TypeOf(tv)
if tvType != nil && svType != tvType { // Allow for the target to be nil
jww.ERROR.Printf(
"svType != tvType; key=%s, st=%v, tt=%v, sv=%v, tv=%v",
sk, svType, tvType, sv, tv)
continue
}
jww.TRACE.Printf("processing key=%s, st=%v, tt=%v, sv=%v, tv=%v",
sk, svType, tvType, sv, tv)
switch ttv := tv.(type) {
case map[interface{}]interface{}:
jww.TRACE.Printf("merging maps (must convert)")
tsv := sv.(map[interface{}]interface{})
ssv := castToMapStringInterface(tsv)
stv := castToMapStringInterface(ttv)
mergeMaps(ssv, stv, ttv)
case map[string]interface{}:
jww.TRACE.Printf("merging maps")
mergeMaps(sv.(map[string]interface{}), ttv, nil)
default:
jww.TRACE.Printf("setting value")
tgt[tk] = sv
if itgt != nil {
itgt[tk] = sv
}
}
}
}
// ReadRemoteConfig attempts to get configuration from a remote source
// and read it in the remote configuration registry.
func ReadRemoteConfig() error { return v.ReadRemoteConfig() }
func (v *Viper) ReadRemoteConfig() error {
return v.getKeyValueConfig()
}
func WatchRemoteConfig() error { return v.WatchRemoteConfig() }
func (v *Viper) WatchRemoteConfig() error {
return v.watchKeyValueConfig()
}
func (v *Viper) WatchRemoteConfigOnChannel() error {
return v.watchKeyValueConfigOnChannel()
}
// Retrieve the first found remote configuration.
func (v *Viper) getKeyValueConfig() error {
if RemoteConfig == nil {
return RemoteConfigError("Enable the remote features by doing a blank import of the viper/remote package: '_ github.com/spf13/viper/remote'")
}
for _, rp := range v.remoteProviders {
val, err := v.getRemoteConfig(rp)
if err != nil {
jww.ERROR.Printf("get remote config: %s", err)
continue
}
v.kvstore = val
return nil
}
return RemoteConfigError("No Files Found")
}
func (v *Viper) getRemoteConfig(provider RemoteProvider) (map[string]interface{}, error) {
reader, err := RemoteConfig.Get(provider)
if err != nil {
return nil, err
}
err = v.unmarshalReader(reader, v.kvstore)
return v.kvstore, err
}
// Retrieve the first found remote configuration.
func (v *Viper) watchKeyValueConfigOnChannel() error {
for _, rp := range v.remoteProviders {
respc, _ := RemoteConfig.WatchChannel(rp)
// Todo: Add quit channel
go func(rc <-chan *RemoteResponse) {
for {
b := <-rc
reader := bytes.NewReader(b.Value)
v.unmarshalReader(reader, v.kvstore)
}
}(respc)
return nil
}
return RemoteConfigError("No Files Found")
}
// Retrieve the first found remote configuration.
func (v *Viper) watchKeyValueConfig() error {
for _, rp := range v.remoteProviders {
val, err := v.watchRemoteConfig(rp)
if err != nil {
continue
}
v.kvstore = val
return nil
}
return RemoteConfigError("No Files Found")
}
func (v *Viper) watchRemoteConfig(provider RemoteProvider) (map[string]interface{}, error) {
reader, err := RemoteConfig.Watch(provider)
if err != nil {
return nil, err
}
err = v.unmarshalReader(reader, v.kvstore)
return v.kvstore, err
}
// AllKeys returns all keys holding a value, regardless of where they are set.
// Nested keys are returned with a v.keyDelim separator
func AllKeys() []string { return v.AllKeys() }
func (v *Viper) AllKeys() []string {
m := map[string]bool{}
// add all paths, by order of descending priority to ensure correct shadowing
m = v.flattenAndMergeMap(m, castMapStringToMapInterface(v.aliases), "")
m = v.flattenAndMergeMap(m, v.override, "")
m = v.mergeFlatMap(m, castMapFlagToMapInterface(v.pflags))
m = v.mergeFlatMap(m, castMapStringSliceToMapInterface(v.env))
m = v.flattenAndMergeMap(m, v.config, "")
m = v.flattenAndMergeMap(m, v.kvstore, "")
m = v.flattenAndMergeMap(m, v.defaults, "")
// convert set of paths to list
a := make([]string, 0, len(m))
for x := range m {
a = append(a, x)
}
return a
}
// flattenAndMergeMap recursively flattens the given map into a map[string]bool
// of key paths (used as a set, easier to manipulate than a []string):
// - each path is merged into a single key string, delimited with v.keyDelim
// - if a path is shadowed by an earlier value in the initial shadow map,
// it is skipped.
// The resulting set of paths is merged to the given shadow set at the same time.
func (v *Viper) flattenAndMergeMap(shadow map[string]bool, m map[string]interface{}, prefix string) map[string]bool {
if shadow != nil && prefix != "" && shadow[prefix] {
// prefix is shadowed => nothing more to flatten
return shadow
}
if shadow == nil {
shadow = make(map[string]bool)
}
var m2 map[string]interface{}
if prefix != "" {
prefix += v.keyDelim
}
for k, val := range m {
fullKey := prefix + k
switch val.(type) {
case map[string]interface{}:
m2 = val.(map[string]interface{})
case map[interface{}]interface{}:
m2 = cast.ToStringMap(val)
default:
// immediate value
shadow[v.toLower(fullKey)] = true
continue
}
// recursively merge to shadow map
shadow = v.flattenAndMergeMap(shadow, m2, fullKey)
}
return shadow
}
// mergeFlatMap merges the given maps, excluding values of the second map
// shadowed by values from the first map.
func (v *Viper) mergeFlatMap(shadow map[string]bool, m map[string]interface{}) map[string]bool {
// scan keys
outer:
for k := range m {
path := strings.Split(k, v.keyDelim)
// scan intermediate paths
var parentKey string
for i := 1; i < len(path); i++ {
parentKey = strings.Join(path[0:i], v.keyDelim)
if shadow[parentKey] {
// path is shadowed, continue
continue outer
}
}
// add key
shadow[v.toLower(k)] = true
}
return shadow
}
// AllSettings merges all settings and returns them as a map[string]interface{}.
func AllSettings() map[string]interface{} { return v.AllSettings() }
func (v *Viper) AllSettings() map[string]interface{} {
m := map[string]interface{}{}
// start from the list of keys, and construct the map one value at a time
for _, k := range v.AllKeys() {
value := v.Get(k)
if value == nil {
// should not happen, since AllKeys() returns only keys holding a value,
// check just in case anything changes
continue
}
path := strings.Split(k, v.keyDelim)
lastKey := v.toLower(path[len(path)-1])
deepestMap := deepSearch(m, path[0:len(path)-1])
// set innermost value
deepestMap[lastKey] = value
}
return m
}
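
// Usage sketch (illustrative): AllSettings is convenient for dumping the merged
// configuration, e.g. re-marshalling it to YAML.
//
//     out, err := yaml.Marshal(v.AllSettings())
//     if err != nil {
//         log.Fatalf("unable to marshal config: %v", err)
//     }
//     _ = out
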
// SetFs sets the filesystem to use to read configuration.
func SetFs(fs afero.Fs) { v.SetFs(fs) }
func (v *Viper) SetFs(fs afero.Fs) {
v.fs = fs
}
// SetConfigName sets name for the config file.
// Does not include extension.
func SetConfigName(in string) { v.SetConfigName(in) }
func (v *Viper) SetConfigName(in string) {
if in != "" {
v.configName = in
v.configFile = ""
}
}
// SetConfigType sets the type of the configuration returned by the
// remote source, e.g. "json".
func SetConfigType(in string) { v.SetConfigType(in) }
func (v *Viper) SetConfigType(in string) {
if in != "" {
v.configType = in
}
}
// SetConfigPermissions sets the permissions for the config file.
func SetConfigPermissions(perm os.FileMode) { v.SetConfigPermissions(perm) }
func (v *Viper) SetConfigPermissions(perm os.FileMode) {
v.configPermissions = perm.Perm()
}
func (v *Viper) getConfigType() string {
if v.configType != "" {
return v.configType
}
cf, err := v.getConfigFile()
if err != nil {
return ""
}
ext := filepath.Ext(cf)
if len(ext) > 1 {
return ext[1:]
}
return ""
}
func (v *Viper) getConfigFile() (string, error) {
if v.configFile == "" {
cf, err := v.findConfigFile()
if err != nil {
return "", err
}
v.configFile = cf
}
return v.configFile, nil
}
func (v *Viper) searchInPath(in string) (filename string) {
jww.DEBUG.Println("Searching for config in ", in)
for _, ext := range SupportedExts {
jww.DEBUG.Println("Checking for", filepath.Join(in, v.configName+"."+ext))
if b, _ := exists(v.fs, filepath.Join(in, v.configName+"."+ext)); b {
jww.DEBUG.Println("Found: ", filepath.Join(in, v.configName+"."+ext))
return filepath.Join(in, v.configName+"."+ext)
}
}
if v.configType != "" {
if b, _ := exists(v.fs, filepath.Join(in, v.configName)); b {
return filepath.Join(in, v.configName)
}
}
return ""
}
// Search all configPaths for any config file.
// Returns the first path that exists (and is a config file).
func (v *Viper) findConfigFile() (string, error) {
jww.INFO.Println("Searching for config in ", v.configPaths)
for _, cp := range v.configPaths {
file := v.searchInPath(cp)
if file != "" {
return file, nil
}
}
return "", ConfigFileNotFoundError{v.configName, fmt.Sprintf("%s", v.configPaths)}
}
// Debug prints all configuration registries for debugging
// purposes.
func Debug() { v.Debug() }
func (v *Viper) Debug() {
fmt.Printf("Aliases:\n%#v\n", v.aliases)
fmt.Printf("Override:\n%#v\n", v.override)
fmt.Printf("PFlags:\n%#v\n", v.pflags)
fmt.Printf("Env:\n%#v\n", v.env)
fmt.Printf("Key/Value Store:\n%#v\n", v.kvstore)
fmt.Printf("Config:\n%#v\n", v.config)
fmt.Printf("Defaults:\n%#v\n", v.defaults)
}
| []
| []
| []
| [] | [] | go | 0 | 0 | |
cryptoauthlib/python/setup.py | from setuptools import setup, Distribution
from setuptools.command.install import install
from setuptools.command.build_ext import build_ext
from setuptools.extension import Extension
import sys
import subprocess
import os
import glob
import shutil
from ctypes import cdll
_NAME = 'cryptoauthlib'
_DESCRIPTION = 'Python Wrapper Library for Microchip Security Products'
_AUTHOR = 'Microchip Technology Inc'
_AUTHOR_EMAIL = '[email protected]'
_LICENSE = 'Other'
_URL = 'https://github.com/MicrochipTech/cryptoauthlib'
_VERSION = open('VERSION', 'r').read().strip()
_DOWNLOAD_URL = '%s/archive/%s.tar.gz' % (_URL, _VERSION)
_CLASSIFIERS = [
'Development Status :: 4 - Beta',
'License :: Other/Proprietary License',
'Intended Audience :: Developers',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Operating System :: OS Independent',
]
_PROJECT_URLS = {
'Documentation': '%s/wiki/python' % _URL,
'Source': _URL,
'Tracker': '%s/issues' % _URL,
}
# Include the compiled library in the resulting distribution
_PACKAGE_DATA = {}
if sys.platform == 'win32':
_PACKAGE_DATA['libcryptoauth'] = ['cryptoauth.dll']
#elif sys.platform is 'darwin':
else:
_PACKAGE_DATA['libcryptoauth'] = ['libcryptoauth.so']
# See if this is being built from an sdist structure
if os.path.exists('lib') and os.path.exists('third_party'):
_sdist_build = True
else:
_sdist_build = False
# See if the library is already installed
try:
cdll.LoadLibrary('libcryptoauth.so')
_EXTENSIONS = None
except:
_EXTENSIONS = [Extension('cryptoauthlib', sources=[])]
def copy_udev_rules(target):
if _sdist_build:
rules = 'lib/hal/90-cryptohid.rules'
else:
rules = '../lib/hal/90-cryptohid.rules'
if not os.path.exists(target):
raise FileNotFoundError
if not os.path.exists(target + os.path.sep + os.path.basename(rules)):
shutil.copy(rules, target)
def install_udev_rules():
if sys.platform.startswith('linux'):
try:
copy_udev_rules('/etc/udev/rules.d')
except PermissionError:
print('Unable to write udev rules. Rerun install as sudo or install rules manually')
except:
print('Unable to install udev rules. See readme to manually install')
def load_readme():
with open('README.md', 'r') as f:
read_me = f.read()
if not _sdist_build:
with open('../README.md', 'r') as f:
notes = f.read()
read_me += notes[notes.find('Release notes'):notes.find('Host Device Support')]
with open('README.md', 'w') as f:
f.write(read_me)
return read_me
class CryptoAuthCommandBuildExt(build_ext):
def build_extension(self, ext):
# Suppress cmake output
devnull = open(os.devnull, 'r+b')
nousb = bool(os.environ.get('CRYPTOAUTHLIB_NOUSB', False))
# Check if CMAKE is installed
try:
subprocess.check_call(['cmake', '--version'], stdin=devnull, stdout=devnull, stderr=devnull, shell=False)
except OSError as e:
print("CMAKE must be installed on the system for this module to build the required extension e.g. 'apt-get install cmake' or 'yum install cmake'")
raise e
extdir = os.path.abspath(
os.path.dirname(self.get_ext_fullpath(ext.name)) + os.path.sep + _NAME)
setupdir = os.path.dirname(os.path.abspath(__file__)) + os.path.sep
cmakelist_path = os.path.abspath(setupdir + 'lib' if _sdist_build else '../lib')
if not sys.platform.startswith('linux'):
cfg = 'Debug' if self.debug else 'Release'
build_args = ['--config', cfg]
else:
build_args = []
cmake_args = ['-DATCA_HAL_CUSTOM=ON']
if not nousb:
cmake_args += ['-DATCA_HAL_KIT_HID=ON']
if 'win32' == sys.platform:
cmake_args += ['-DCMAKE_LIBRARY_OUTPUT_DIRECTORY_%s=' % cfg.upper() + extdir,
'-DCMAKE_RUNTIME_OUTPUT_DIRECTORY_%s=' % cfg.upper() + extdir]
if sys.maxsize > 2**32:
cmake_args += ['-A', 'x64']
else:
cmake_args += ['-DCMAKE_LIBRARY_OUTPUT_DIRECTORY=' + extdir]
if sys.platform.startswith('linux'):
cmake_args += ['-DATCA_HAL_I2C=ON']
cmake_args += ['-DATCACERT_DEF_SRC={}atca_utils_sizes.c'.format(setupdir.replace('\\','/') if _sdist_build else '../test/')]
if not os.path.exists(self.build_temp):
os.makedirs(self.build_temp)
# Configure the library
try:
subprocess.check_output(['cmake', cmakelist_path] + cmake_args, cwd=os.path.abspath(self.build_temp), stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
msg = e.output.decode('utf-8')
if 'usb' in msg:
msg += '\n\n USB libraries or headers were not located. If USB support is\n' \
' not required it can be disabled by setting the environment\n' \
' variable CRYPTOAUTHLIB_NOUSB to true before trying to install\n' \
' this package: \n\n' \
' $ export CRYPTOAUTHLIB_NOUSB=True\n\n' \
' Run setup.py clean before trying install again or use the pip \n' \
' option --no-cache-dir\n'
raise RuntimeError(msg)
# Build the library
try:
subprocess.check_output(['cmake', '--build', '.'] + build_args, cwd=os.path.abspath(self.build_temp), stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
if sys.version_info[0] <= 2:
raise RuntimeError(e.output) # Python 2 doesn't handle unicode exceptions
else:
raise RuntimeError(e.output.decode('utf-8'))
class CryptoAuthCommandInstall(install):
def run(self):
self.do_egg_install()
install_udev_rules()
class BinaryDistribution(Distribution):
def has_ext_modules(self):
return (_EXTENSIONS is not None)
# Setuptools has some weird behavior when the install command class is extended,
# but it only affects bdist_* invocations, which only apply to macOS and Windows,
# and the extension is only required on Linux
_COMMANDS = { 'build_ext': CryptoAuthCommandBuildExt }
#if sys.platform.startswith('linux'):
# _COMMANDS['install'] = CryptoAuthCommandInstall
if __name__ == '__main__':
setup(
name=_NAME,
packages=[_NAME],
version=_VERSION,
description=_DESCRIPTION,
long_description=load_readme(),
long_description_content_type='text/markdown',
url=_URL,
author=_AUTHOR,
author_email=_AUTHOR_EMAIL,
download_url=_DOWNLOAD_URL,
keywords='Microchip ATECC508A ATECC608A ECDSA ECDH',
project_urls=_PROJECT_URLS,
license=_LICENSE,
classifiers=_CLASSIFIERS,
package_data=_PACKAGE_DATA,
include_package_data=True,
distclass=BinaryDistribution,
cmdclass=_COMMANDS,
setup_requires=['setuptools>=38.6.0', 'wheel'],
install_requires=['enum34;python_version<"3.4"'],
ext_modules=_EXTENSIONS,
python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, <4',
zip_safe=False
)
| []
| []
| [
"CRYPTOAUTHLIB_NOUSB"
]
| [] | ["CRYPTOAUTHLIB_NOUSB"] | python | 1 | 0 | |
Quote-Tweets/QuoteTweetsDemo.java | import org.apache.http.HttpEntity;
import org.apache.http.HttpResponse;
import org.apache.http.NameValuePair;
import org.apache.http.client.HttpClient;
import org.apache.http.client.config.CookieSpecs;
import org.apache.http.client.config.RequestConfig;
import org.apache.http.client.methods.HttpGet;
import org.apache.http.client.utils.URIBuilder;
import org.apache.http.impl.client.HttpClients;
import org.apache.http.message.BasicNameValuePair;
import org.apache.http.util.EntityUtils;
import java.io.IOException;
import java.net.URISyntaxException;
import java.util.ArrayList;
/*
* Sample code to demonstrate the use of the v2 Quote Tweets endpoint
* */
public class QuoteTweetsDemo {
// To set your environment variables in your terminal run the following line:
// export 'BEARER_TOKEN'='<your_bearer_token>'
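    // The export above is the bash/zsh form; as an illustrative aside, the
    // PowerShell equivalent would be: $env:BEARER_TOKEN='<your_bearer_token>'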
public static void main(String args[]) throws IOException, URISyntaxException {
final String bearerToken = System.getenv("BEARER_TOKEN");
if (null != bearerToken) {
//Replace with Tweet ID below
String response = getTweets(20, bearerToken);
System.out.println(response);
} else {
System.out.println("There was a problem getting your bearer token. Please make sure you set the BEARER_TOKEN environment variable");
}
}
/*
* This method calls the v2 Quote Tweets endpoint by Tweet ID
* */
private static String getTweets(int tweetId, String bearerToken) throws IOException, URISyntaxException {
String tweetResponse = null;
HttpClient httpClient = HttpClients.custom()
.setDefaultRequestConfig(RequestConfig.custom()
.setCookieSpec(CookieSpecs.STANDARD).build())
.build();
URIBuilder uriBuilder = new URIBuilder(String.format("https://api.twitter.com/2/tweets/%s/quote_tweets", tweetId));
ArrayList<NameValuePair> queryParameters;
queryParameters = new ArrayList<>();
queryParameters.add(new BasicNameValuePair("tweet.fields", "created_at"));
uriBuilder.addParameters(queryParameters);
HttpGet httpGet = new HttpGet(uriBuilder.build());
httpGet.setHeader("Authorization", String.format("Bearer %s", bearerToken));
httpGet.setHeader("Content-Type", "application/json");
HttpResponse response = httpClient.execute(httpGet);
HttpEntity entity = response.getEntity();
if (null != entity) {
tweetResponse = EntityUtils.toString(entity, "UTF-8");
}
return tweetResponse;
}
}
| [
"\"BEARER_TOKEN\""
]
| []
| [
"BEARER_TOKEN"
]
| [] | ["BEARER_TOKEN"] | java | 1 | 0 | |
alpaca_trade_api/rest.py | import logging
import os
from typing import Iterator, List, Union
import requests
from requests.exceptions import HTTPError
import time
from enum import Enum
from .common import (
get_base_url,
get_data_url,
get_credentials,
get_api_version,
URL,
FLOAT,
)
from .entity import (
Bar,
Entity,
Account,
AccountConfigurations,
AccountActivity,
Asset,
Order,
Position,
BarSet,
Clock,
Calendar,
Aggs,
Trade,
Quote,
Watchlist,
PortfolioHistory,
)
from .entity_v2 import (
BarsV2,
SnapshotV2,
SnapshotsV2,
TradesV2,
TradeV2,
QuotesV2,
QuoteV2,
)
from . import polygon
logger = logging.getLogger(__name__)
Positions = List[Position]
Orders = List[Order]
Assets = List[Asset]
AccountActivities = List[AccountActivity]
Calendars = List[Calendar]
Watchlists = List[Watchlist]
TradeIterator = Iterator[Union[Trade, dict]]
QuoteIterator = Iterator[Union[Quote, dict]]
BarIterator = Iterator[Union[Bar, dict]]
DATA_V2_MAX_LIMIT = 10000 # max items per api call
class RetryException(Exception):
pass
class APIError(Exception):
"""
Represent API related error.
error.status_code will have http status code.
"""
def __init__(self, error, http_error=None):
super().__init__(error["message"])
self._error = error
self._http_error = http_error
@property
def code(self):
return self._error["code"]
@property
def status_code(self):
http_error = self._http_error
if http_error is not None and hasattr(http_error, "response"):
return http_error.response.status_code
@property
def request(self):
if self._http_error is not None:
return self._http_error.request
@property
def response(self):
if self._http_error is not None:
return self._http_error.response
class TimeFrame(Enum):
Day = "1Day"
Hour = "1Hour"
Minute = "1Min"
Sec = "1Sec"
class REST(object):
def __init__(
self,
key_id: str = None,
secret_key: str = None,
base_url: URL = None,
api_version: str = None,
oauth=None,
raw_data: bool = False,
):
"""
:param raw_data: should we return api response raw or wrap it with
Entity objects.
"""
self._key_id, self._secret_key, self._oauth = get_credentials(
key_id, secret_key, oauth
)
self._base_url: URL = URL(base_url or get_base_url())
self._api_version = get_api_version(api_version)
self._session = requests.Session()
self._use_raw_data = raw_data
self._retry = int(os.environ.get("APCA_RETRY_MAX", 3))
self._retry_wait = int(os.environ.get("APCA_RETRY_WAIT", 3))
self._retry_codes = [
int(o) for o in os.environ.get("APCA_RETRY_CODES", "429,504").split(",")
]
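# The three APCA_RETRY_* variables read above tune the retry loop. A shell
# sketch with illustrative values (not defaults mandated by this code):
#   export APCA_RETRY_MAX=5
#   export APCA_RETRY_WAIT=2
#   export APCA_RETRY_CODES=429,504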
self.polygon = polygon.REST(
self._key_id, "staging" in self._base_url, self._use_raw_data
)
def _request(
self, method, path, data=None, base_url: URL = None, api_version: str = None
):
base_url = base_url or self._base_url
version = api_version if api_version else self._api_version
url: URL = URL(base_url + "/" + version + path)
headers = {}
if self._oauth:
headers["Authorization"] = "Bearer " + self._oauth
else:
headers["APCA-API-KEY-ID"] = self._key_id
headers["APCA-API-SECRET-KEY"] = self._secret_key
opts = {
"headers": headers,
# Since we allow users to set the endpoint URL via an env var,
# a non-SSL endpoint entered by mistake could cause subtle issues
# when a non-GET request is redirected from http to https.
# It's better to fail early if the URL isn't right.
"allow_redirects": False,
}
if method.upper() in ["GET", "DELETE"]:
opts["params"] = data
else:
opts["json"] = data
retry = self._retry
if retry < 0:
retry = 0
while retry >= 0:
try:
return self._one_request(method, url, opts, retry)
except RetryException:
retry_wait = self._retry_wait
logger.warning(
"sleep {} seconds and retrying {} "
"{} more time(s)...".format(retry_wait, url, retry)
)
time.sleep(retry_wait)
retry -= 1
continue
def _one_request(self, method: str, url: URL, opts: dict, retry: int):
"""
Perform one request, possibly raising RetryException when the
response status is one of the retry codes (e.g. 429). Otherwise, if the
error text contains a "code" string, it is decoded as a JSON object and
an APIError is raised.
Returns the body JSON on a 200 status.
"""
retry_codes = self._retry_codes
resp = self._session.request(method, url, **opts)
try:
resp.raise_for_status()
except HTTPError as http_error:
# retry if we hit Rate Limit
if resp.status_code in retry_codes and retry > 0:
raise RetryException()
if "code" in resp.text:
error = resp.json()
if "code" in error:
raise APIError(error, http_error)
else:
raise
if resp.text != "":
return resp.json()
return None
def get(self, path, data=None):
return self._request("GET", path, data)
def post(self, path, data=None):
return self._request("POST", path, data)
def put(self, path, data=None):
return self._request("PUT", path, data)
def patch(self, path, data=None):
return self._request("PATCH", path, data)
def delete(self, path, data=None):
return self._request("DELETE", path, data)
def data_get(self, path, data=None, api_version="v1"):
base_url: URL = get_data_url()
return self._request(
"GET",
path,
data,
base_url=base_url,
api_version=api_version,
)
def get_account(self) -> Account:
"""Get the account"""
resp = self.get("/account")
return self.response_wrapper(resp, Account)
def get_account_configurations(self) -> AccountConfigurations:
"""Get account configs"""
resp = self.get("/account/configurations")
return self.response_wrapper(resp, AccountConfigurations)
def update_account_configurations(
self,
no_shorting: bool = None,
dtbp_check: str = None,
trade_confirm_email: str = None,
suspend_trade: bool = None,
) -> AccountConfigurations:
"""
alpaca.markets/docs/api-documentation/api-v2/account-configuration/
Update account configs
:param dtbp_check: both, entry, or exit
:param trade_confirm_email: all or none
"""
params = {}
if no_shorting is not None:
params["no_shorting"] = no_shorting
if dtbp_check is not None:
params["dtbp_check"] = dtbp_check
if trade_confirm_email is not None:
params["trade_confirm_email"] = trade_confirm_email
if suspend_trade is not None:
params["suspend_trade"] = suspend_trade
resp = self.patch("/account/configurations", params)
return self.response_wrapper(resp, AccountConfigurations)
def list_orders(
self,
status: str = None,
limit: int = None,
after: str = None,
until: str = None,
direction: str = None,
params=None,
nested: bool = None,
symbols: List[str] = None,
) -> Orders:
"""
Get a list of orders
https://docs.alpaca.markets/web-api/orders/#get-a-list-of-orders
:param status: open, closed or all. Defaults to open.
:param limit: Defaults to 50 and max is 500
:param after: timestamp
:param until: timestamp
:param direction: asc or desc.
:param params: refer to documentation
:param nested: should the data be nested like json
:param symbols: list of str (symbols)
"""
if params is None:
params = dict()
if limit is not None:
params["limit"] = limit
if after is not None:
params["after"] = after
if until is not None:
params["until"] = until
if direction is not None:
params["direction"] = direction
if status is not None:
params["status"] = status
if nested is not None:
params["nested"] = nested
if symbols is not None:
params["symbols"] = ",".join(symbols)
url = "/orders"
resp = self.get(url, params)
if self._use_raw_data:
return resp
else:
return [self.response_wrapper(o, Order) for o in resp]
def submit_order(
self,
symbol: str,
qty: float = None,
side: str = "buy",
type: str = "market",
time_in_force: str = "day",
limit_price: str = None,
stop_price: str = None,
client_order_id: str = None,
extended_hours: bool = None,
order_class: str = None,
take_profit: dict = None,
stop_loss: dict = None,
trail_price: str = None,
trail_percent: str = None,
notional: float = None,
):
"""
:param symbol: symbol or asset ID
:param qty: float. Mutually exclusive with "notional".
:param side: buy or sell
:param type: market, limit, stop, stop_limit or trailing_stop
:param time_in_force: day, gtc, opg, cls, ioc, fok
:param limit_price: str of float
:param stop_price: str of float
:param client_order_id:
:param extended_hours: bool. If true, order will be eligible to execute
in premarket/afterhours.
:param order_class: simple, bracket, oco or oto
:param take_profit: dict with field "limit_price" e.g
{"limit_price": "298.95"}
:param stop_loss: dict with fields "stop_price" and "limit_price" e.g
{"stop_price": "297.95", "limit_price": "298.95"}
:param trail_price: str of float
:param trail_percent: str of float
:param notional: float. Mutually exclusive with "qty".
"""
"""Request a new order"""
params = {
"symbol": symbol,
"side": side,
"type": type,
"time_in_force": time_in_force,
}
if qty is not None:
params["qty"] = qty
if notional is not None:
params["notional"] = notional
if limit_price is not None:
params["limit_price"] = FLOAT(limit_price)
if stop_price is not None:
params["stop_price"] = FLOAT(stop_price)
if client_order_id is not None:
params["client_order_id"] = client_order_id
if extended_hours is not None:
params["extended_hours"] = extended_hours
if order_class is not None:
params["order_class"] = order_class
if take_profit is not None:
if "limit_price" in take_profit:
take_profit["limit_price"] = FLOAT(take_profit["limit_price"])
params["take_profit"] = take_profit
if stop_loss is not None:
if "limit_price" in stop_loss:
stop_loss["limit_price"] = FLOAT(stop_loss["limit_price"])
if "stop_price" in stop_loss:
stop_loss["stop_price"] = FLOAT(stop_loss["stop_price"])
params["stop_loss"] = stop_loss
if trail_price is not None:
params["trail_price"] = trail_price
if trail_percent is not None:
params["trail_percent"] = trail_percent
resp = self.post("/orders", params)
return self.response_wrapper(resp, Order)
def get_order_by_client_order_id(self, client_order_id: str) -> Order:
"""Get an order by client order id"""
params = {
"client_order_id": client_order_id,
}
resp = self.get("/orders:by_client_order_id", params)
return self.response_wrapper(resp, Order)
def get_order(self, order_id: str, nested: bool = None) -> Order:
"""Get an order"""
params = {}
if nested is not None:
params["nested"] = nested
resp = self.get("/orders/{}".format(order_id), params)
return self.response_wrapper(resp, Order)
def replace_order(
self,
order_id: str,
qty: str = None,
limit_price: str = None,
stop_price: str = None,
trail: str = None,
time_in_force: str = None,
client_order_id: str = None,
) -> Order:
"""
:param order_id:
:param qty: str of int
:param limit_price: str of float
:param stop_price: str of float
:param trail: str of float, represents trailing_price or
trailing_percent. Which one applies is determined by the original order.
:param time_in_force: day, gtc, opg, cls, ioc, fok
note: you cannot replace the type of an order; if it was e.g. trailing_stop
it will remain trailing_stop.
"""
params = {}
if qty is not None:
params["qty"] = qty
if limit_price is not None:
params["limit_price"] = FLOAT(limit_price)
if stop_price is not None:
params["stop_price"] = FLOAT(stop_price)
if trail is not None:
params["trail"] = FLOAT(trail)
if time_in_force is not None:
params["time_in_force"] = time_in_force
if client_order_id is not None:
params["client_order_id"] = client_order_id
resp = self.patch("/orders/{}".format(order_id), params)
return self.response_wrapper(resp, Order)
def cancel_order(self, order_id: str) -> None:
"""Cancel an order"""
self.delete("/orders/{}".format(order_id))
def cancel_all_orders(self) -> None:
"""Cancel all open orders"""
self.delete("/orders")
def list_positions(self) -> Positions:
"""Get a list of open positions"""
resp = self.get("/positions")
if self._use_raw_data:
return resp
else:
return [self.response_wrapper(p, Position) for p in resp]
def get_position(self, symbol: str) -> Position:
"""Get an open position"""
resp = self.get("/positions/{}".format(symbol))
return self.response_wrapper(resp, Position)
def close_position(
self,
symbol: str,
*,
qty: float = None,
# percentage: float = None # currently unsupported api
) -> Position:
"""Liquidates the position for the given symbol at market price"""
# if qty and percentage:
# raise Exception("Can't close position with qty and pecentage")
# elif qty:
# data = {'qty': qty}
# elif percentage:
# data = {'percentage': percentage}
# else:
# data = {}
if qty:
data = {"qty": qty}
else:
data = {}
resp = self.delete("/positions/{}".format(symbol), data=data)
return self.response_wrapper(resp, Position)
def close_all_positions(self) -> Positions:
"""Liquidates all open positions at market price"""
resp = self.delete("/positions")
if self._use_raw_data:
return resp
else:
return [self.response_wrapper(o, Position) for o in resp]
def list_assets(self, status=None, asset_class=None) -> Assets:
"""Get a list of assets"""
params = {
"status": status,
"asset_class": asset_class,
}
resp = self.get("/assets", params)
if self._use_raw_data:
return resp
else:
return [self.response_wrapper(o, Asset) for o in resp]
def get_asset(self, symbol: str) -> Asset:
"""Get an asset"""
resp = self.get("/assets/{}".format(symbol))
return self.response_wrapper(resp, Asset)
def get_barset(
self,
symbols,
timeframe: str,
limit: int = None,
start: str = None,
end: str = None,
after: str = None,
until: str = None,
) -> BarSet:
"""
read the documentation here:
https://alpaca.markets/docs/api-documentation/api-v2/market-data/bars/
Get BarSet(dict[str]->list[Bar])
:param symbols: The parameter symbols can be either a comma-split
string or a list of string. Each symbol becomes the key of the
returned value.
:param timeframe: One of minute, 1Min, 5Min, 15Min, day or 1D. minute
is an alias of 1Min. Similarly, day is of 1D.
:param limit: The maximum number of bars per symbol. It can be between
1 and 1000. Default is 100.
:param start: ISO Format str, ex: '2019-04-15T09:30:00-04:00' or
'2019-04-15'
:param end: ISO Format str
:param after: ISO Format str
:param until: ISO Format str
:return: BarSet
note: start can't be used with after. end cannot be used with until.
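Example (a minimal sketch, not from the original source; `api` is an
assumed REST instance and the symbols/date are placeholder values):
barset = api.get_barset('AAPL,MSFT', '15Min', limit=10, start='2019-04-15')
aapl_bars = barset['AAPL']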
"""
if not isinstance(symbols, str):
symbols = ",".join(symbols)
params = {
"symbols": symbols,
}
if limit is not None:
params["limit"] = limit
if start is not None:
params["start"] = start
if end is not None:
params["end"] = end
if after is not None:
params["after"] = after
if until is not None:
params["until"] = until
resp = self.data_get("/bars/{}".format(timeframe), params)
return self.response_wrapper(resp, BarSet)
def get_aggs(
self, symbol: str, multiplier: int, timespan: str, _from: str, to: str
) -> Aggs:
"""
:param symbol: str eg AAPL
:param multiplier: must be 1
:param timespan: day or minute
:param _from: yyyy-mm-dd
:param to: yyyy-mm-dd
:return:
"""
resp = self.data_get(
"/aggs/ticker/{}/range/{}/{}/{}/{}".format(
symbol, multiplier, timespan, _from, to
)
)
return self.response_wrapper(resp, Aggs)
def get_last_trade(self, symbol: str) -> Trade:
"""
Get the last trade for the given symbol
"""
resp = self.data_get("/last/stocks/{}".format(symbol))
return self.response_wrapper(resp["last"], Trade)
def get_last_quote(self, symbol: str) -> Quote:
"""Get the last quote for the given symbol"""
resp = self.data_get("/last_quote/stocks/{}".format(symbol))
return self.response_wrapper(resp["last"], Quote)
def _data_get_v2(self, endpoint: str, symbol: str, **kwargs):
page_token = None
total_items = 0
limit = kwargs.get("limit")
while True:
actual_limit = None
if limit:
actual_limit = min(int(limit) - total_items, DATA_V2_MAX_LIMIT)
if actual_limit < 1:
break
data = kwargs
data["limit"] = actual_limit
data["page_token"] = page_token
resp = self.data_get(
"/stocks/{}/{}".format(symbol, endpoint), data=data, api_version="v2"
)
items = resp.get(endpoint, []) or []
for item in items:
yield item
total_items += len(items)
page_token = resp.get("next_page_token")
if not page_token:
break
def get_trades_iter(
self, symbol: str, start: str, end: str, limit: int = None, raw=False
) -> TradeIterator:
trades = self._data_get_v2("trades", symbol, start=start, end=end, limit=limit)
for trade in trades:
if raw:
yield trade
else:
yield self.response_wrapper(trade, Trade)
def get_trades(
self,
symbol: str,
start: str,
end: str,
limit: int = None,
) -> TradesV2:
trades = list(self.get_trades_iter(symbol, start, end, limit, raw=True))
return TradesV2(trades)
def get_quotes_iter(
self, symbol: str, start: str, end: str, limit: int = None, raw=False
) -> QuoteIterator:
quotes = self._data_get_v2("quotes", symbol, start=start, end=end, limit=limit)
for quote in quotes:
if raw:
yield quote
else:
yield self.response_wrapper(quote, Quote)
def get_quotes(
self,
symbol: str,
start: str,
end: str,
limit: int = None,
) -> QuotesV2:
quotes = list(self.get_quotes_iter(symbol, start, end, limit, raw=True))
return QuotesV2(quotes)
def get_bars_iter(
self,
symbol: str,
timeframe: TimeFrame,
start: str,
end: str,
adjustment: str = "raw",
limit: int = None,
raw=False,
) -> BarIterator:
bars = self._data_get_v2(
"bars",
symbol,
timeframe=timeframe.value,
adjustment=adjustment,
start=start,
end=end,
limit=limit,
)
for bar in bars:
if raw:
yield bar
else:
yield self.response_wrapper(bar, Bar)
def get_bars(
self,
symbol: str,
timeframe: TimeFrame,
start: str,
end: str,
adjustment: str = "raw",
limit: int = None,
) -> BarsV2:
bars = list(
self.get_bars_iter(
symbol, timeframe, start, end, adjustment, limit, raw=True
)
)
return BarsV2(bars)
def get_latest_trade(self, symbol: str) -> TradeV2:
"""
Get the latest trade for the given symbol
"""
resp = self.data_get(
"/stocks/{}/trades/latest".format(symbol), api_version="v2"
)
return self.response_wrapper(resp["trade"], TradeV2)
def get_latest_quote(self, symbol: str) -> QuoteV2:
"""Get the latest quote for the given symbol"""
resp = self.data_get(
"/stocks/{}/quotes/latest".format(symbol), api_version="v2"
)
return self.response_wrapper(resp["quote"], QuoteV2)
def get_snapshot(self, symbol: str) -> SnapshotV2:
"""Get the snapshot for the given symbol"""
resp = self.data_get("/stocks/{}/snapshot".format(symbol), api_version="v2")
return self.response_wrapper(resp, SnapshotV2)
def get_snapshots(self, symbols: List[str]) -> SnapshotsV2:
"""Get the snapshots for the given symbols"""
resp = self.data_get(
"/stocks/snapshots?symbols={}".format(",".join(symbols)), api_version="v2"
)
return self.response_wrapper(resp, SnapshotsV2)
def get_clock(self) -> Clock:
resp = self.get("/clock")
return self.response_wrapper(resp, Clock)
def get_activities(
self,
activity_types: str = None,
until: str = None,
after: str = None,
direction: str = None,
date: str = None,
page_size: int = None,
page_token: str = None,
) -> AccountActivities:
"""
go to alpaca.markets/docs/api-documentation/api-v2/account-activities/
:param activity_types: go to the documentation to see available types
:param until: isoformat timestamp
:param after: isoformat timestamp
:param direction: asc or desc. Default is desc
:param date: str. Can't be used with until/after
:param page_size:
:param page_token:
:return:
"""
url = "/account/activities"
params = {}
if isinstance(activity_types, list):
params["activity_types"] = ",".join(activity_types)
elif activity_types is not None:
url += "/{}".format(activity_types)
if after is not None:
params["after"] = after
if until is not None:
params["until"] = until
if direction is not None:
params["direction"] = direction
if date is not None:
params["date"] = date
if page_size is not None:
params["page_size"] = page_size
if page_token is not None:
params["page_token"] = page_token
resp = self.get(url, data=params)
if self._use_raw_data:
return resp
else:
return [self.response_wrapper(o, AccountActivity) for o in resp]
def get_calendar(self, start: str = None, end: str = None) -> Calendars:
"""
:param start: isoformat date string eg '2006-01-02T15:04:05Z' or
'2006-01-02'
:param end: isoformat date string
"""
params = {}
if start is not None:
params["start"] = start
if end is not None:
params["end"] = end
resp = self.get("/calendar", data=params)
if self._use_raw_data:
return resp
else:
return [self.response_wrapper(o, Calendar) for o in resp]
def get_watchlists(self) -> Watchlists:
"""Get the list of watchlists registered under the account"""
resp = self.get("/watchlists")
if self._use_raw_data:
return resp
else:
return [self.response_wrapper(o, Watchlist) for o in resp]
def get_watchlist(self, watchlist_id: str) -> Watchlist:
"""Get a watchlist identified by the ID"""
resp = self.get("/watchlists/{}".format((watchlist_id)))
return self.response_wrapper(resp, Watchlist)
def get_watchlist_by_name(self, watchlist_name: str) -> Watchlist:
"""Get a watchlist identified by its name"""
params = {
"name": watchlist_name,
}
resp = self.get("/watchlists:by_name", data=params)
return self.response_wrapper(resp, Watchlist)
def create_watchlist(self, watchlist_name: str, symbols=None) -> Watchlist:
"""Create a new watchlist with an optional initial set of assets"""
params = {
"name": watchlist_name,
}
if symbols is not None:
params["symbols"] = symbols
resp = self.post("/watchlists", data=params)
return self.response_wrapper(resp, Watchlist)
def add_to_watchlist(self, watchlist_id: str, symbol: str) -> Watchlist:
"""Add an asset to the watchlist"""
resp = self.post(
"/watchlists/{}".format(watchlist_id), data=dict(symbol=symbol)
)
return self.response_wrapper(resp, Watchlist)
def update_watchlist(
self, watchlist_id: str, name: str = None, symbols=None
) -> Watchlist:
"""Update a watchlist's name and/or asset list"""
params = {}
if name is not None:
params["name"] = name
if symbols is not None:
params["symbols"] = symbols
resp = self.put("/watchlists/{}".format(watchlist_id), data=params)
return self.response_wrapper(resp, Watchlist)
def delete_watchlist(self, watchlist_id: str) -> None:
"""Delete a watchlist identified by the ID permanently"""
self.delete("/watchlists/{}".format(watchlist_id))
def delete_from_watchlist(self, watchlist_id: str, symbol: str) -> None:
"""Remove an asset from the watchlist's asset list"""
self.delete("/watchlists/{}/{}".format(watchlist_id, symbol))
def get_portfolio_history(
self,
date_start: str = None,
date_end: str = None,
period: str = None,
timeframe=None,
extended_hours: bool = None,
) -> PortfolioHistory:
"""
alpaca.markets/docs/api-documentation/api-v2/portfolio-history/
:param date_start: YYYY-MM-DD
:param date_end: YYYY-MM-DD
:param period: The duration of the data in <number> + <unit>
such as 1D, where <unit> can be D for day, W for week,
M for month and A for year. Defaults to 1M.
:param timeframe: The resolution of time window. 1Min, 5Min, 15Min,
1H, or 1D
:param extended_hours: bool. If true, include extended hours in the
result.
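Minimal call sketch (illustrative values; `api` is an assumed REST instance):
history = api.get_portfolio_history(period='3M', timeframe='1D')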
"""
params = {}
if date_start is not None:
params["date_start"] = date_start
if date_end is not None:
params["date_end"] = date_end
if period is not None:
params["period"] = period
if timeframe is not None:
params["timeframe"] = timeframe
if extended_hours is not None:
params["extended_hours"] = extended_hours
resp = self.get("/account/portfolio/history", data=params)
return self.response_wrapper(resp, PortfolioHistory)
def __enter__(self):
return self
def close(self):
self._session.close()
def __exit__(self, exc_type, exc_val, exc_tb):
self.close()
def response_wrapper(self, obj, entity: Entity):
"""
To allow the user to get the raw response from the api, we wrap all
functions with this method, checking if the user has set the raw_data
bool. If they didn't, we wrap the response with an Entity object.
:param obj: response from server
:param entity: derivative object of Entity
:return:
"""
if self._use_raw_data:
return obj
else:
return entity(obj)
| []
| []
| [
"APCA_RETRY_MAX",
"APCA_RETRY_WAIT",
"APCA_RETRY_CODES"
]
| [] | ["APCA_RETRY_MAX", "APCA_RETRY_WAIT", "APCA_RETRY_CODES"] | python | 3 | 0 | |
utils/test_prediction_speed.py | import logging
import time
import datetime
import numpy as np
import pandas as pd
# Annoying hack to get OpenMP working (allow duplicate OpenMP runtimes to load).
import os
os.environ['KMP_DUPLICATE_LIB_OK']='True'
print("Importing Keras and MDRNN.")
start_import = time.time()
import empi_mdrnn
import tensorflow as tf
from keras import backend as K
from tensorflow.contrib.training.python.training.hparam import HParams
print("Done. That took", time.time() - start_import, "seconds.")
def build_network(sess, compute_graph, net_config):
"""Build the MDRNN."""
empi_mdrnn.MODEL_DIR = "./models/"
K.set_session(sess)
with compute_graph.as_default():
net = empi_mdrnn.PredictiveMusicMDRNN(mode=empi_mdrnn.NET_MODE_RUN,
dimension=net_config.dimension,
n_hidden_units=net_config.units,
n_mixtures=net_config.mixes,
layers=net_config.layers)
#net.pi_temp = net_config.pi_temp
#net.sigma_temp = net_config.sigmatemp
print("MDRNN Loaded.")
return net
def request_rnn_prediction(input_value, net):
""" Accesses a single prediction from the RNN. """
start = time.time()
output_value = net.generate_touch(input_value)
time_delta = time.time() - start
#print("Prediction took:", time_delta)
return output_value, time_delta
def run_test(tests, net_config):
times = pd.DataFrame()
compute_graph = tf.Graph()
with compute_graph.as_default():
sess = tf.Session()
net = build_network(sess, compute_graph, net_config)
for i in range(tests):
## Predictions.
item = empi_mdrnn.random_sample(out_dim=net_config.dimension)
K.set_session(sess)
with compute_graph.as_default():
rnn_output, t = request_rnn_prediction(item, net)
out_dict = {
'time': t,
'mixes': net_config.mixes,
'layers': net_config.layers,
'units': net_config.units,
'dimension': net_config.dimension}
times = times.append(out_dict, ignore_index=True)
# clean up
K.clear_session()
sess.close()
return times
if __name__ == "__main__":
experiment_frames = []
# hparams = HParams(mixes=5, layers=2, units=64, dimension=2)
mdrnn_units = [64, 128, 256, 512]
dimensions = [2, 3, 4, 5, 6, 7, 8, 9]
for un in mdrnn_units:
for dim in dimensions:
hparams = HParams(mixes=5, layers=2, units=un, dimension=dim)
times = run_test(100, hparams)
experiment_frames.append(times)
total_experiment = pd.concat(experiment_frames, ignore_index=True)
total_experiment.to_csv("total_exp.csv")
print(total_experiment.describe())
# sysctl -n machdep.cpu.brand_string
| []
| []
| [
"KMP_DUPLICATE_LIB_OK"
]
| [] | ["KMP_DUPLICATE_LIB_OK"] | python | 1 | 0 | |
qa/pull-tester/rpc-tests.py | #!/usr/bin/env python2
# Copyright (c) 2014-2015 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""
Run Regression Test Suite
This module calls down into individual test cases via subprocess. It will
forward all unrecognized arguments onto the individual test scripts, other
than:
- `-extended`: run the "extended" test suite in addition to the basic one.
- `-win`: signal that this is running in a Windows environment, and we
should run the tests.
- `--coverage`: this generates a basic coverage report for the RPC
interface.
For a description of arguments recognized by test scripts, see
`qa/pull-tester/test_framework/test_framework.py:NajafbitTestFramework.main`.
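Example invocations (illustrative only; the script names must exist in
qa/rpc-tests):
  qa/pull-tester/rpc-tests.py wallet.py --coverage
  qa/pull-tester/rpc-tests.py -extended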
"""
import os
import time
import shutil
import sys
import subprocess
import tempfile
import re
from tests_config import *
#If imported values are not defined then set to zero (or disabled)
if not vars().has_key('ENABLE_WALLET'):
ENABLE_WALLET=0
if not vars().has_key('ENABLE_NAJAFBITD'):
ENABLE_NAJAFBITD=0
if not vars().has_key('ENABLE_UTILS'):
ENABLE_UTILS=0
if not vars().has_key('ENABLE_ZMQ'):
ENABLE_ZMQ=0
ENABLE_COVERAGE=0
#Create a set to store arguments and create the passOn string
opts = set()
passOn = ""
p = re.compile("^--")
bold = ("","")
if (os.name == 'posix'):
bold = ('\033[0m', '\033[1m')
for arg in sys.argv[1:]:
if arg == '--coverage':
ENABLE_COVERAGE = 1
elif (p.match(arg) or arg == "-h"):
passOn += " " + arg
else:
opts.add(arg)
#Set env vars
buildDir = BUILDDIR
if "NAJAFBITD" not in os.environ:
os.environ["NAJAFBITD"] = buildDir + '/src/najafbitd' + EXEEXT
if "NAJAFBITCLI" not in os.environ:
os.environ["NAJAFBITCLI"] = buildDir + '/src/najafbit-cli' + EXEEXT
#Disable Windows tests by default
if EXEEXT == ".exe" and "-win" not in opts:
print "Win tests currently disabled. Use -win option to enable"
sys.exit(0)
#Tests
testScripts = [
'bip68-112-113-p2p.py',
'wallet.py',
'listtransactions.py',
'receivedby.py',
'mempool_resurrect_test.py',
'txn_doublespend.py --mineblock',
'txn_clone.py',
'getchaintips.py',
'rawtransactions.py',
'rest.py',
'mempool_spendcoinbase.py',
'mempool_reorg.py',
'mempool_limit.py',
'httpbasics.py',
'multi_rpc.py',
'zapwallettxes.py',
'proxy_test.py',
'merkle_blocks.py',
'fundrawtransaction.py',
'signrawtransactions.py',
'walletbackup.py',
'nodehandling.py',
'reindex.py',
'addressindex.py',
'timestampindex.py',
'spentindex.py',
'decodescript.py',
'p2p-fullblocktest.py',
'blockchain.py',
'disablewallet.py',
'sendheaders.py',
'keypool.py',
'prioritise_transaction.py',
'invalidblockrequest.py',
'invalidtxrequest.py',
'abandonconflict.py',
'p2p-versionbits-warning.py',
]
testScriptsExt = [
'bip9-softforks.py',
'bip65-cltv.py',
'bip65-cltv-p2p.py',
'bip68-sequence.py',
'bipdersig-p2p.py',
'bipdersig.py',
'getblocktemplate_longpoll.py',
'getblocktemplate_proposals.py',
'txn_doublespend.py',
'txn_clone.py --mineblock',
'pruning.py',
'forknotify.py',
'invalidateblock.py',
# 'rpcbind_test.py', #temporary, bug in libevent, see #6655
'smartfees.py',
'maxblocksinflight.py',
'p2p-acceptblock.py',
'mempool_packages.py',
'maxuploadtarget.py',
'replace-by-fee.py',
]
#Enable ZMQ tests
if ENABLE_ZMQ == 1:
testScripts.append('zmq_test.py')
def runtests():
coverage = None
if ENABLE_COVERAGE:
coverage = RPCCoverage()
print("Initializing coverage directory at %s\n" % coverage.dir)
if(ENABLE_WALLET == 1 and ENABLE_UTILS == 1 and ENABLE_NAJAFBITD == 1):
rpcTestDir = buildDir + '/qa/rpc-tests/'
run_extended = '-extended' in opts
cov_flag = coverage.flag if coverage else ''
flags = " --srcdir %s/src %s %s" % (buildDir, cov_flag, passOn)
#Run Tests
for i in range(len(testScripts)):
if (len(opts) == 0
or (len(opts) == 1 and "-win" in opts )
or run_extended
or testScripts[i] in opts
or re.sub(".py$", "", testScripts[i]) in opts ):
print("Running testscript %s%s%s ..." % (bold[1], testScripts[i], bold[0]))
time0 = time.time()
subprocess.check_call(
rpcTestDir + testScripts[i] + flags, shell=True)
print("Duration: %s s\n" % (int(time.time() - time0)))
# exit if help is called so we print just one set of
# instructions
p = re.compile(" -h| --help")
if p.match(passOn):
sys.exit(0)
# Run Extended Tests
for i in range(len(testScriptsExt)):
if (run_extended or testScriptsExt[i] in opts
or re.sub(".py$", "", testScriptsExt[i]) in opts):
print(
"Running 2nd level testscript "
+ "%s%s%s ..." % (bold[1], testScriptsExt[i], bold[0]))
time0 = time.time()
subprocess.check_call(
rpcTestDir + testScriptsExt[i] + flags, shell=True)
print("Duration: %s s\n" % (int(time.time() - time0)))
if coverage:
coverage.report_rpc_coverage()
print("Cleaning up coverage data")
coverage.cleanup()
else:
print "No rpc tests to run. Wallet, utils, and najafbitd must all be enabled"
class RPCCoverage(object):
"""
Coverage reporting utilities for pull-tester.
Coverage calculation works by having each test script subprocess write
coverage files into a particular directory. These files contain the RPC
commands invoked during testing, as well as a complete listing of RPC
commands per `najafbit-cli help` (`rpc_interface.txt`).
After all tests complete, the commands run are combined and diff'd against
the complete list to calculate uncovered RPC commands.
See also: qa/rpc-tests/test_framework/coverage.py
"""
def __init__(self):
self.dir = tempfile.mkdtemp(prefix="coverage")
self.flag = '--coveragedir %s' % self.dir
def report_rpc_coverage(self):
"""
Print out RPC commands that were unexercised by tests.
"""
uncovered = self._get_uncovered_rpc_commands()
if uncovered:
print("Uncovered RPC commands:")
print("".join((" - %s\n" % i) for i in sorted(uncovered)))
else:
print("All RPC commands covered.")
def cleanup(self):
return shutil.rmtree(self.dir)
def _get_uncovered_rpc_commands(self):
"""
Return a set of currently untested RPC commands.
"""
# This is shared from `qa/rpc-tests/test-framework/coverage.py`
REFERENCE_FILENAME = 'rpc_interface.txt'
COVERAGE_FILE_PREFIX = 'coverage.'
coverage_ref_filename = os.path.join(self.dir, REFERENCE_FILENAME)
coverage_filenames = set()
all_cmds = set()
covered_cmds = set()
if not os.path.isfile(coverage_ref_filename):
raise RuntimeError("No coverage reference found")
with open(coverage_ref_filename, 'r') as f:
all_cmds.update([i.strip() for i in f.readlines()])
for root, dirs, files in os.walk(self.dir):
for filename in files:
if filename.startswith(COVERAGE_FILE_PREFIX):
coverage_filenames.add(os.path.join(root, filename))
for filename in coverage_filenames:
with open(filename, 'r') as f:
covered_cmds.update([i.strip() for i in f.readlines()])
return all_cmds - covered_cmds
if __name__ == '__main__':
runtests()
| []
| []
| [
"NAJAFBITD",
"NAJAFBITCLI"
]
| [] | ["NAJAFBITD", "NAJAFBITCLI"] | python | 2 | 0 | |
tests/test_util.py | # -*- coding: utf-8 -*-
#
# Copyright (C) 2012-2013 Vinay Sajip.
# Licensed to the Python Software Foundation under a contributor agreement.
# See LICENSE.txt and CONTRIBUTORS.txt.
#
from io import BytesIO
from itertools import islice
import os
import re
import shutil
try:
import ssl
except ImportError:
ssl = None
import sys
import tempfile
import textwrap
import time
from compat import unittest
from support import TempdirManager, DistlibTestCase, in_github_workflow
from distlib import DistlibException
from distlib.compat import cache_from_source
from distlib.util import (get_export_entry, ExportEntry, resolve,
get_cache_base, path_to_cache_dir, zip_dir,
parse_credentials, ensure_slash, split_filename,
EventMixin, Sequencer, unarchive, Progress,
iglob, RICH_GLOB, parse_requirement, get_extras,
Configurator, read_exports, write_exports,
FileOperator, is_string_sequence, get_package_data,
convert_path)
HERE = os.path.dirname(os.path.abspath(__file__))
IN_GITHUB_WORKFLOW = in_github_workflow()
class TestContainer(object):
def __init__(self, *args, **kwargs):
self.args = args
self.kwargs = kwargs
class UtilTestCase(DistlibTestCase):
def check_entry(self, entry, name, prefix, suffix, flags):
self.assertEqual(entry.name, name)
self.assertEqual(entry.prefix, prefix)
self.assertEqual(entry.suffix, suffix)
self.assertEqual(entry.flags, flags)
def test_export_entry(self):
self.assertIsNone(get_export_entry('foo.py'))
self.assertIsNone(get_export_entry('foo.py='))
for spec in ('foo=foo:main', 'foo =foo:main', 'foo= foo:main',
'foo = foo:main'):
self.check_entry(get_export_entry(spec),
'foo', 'foo', 'main', [])
self.check_entry(get_export_entry('foo=foo.bar:main'),
'foo', 'foo.bar', 'main', [])
self.check_entry(get_export_entry('foo=foo.bar:main [a]'),
'foo', 'foo.bar', 'main', ['a'])
# See issue #127 - allow hyphens
self.check_entry(get_export_entry('foo=foo.bar:main [with-foo]'),
'foo', 'foo.bar', 'main', ['with-foo'])
self.check_entry(get_export_entry('foo=foo.bar:main [ a ]'),
'foo', 'foo.bar', 'main', ['a'])
self.check_entry(get_export_entry('foo=foo.bar:main [a=b, c=d,e, f=g]'),
'foo', 'foo.bar', 'main', ['a=b', 'c=d', 'e', 'f=g'])
self.check_entry(get_export_entry('foo=foo.bar:main [a=9, 9=8,e, f9=g8]'),
'foo', 'foo.bar', 'main', ['a=9', '9=8', 'e', 'f9=g8'])
self.check_entry(get_export_entry('foo=foo.bar:main[x]'),
'foo', 'foo.bar', 'main', ['x'])
self.check_entry(get_export_entry('foo=abc'), 'foo', 'abc', None, [])
self.check_entry(get_export_entry('smc++ = smcpp.frontend:console'), 'smc++',
'smcpp.frontend', 'console', [])
self.assertRaises(DistlibException, get_export_entry, 'foo=foo.bar:x:y')
self.assertRaises(DistlibException, get_export_entry, 'foo=foo.bar:x [')
self.assertRaises(DistlibException, get_export_entry, 'foo=foo.bar:x ]')
self.assertRaises(DistlibException, get_export_entry, 'foo=foo.bar:x []')
self.assertRaises(DistlibException, get_export_entry, 'foo=foo.bar:x [\\]')
self.assertRaises(DistlibException, get_export_entry, 'foo=foo.bar:x [a=]')
self.assertRaises(DistlibException, get_export_entry, 'foo=foo.bar:x [a,]')
self.assertRaises(DistlibException, get_export_entry, 'foo=foo.bar:x [a,,b]')
self.assertRaises(DistlibException, get_export_entry, 'foo=foo.bar:x [a b]')
def test_resolve(self):
import logging
import logging.handlers
self.assertIs(resolve('logging', None), logging)
self.assertIs(resolve('logging.handlers', None), logging.handlers)
self.assertIs(resolve('logging', 'root'), logging.root)
self.assertEqual(resolve('logging', 'root.debug'), logging.root.debug)
def test_cache_base(self):
actual = get_cache_base()
if os.name == 'nt' and 'LOCALAPPDATA' in os.environ:
expected = os.path.expandvars('$localappdata')
else:
expected = os.path.expanduser('~')
expected = os.path.join(expected, '.distlib')
self.assertEqual(expected, actual)
self.assertTrue(os.path.isdir(expected))
@unittest.skipIf(os.name != 'posix', 'Test is only valid for POSIX')
def test_path_to_cache_dir_posix(self):
self.assertEqual(path_to_cache_dir('/home/user/some-file.zip'),
'--home--user--some-file.zip.cache')
@unittest.skipIf(os.name != 'nt', 'Test is only valid for Windows')
def test_path_to_cache_dir_nt(self):
self.assertEqual(path_to_cache_dir(r'c:\Users\User\Some-File.zip'),
'c-----Users--User--Some-File.zip.cache')
def test_parse_credentials(self):
cases = (
('example.com', (None, None, 'example.com')),
('[email protected]', ('user', None, 'example.com')),
('user:[email protected]', ('user', 'pwd', 'example.com')),
('user:@example.com', ('user', '', 'example.com')),
('user:pass@[email protected]', ('user', 'pass@word', 'example.com')),
('user:pass:[email protected]', ('user', 'pass:word', 'example.com')),
('user%3Aname:%23%5E%[email protected]', ('user:name', '#^@', 'example.com')),
)
for s, expected in cases:
self.assertEqual(parse_credentials(s), expected)
def test_ensure_slash(self):
self.assertEqual(ensure_slash(''), '/')
self.assertEqual(ensure_slash('/'), '/')
self.assertEqual(ensure_slash('abc'), 'abc/')
self.assertEqual(ensure_slash('def/'), 'def/')
def test_split_filename(self):
self.assertIsNone(split_filename('abl.jquery'))
self.assertEqual(split_filename('abl.jquery-1.4.2-2'),
('abl.jquery', '1.4.2-2', None))
self.assertEqual(split_filename('python-gnupg-0.1'),
('python-gnupg', '0.1', None))
self.assertEqual(split_filename('baklabel-1.0.3-2729-py3.2'),
('baklabel', '1.0.3-2729', '3.2'))
self.assertEqual(split_filename('baklabel-1.0.3-2729-py27'),
('baklabel', '1.0.3-2729', '27'))
self.assertEqual(split_filename('advpy-0.99b'),
('advpy', '0.99b', None))
self.assertEqual(split_filename('asv_files-dev-20120501-01', 'asv_files'),
('asv_files', 'dev-20120501-01', None))
self.assertEqual(split_filename('greenlet-0.4.0-py27-win32'),
('greenlet', '0.4.0', '27'))
self.assertEqual(split_filename('greenlet-0.4.0-py27-linux_x86_64'),
('greenlet', '0.4.0', '27'))
self.assertEqual(split_filename('django-altuser-v0.6.8'),
('django-altuser', 'v0.6.8', None))
self.assertEqual(split_filename('youtube_dl_server-alpha.1'),
('youtube_dl_server', 'alpha.1', None))
self.assertEqual(split_filename('pytest-xdist-dev'),
('pytest-xdist', 'dev', None))
self.assertEqual(split_filename('pytest_xdist-0.1_myfork', None),
('pytest_xdist', '0.1_myfork', None))
self.assertEqual(split_filename('pytest_xdist-0.1_myfork',
'pytest-xdist'),
('pytest_xdist', '0.1_myfork', None))
self.assertEqual(split_filename('pytest_xdist-0.1_myfork',
'pytest_dist'),
('pytest_xdist', '0.1_myfork', None))
def test_convert_path(self):
CP = convert_path
if os.sep == '/':
d = os.path.dirname(__file__)
self.assertEqual(CP(d), d)
else:
self.assertEqual(CP(''), '')
self.assertRaises(ValueError, CP, '/foo')
self.assertRaises(ValueError, CP, 'foo/')
def test_events(self):
collected = []
def handler1(e, *args, **kwargs):
collected.append((1, e, args, kwargs))
def handler2(e, *args, **kwargs):
collected.append((2, e, args, kwargs))
def handler3(e, *args, **kwargs):
if not args:
raise NotImplementedError('surprise!')
collected.append((3, e, args, kwargs))
return (args, kwargs)
e = EventMixin()
e.add('A', handler1)
self.assertRaises(ValueError, e.remove, 'B', handler1)
cases = (
((1, 2), {'buckle': 'my shoe'}),
((3, 4), {'shut': 'the door'}),
)
for case in cases:
e.publish('A', *case[0], **case[1])
e.publish('B', *case[0], **case[1])
for actual, source in zip(collected, cases):
self.assertEqual(actual, (1, 'A') + source[:1] + source[1:])
collected = []
e.add('B', handler2)
self.assertEqual(tuple(e.get_subscribers('A')), (handler1,))
self.assertEqual(tuple(e.get_subscribers('B')), (handler2,))
self.assertEqual(tuple(e.get_subscribers('C')), ())
for case in cases:
e.publish('A', *case[0], **case[1])
e.publish('B', *case[0], **case[1])
actuals = islice(collected, 0, None, 2)
for actual, source in zip(actuals, cases):
self.assertEqual(actual, (1, 'A') + source[:1] + source[1:])
actuals = islice(collected, 1, None, 2)
for actual, source in zip(actuals, cases):
self.assertEqual(actual, (2, 'B') + source[:1] + source[1:])
e.remove('B', handler2)
collected = []
for case in cases:
e.publish('A', *case[0], **case[1])
e.publish('B', *case[0], **case[1])
for actual, source in zip(collected, cases):
self.assertEqual(actual, (1, 'A') + source[:1] + source[1:])
e.add('C', handler3)
collected = []
returned = []
for case in cases:
returned.extend(e.publish('C', *case[0], **case[1]))
returned.extend(e.publish('C'))
for actual, source in zip(collected, cases):
self.assertEqual(actual, (3, 'C') + source[:1] + source[1:])
self.assertEqual(tuple(islice(returned, 1, None, 2)), (None, None))
actuals = islice(returned, 0, None, 2)
for actual, expected in zip(actuals, cases):
self.assertEqual(actual, expected)
def test_sequencer_basic(self):
seq = Sequencer()
steps = (
('check', 'sdist'),
('check', 'register'),
('check', 'sdist'),
('check', 'register'),
('register', 'upload_sdist'),
('sdist', 'upload_sdist'),
('check', 'build_clibs'),
('build_clibs', 'build_ext'),
('build_ext', 'build_py'),
('build_py', 'build_scripts'),
('build_scripts', 'build'),
('build', 'test'),
('register', 'upload_bdist'),
('build', 'upload_bdist'),
('build', 'install_headers'),
('install_headers', 'install_lib'),
('install_lib', 'install_scripts'),
('install_scripts', 'install_data'),
('install_data', 'install_distinfo'),
('install_distinfo', 'install')
)
for pred, succ in steps:
seq.add(pred, succ)
# Note: these tests are sensitive to dictionary ordering
# but work under Python 2.6, 2.7, 3.2, 3.3, 3.4 and PyPy 2.5
cases = (
('check', ['check']),
('register', ['check', 'register']),
('sdist', ['check', 'sdist']),
('build_clibs', ['check', 'build_clibs']),
('build_ext', ['check', 'build_clibs', 'build_ext']),
('build_py', ['check', 'build_clibs', 'build_ext', 'build_py']),
('build_scripts', ['check', 'build_clibs', 'build_ext', 'build_py',
'build_scripts']),
('build', ['check', 'build_clibs', 'build_ext', 'build_py',
'build_scripts', 'build']),
('test', ['check', 'build_clibs', 'build_ext', 'build_py',
'build_scripts', 'build', 'test']),
('install_headers', ['check', 'build_clibs', 'build_ext',
'build_py', 'build_scripts', 'build',
'install_headers']),
('install_lib', ['check', 'build_clibs', 'build_ext', 'build_py',
'build_scripts', 'build', 'install_headers',
'install_lib']),
('install_scripts', ['check', 'build_clibs', 'build_ext',
'build_py', 'build_scripts', 'build',
'install_headers', 'install_lib',
'install_scripts']),
('install_data', ['check', 'build_clibs', 'build_ext', 'build_py',
'build_scripts', 'build', 'install_headers',
'install_lib', 'install_scripts',
'install_data']),
('install_distinfo', ['check', 'build_clibs', 'build_ext',
'build_py', 'build_scripts', 'build',
'install_headers', 'install_lib',
'install_scripts', 'install_data',
'install_distinfo']),
('install', ['check', 'build_clibs', 'build_ext', 'build_py',
'build_scripts', 'build', 'install_headers',
'install_lib', 'install_scripts', 'install_data',
'install_distinfo', 'install']),
('upload_sdist', (['check', 'register', 'sdist', 'upload_sdist'],
['check', 'sdist', 'register', 'upload_sdist'])),
('upload_bdist', (['check', 'build_clibs', 'build_ext', 'build_py',
'build_scripts', 'build', 'register',
'upload_bdist'],
['check', 'build_clibs', 'build_ext', 'build_py',
'build_scripts', 'register', 'build',
'upload_bdist'])),
)
for final, expected in cases:
actual = list(seq.get_steps(final))
if isinstance(expected, tuple):
self.assertIn(actual, expected)
else:
self.assertEqual(actual, expected)
dot = seq.dot
expected = '''
digraph G {
check -> build_clibs;
install_lib -> install_scripts;
register -> upload_bdist;
build -> upload_bdist;
build_ext -> build_py;
install_scripts -> install_data;
check -> sdist;
check -> register;
build -> install_headers;
install_data -> install_distinfo;
sdist -> upload_sdist;
register -> upload_sdist;
install_distinfo -> install;
build -> test;
install_headers -> install_lib;
build_py -> build_scripts;
build_clibs -> build_ext;
build_scripts -> build;
}
'''
expected = textwrap.dedent(expected).strip().splitlines()
actual = dot.splitlines()
self.assertEqual(expected[0], actual[0])
self.assertEqual(expected[-1], actual[-1])
self.assertEqual(set(expected[1:-1]), set(actual[1:-1]))
actual = seq.strong_connections
expected = (
[
('test',), ('upload_bdist',), ('install',),
('install_distinfo',), ('install_data',), ('install_scripts',),
('install_lib',), ('install_headers',), ('build',),
('build_scripts',), ('build_py',), ('build_ext',),
('build_clibs',), ('upload_sdist',), ('sdist',), ('register',),
('check',)
],
[
('install',), ('install_distinfo',), ('install_data',),
('install_scripts',), ('install_lib',), ('install_headers',),
('test',), ('upload_bdist',), ('build',), ('build_scripts',),
('build_py',), ('build_ext',), ('build_clibs',),
('upload_sdist',), ('sdist',), ('register',), ('check',)
],
[
('upload_sdist',), ('sdist',), ('install',),
('install_distinfo',), ('install_data',), ('upload_bdist',),
('register',), ('install_scripts',), ('install_lib',),
('install_headers',), ('test',), ('build',),
('build_scripts',), ('build_py',), ('build_ext',),
('build_clibs',), ('check',)
],
# Next case added for PyPy
[
('upload_sdist',), ('sdist',), ('upload_bdist',), ('register',),
('test',), ('install',), ('install_distinfo',),
('install_data',), ('install_scripts',), ('install_lib',),
('install_headers',), ('build',), ('build_scripts',),
('build_py',), ('build_ext',), ('build_clibs',), ('check',)
],
# Next case added for Python 3.6
[
('upload_sdist',), ('sdist',), ('upload_bdist',), ('register',),
('install',), ('install_distinfo',), ('install_data',),
('install_scripts',), ('install_lib',), ('install_headers',),
('test',), ('build',), ('build_scripts',), ('build_py',),
('build_ext',), ('build_clibs',), ('check',)
],
# Next case added for Python 3.11
[
('upload_sdist',), ('sdist',), ('install',), ('install_distinfo',),
('install_data',), ('install_scripts',), ('install_lib',),
('install_headers',), ('test',), ('upload_bdist',), ('build',),
('build_scripts',), ('build_py',), ('build_ext',), ('build_clibs',),
('register',), ('check',)
]
)
self.assertIn(actual, expected)
def test_sequencer_cycle(self):
seq = Sequencer()
seq.add('A', 'B')
seq.add('B', 'C')
seq.add('C', 'D')
self.assertEqual(list(seq.get_steps('D')), ['A', 'B', 'C', 'D'])
seq.add('C', 'A')
self.assertEqual(list(seq.get_steps('D')), ['C', 'A', 'B', 'D'])
self.assertFalse(seq.is_step('E'))
self.assertRaises(ValueError, seq.get_steps, 'E')
seq.add_node('E')
self.assertTrue(seq.is_step('E'))
self.assertEqual(list(seq.get_steps('E')), ['E'])
seq.remove_node('E')
self.assertFalse(seq.is_step('E'))
self.assertRaises(ValueError, seq.get_steps, 'E')
seq.remove('C', 'A')
self.assertEqual(list(seq.get_steps('D')), ['A', 'B', 'C', 'D'])
def test_sequencer_removal(self):
seq = Sequencer()
seq.add('A', 'B')
seq.add('B', 'C')
seq.add('C', 'D')
preds = {
'B': set(['A']),
'C': set(['B']),
'D': set(['C'])
}
succs = {
'A': set(['B']),
'B': set(['C']),
'C': set(['D'])
}
self.assertEqual(seq._preds, preds)
self.assertEqual(seq._succs, succs)
seq.remove_node('C')
self.assertEqual(seq._preds, preds)
self.assertEqual(seq._succs, succs)
seq.remove_node('C', True)
self.assertEqual(seq._preds, {'B': set(['A'])})
self.assertEqual(seq._succs, {'A': set(['B'])})
def test_unarchive(self):
import zipfile, tarfile
good_archives = (
('good.zip', zipfile.ZipFile, 'r', 'namelist'),
('good.tar', tarfile.open, 'r', 'getnames'),
('good.tar.gz', tarfile.open, 'r:gz', 'getnames'),
('good.tar.bz2', tarfile.open, 'r:bz2', 'getnames'),
)
bad_archives = ('bad.zip', 'bad.tar', 'bad.tar.gz', 'bad.tar.bz2')
for name, cls, mode, lister in good_archives:
td = tempfile.mkdtemp()
archive = None
try:
name = os.path.join(HERE, name)
unarchive(name, td)
archive = cls(name, mode)
names = getattr(archive, lister)()
for name in names:
p = os.path.join(td, name)
self.assertTrue(os.path.exists(p))
finally:
shutil.rmtree(td)
if archive:
archive.close()
for name in bad_archives:
name = os.path.join(HERE, name)
td = tempfile.mkdtemp()
try:
self.assertRaises(ValueError, unarchive, name, td)
finally:
shutil.rmtree(td)
def test_string_sequence(self):
self.assertTrue(is_string_sequence(['a']))
self.assertTrue(is_string_sequence(['a', 'b']))
self.assertFalse(is_string_sequence(['a', 'b', None]))
self.assertRaises(AssertionError, is_string_sequence, [])
@unittest.skipIf('SKIP_ONLINE' in os.environ, 'Skipping online test')
@unittest.skipUnless(ssl, 'SSL required for this test.')
def test_package_data(self):
data = get_package_data(name='config', version='0.3.6')
self.assertTrue(data)
self.assertTrue('index-metadata' in data)
metadata = data['index-metadata']
self.assertEqual(metadata['name'], 'config')
self.assertEqual(metadata['version'], '0.3.6')
data = get_package_data(name='config', version='0.3.5')
self.assertFalse(data)
def test_zip_dir(self):
d = os.path.join(HERE, 'foofoo')
data = zip_dir(d)
self.assertIsInstance(data, BytesIO)
def test_configurator(self):
d = {
'a': 1,
'b': 2.0,
'c': 'xyz',
'd': 'inc://' + os.path.join(HERE, 'included.json'),
'e': 'inc://' + 'included.json',
'stderr': 'ext://sys.stderr',
'list_o_stuff': [
'cfg://stderr',
'ext://sys.stdout',
'ext://logging.NOTSET',
],
'dict_o_stuff': {
'k1': 'cfg://list_o_stuff[1]',
'k2': 'abc',
'k3': 'cfg://list_o_stuff',
},
'another_dict_o_stuff': {
'k1': 'cfg://dict_o_stuff[k2]',
'k2': 'ext://re.I',
'k3': 'cfg://dict_o_stuff[k3][0]',
},
'custom': {
'()': __name__ + '.TestContainer',
'[]': [1, 'a', 2.0, ('b', 'c', 'd')],
'.': {
'p1': 'a',
'p2': 'b',
'p3': {
'()' : __name__ + '.TestContainer',
'[]': [1, 2],
'.': {
'p1': 'c',
},
},
},
'k1': 'v1',
'k2': 'v2',
}
}
cfg = Configurator(d, HERE)
self.assertEqual(cfg['a'], 1)
self.assertEqual(cfg['b'], 2.0)
self.assertEqual(cfg['c'], 'xyz')
self.assertIs(cfg['stderr'], sys.stderr)
self.assertIs(cfg['list_o_stuff'][0], sys.stderr)
self.assertIs(cfg['list_o_stuff'][1], sys.stdout)
self.assertIs(cfg['list_o_stuff'][-1], 0) # logging.NOTSET == 0
self.assertIs(cfg['dict_o_stuff']['k1'], sys.stdout)
self.assertIs(cfg['another_dict_o_stuff']['k1'], 'abc')
self.assertIs(cfg['another_dict_o_stuff']['k2'], re.I)
self.assertIs(cfg['another_dict_o_stuff']['k3'], sys.stderr)
custom = cfg['custom']
self.assertIsInstance(custom, TestContainer)
self.assertEqual(custom.args, (1, 'a', 2.0, ('b', 'c', 'd')))
self.assertEqual(custom.kwargs, {'k1': 'v1', 'k2': 'v2'})
self.assertEqual(custom.p1, 'a')
self.assertEqual(custom.p2, 'b')
self.assertIsInstance(custom.p3, TestContainer)
self.assertEqual(custom.p3.args, (1, 2))
self.assertEqual(custom.p3.kwargs, {})
self.assertEqual(custom.p3.p1, 'c')
self.assertEqual(cfg['d'], {'foo': 'bar', 'bar': 'baz'})
self.assertEqual(cfg['e'], {'foo': 'bar', 'bar': 'baz'})
def _speed_range(min_speed, max_speed):
return tuple(['%d KB/s' % v for v in range(min_speed,
max_speed + 1)])
def _eta_range(min_eta, max_eta, prefix='ETA '):
msg = prefix + ': 00:00:%02d'
return tuple([msg % v for v in range(min_eta, max_eta + 1)])
class ProgressTestCase(DistlibTestCase):
# Of late, the speed tests keep failing on AppVeyor and Windows
@unittest.skipIf(IN_GITHUB_WORKFLOW or (os.name == 'nt' and
os.environ.get('APPVEYOR') == 'True'),
'Test disabled on some environments due to performance')
def test_basic(self):
# These ranges may need tweaking to cater for especially slow
# machines
if os.name == 'nt':
speed1 = _speed_range(18, 20)
speed2 = _speed_range(20, 22)
else:
speed1 = _speed_range(16, 19)
speed2 = _speed_range(20, 22)
expected = (
(' 10 %', _eta_range(4, 7), speed1),
(' 20 %', _eta_range(4, 7), speed1),
(' 30 %', _eta_range(3, 4), speed1),
(' 40 %', _eta_range(3, 3), speed1),
(' 50 %', _eta_range(2, 2), speed1),
(' 60 %', _eta_range(2, 2), speed1),
(' 70 %', _eta_range(1, 1), speed1),
(' 80 %', _eta_range(1, 1), speed1),
(' 90 %', _eta_range(0, 0), speed1),
('100 %', _eta_range(4, 5, 'Done'), speed2),
)
bar = Progress(maxval=100000).start()
for i, v in enumerate(range(10000, 100000, 10000)):
time.sleep(0.5)
bar.update(v)
p, e, s = expected[i]
self.assertEqual(bar.percentage, p)
self.assertIn(bar.ETA, e, p)
self.assertIn(bar.speed, s)
bar.stop()
p, e, s = expected[i + 1]
self.assertEqual(bar.percentage, p)
self.assertIn(bar.ETA, e, p)
self.assertIn(bar.speed, s)
# Of late, the speed tests keep failing on AppVeyor and Windows
@unittest.skipIf(IN_GITHUB_WORKFLOW or (os.name == 'nt' and
os.environ.get('APPVEYOR') == 'True'),
'Test disabled on some environments due to performance')
def test_unknown(self):
if os.name == 'nt':
speed = _speed_range(17, 20)
else:
speed = _speed_range(17, 19)
expected = (
(' ?? %', 'ETA : ??:??:??', speed),
(' ?? %', 'ETA : ??:??:??', speed),
(' ?? %', 'ETA : ??:??:??', speed),
(' ?? %', 'ETA : ??:??:??', speed),
(' ?? %', 'ETA : ??:??:??', speed),
(' ?? %', 'ETA : ??:??:??', speed),
(' ?? %', 'ETA : ??:??:??', speed),
(' ?? %', 'ETA : ??:??:??', speed),
(' ?? %', 'ETA : ??:??:??', speed),
('100 %', 'Done: 00:00:04', speed),
)
bar = Progress(maxval=None).start()
for i, v in enumerate(range(10000, 100000, 10000)):
time.sleep(0.5)
bar.update(v)
p, e, s = expected[i]
self.assertEqual(bar.percentage, p)
self.assertEqual(bar.ETA, e)
self.assertIn(bar.speed, s)
bar.stop()
p, e, s = expected[i + 1]
self.assertEqual(bar.percentage, p)
self.assertEqual(bar.ETA, e)
self.assertIn(bar.speed, s)
class FileOpsTestCase(DistlibTestCase):
def setUp(self):
self.fileop = FileOperator()
self.workdir = tempfile.mkdtemp()
def tearDown(self):
if os.path.isdir(self.workdir):
shutil.rmtree(self.workdir)
def test_ensure_dir(self):
td = self.workdir
os.rmdir(td)
self.fileop.ensure_dir(td)
self.assertTrue(os.path.exists(td))
self.fileop.dry_run = True
os.rmdir(td)
self.fileop.ensure_dir(td)
self.assertFalse(os.path.exists(td))
def test_ensure_removed(self):
td = self.workdir
self.assertTrue(os.path.exists(td))
self.fileop.dry_run = True
self.fileop.ensure_removed(td)
self.assertTrue(os.path.exists(td))
self.fileop.dry_run = False
self.fileop.ensure_removed(td)
self.assertFalse(os.path.exists(td))
def test_is_writable(self):
sd = 'subdir'
ssd = 'subsubdir'
path = os.path.join(self.workdir, sd, ssd)
os.makedirs(path)
path = os.path.join(path, 'test')
self.assertTrue(self.fileop.is_writable(path))
if os.name == 'posix':
self.assertFalse(self.fileop.is_writable('/etc'))
def test_byte_compile(self):
path = os.path.join(self.workdir, 'hello.py')
dpath = cache_from_source(path, True)
self.fileop.write_text_file(path, 'print("Hello, world!")', 'utf-8')
self.fileop.byte_compile(path, optimize=False)
self.assertTrue(os.path.exists(dpath))
def write_some_files(self):
path = os.path.join(self.workdir, 'file1')
written = []
self.fileop.write_text_file(path, 'test', 'utf-8')
written.append(path)
path = os.path.join(self.workdir, 'file2')
self.fileop.copy_file(written[0], path)
written.append(path)
path = os.path.join(self.workdir, 'dir1')
self.fileop.ensure_dir(path)
return set(written), set([path])
def test_copy_check(self):
srcpath = os.path.join(self.workdir, 'file1')
self.fileop.write_text_file(srcpath, 'test', 'utf-8')
dstpath = os.path.join(self.workdir, 'file2')
os.mkdir(dstpath)
self.assertRaises(ValueError, self.fileop.copy_file, srcpath,
dstpath)
os.rmdir(dstpath)
if os.name == 'posix': # symlinks available
linkpath = os.path.join(self.workdir, 'file3')
self.fileop.write_text_file(linkpath, 'linkdest', 'utf-8')
os.symlink(linkpath, dstpath)
self.assertRaises(ValueError, self.fileop.copy_file, srcpath,
dstpath)
def test_commit(self):
# will assert if record isn't set
self.assertRaises(AssertionError, self.fileop.commit)
self.fileop.record = True
expected = self.write_some_files()
actual = self.fileop.commit()
self.assertEqual(actual, expected)
self.assertFalse(self.fileop.record)
def test_rollback(self):
# will assert if record isn't set
self.assertRaises(AssertionError, self.fileop.commit)
self.fileop.record = True
expected = self.write_some_files()
actual = self.fileop.rollback()
self.assertEqual(os.listdir(self.workdir), [])
self.assertFalse(self.fileop.record)
class GlobTestCaseBase(TempdirManager, DistlibTestCase):
def build_files_tree(self, files):
tempdir = self.mkdtemp()
for filepath in files:
is_dir = filepath.endswith('/')
filepath = os.path.join(tempdir, *filepath.split('/'))
if is_dir:
dirname = filepath
else:
dirname = os.path.dirname(filepath)
if dirname and not os.path.exists(dirname):
os.makedirs(dirname)
if not is_dir:
self.write_file(filepath, 'babar')
return tempdir
@staticmethod
def os_dependent_path(path):
path = path.rstrip('/').split('/')
return os.path.join(*path)
def clean_tree(self, spec):
files = []
for path, includes in spec.items():
if includes:
files.append(self.os_dependent_path(path))
return sorted(files)
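# distlib's iglob extends fnmatch-style patterns with '**' for recursive
# matching and '{a,b}' brace alternation (see RICH_GLOB); the cases below
# exercise both forms as well as invalid patterns.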
class GlobTestCase(GlobTestCaseBase):
def assertGlobMatch(self, glob, spec):
tempdir = self.build_files_tree(spec)
expected = self.clean_tree(spec)
os.chdir(tempdir)
result = sorted(iglob(glob))
self.assertEqual(expected, result)
def test_regex_rich_glob(self):
matches = RICH_GLOB.findall(
r"babar aime les {fraises} est les {huitres}")
self.assertEqual(["fraises", "huitres"], matches)
def test_simple_glob(self):
glob = '*.tp?'
spec = {'coucou.tpl': True,
'coucou.tpj': True,
'Donotwant': False}
self.assertGlobMatch(glob, spec)
def test_simple_glob_in_dir(self):
glob = os.path.join('babar', '*.tp?')
spec = {'babar/coucou.tpl': True,
'babar/coucou.tpj': True,
'babar/toto.bin': False,
'Donotwant': False}
self.assertGlobMatch(glob, spec)
def test_recursive_glob_head(self):
glob = os.path.join('**', 'tip', '*.t?l')
spec = {'babar/zaza/zuzu/tip/coucou.tpl': True,
'babar/z/tip/coucou.tpl': True,
'babar/tip/coucou.tpl': True,
'babar/zeop/tip/babar/babar.tpl': False,
'babar/z/tip/coucou.bin': False,
'babar/toto.bin': False,
'zozo/zuzu/tip/babar.tpl': True,
'zozo/tip/babar.tpl': True,
'Donotwant': False}
self.assertGlobMatch(glob, spec)
def test_recursive_glob_tail(self):
glob = os.path.join('babar', '**')
spec = {'babar/zaza/': True,
'babar/zaza/zuzu/': True,
'babar/zaza/zuzu/babar.xml': True,
'babar/zaza/zuzu/toto.xml': True,
'babar/zaza/zuzu/toto.csv': True,
'babar/zaza/coucou.tpl': True,
'babar/bubu.tpl': True,
'zozo/zuzu/tip/babar.tpl': False,
'zozo/tip/babar.tpl': False,
'Donotwant': False}
self.assertGlobMatch(glob, spec)
def test_recursive_glob_middle(self):
glob = os.path.join('babar', '**', 'tip', '*.t?l')
spec = {'babar/zaza/zuzu/tip/coucou.tpl': True,
'babar/z/tip/coucou.tpl': True,
'babar/tip/coucou.tpl': True,
'babar/zeop/tip/babar/babar.tpl': False,
'babar/z/tip/coucou.bin': False,
'babar/toto.bin': False,
'zozo/zuzu/tip/babar.tpl': False,
'zozo/tip/babar.tpl': False,
'Donotwant': False}
self.assertGlobMatch(glob, spec)
def test_glob_set_tail(self):
glob = os.path.join('bin', '*.{bin,sh,exe}')
spec = {'bin/babar.bin': True,
'bin/zephir.sh': True,
'bin/celestine.exe': True,
'bin/cornelius.bat': False,
'bin/cornelius.xml': False,
'toto/yurg': False,
'Donotwant': False}
self.assertGlobMatch(glob, spec)
def test_glob_set_middle(self):
glob = os.path.join('xml', '{babar,toto}.xml')
spec = {'xml/babar.xml': True,
'xml/toto.xml': True,
'xml/babar.xslt': False,
'xml/cornelius.sgml': False,
'xml/zephir.xml': False,
'toto/yurg.xml': False,
'Donotwant': False}
self.assertGlobMatch(glob, spec)
def test_glob_set_head(self):
glob = os.path.join('{xml,xslt}', 'babar.*')
spec = {'xml/babar.xml': True,
'xml/toto.xml': False,
'xslt/babar.xslt': True,
'xslt/toto.xslt': False,
'toto/yurg.xml': False,
'Donotwant': False}
self.assertGlobMatch(glob, spec)
def test_glob_all(self):
dirs = '{%s,%s}' % (os.path.join('xml', '*'),
os.path.join('xslt', '**'))
glob = os.path.join(dirs, 'babar.xml')
spec = {'xml/a/babar.xml': True,
'xml/b/babar.xml': True,
'xml/a/c/babar.xml': False,
'xslt/a/babar.xml': True,
'xslt/b/babar.xml': True,
'xslt/a/c/babar.xml': True,
'toto/yurg.xml': False,
'Donotwant': False}
self.assertGlobMatch(glob, spec)
def test_invalid_glob_pattern(self):
invalids = [
'ppooa**',
'azzaeaz4**/',
'/**ddsfs',
'**##1e"&e',
'DSFb**c009',
'{',
'{aaQSDFa',
'}',
'aQSDFSaa}',
'{**a,',
',**a}',
'{a**,',
',b**}',
'{a**a,babar}',
'{bob,b**z}',
]
for pattern in invalids:
self.assertRaises(ValueError, iglob, pattern)
def test_parse_requirement(self):
# Empty requirements
for empty in ('', '#this should be ignored'):
self.assertIsNone(parse_requirement(empty))
# Invalid requirements
for invalid in ('a (', 'a/', 'a$', 'a [', 'a () [],', 'a 1.2'):
self.assertRaises(SyntaxError, parse_requirement, invalid)
# Valid requirements
def validate(r, values):
self.assertEqual(r.name, values[0])
self.assertEqual(r.constraints, values[1])
self.assertEqual(r.extras, values[2])
self.assertEqual(r.requirement, values[3])
self.assertEqual(r.url, values[4])
r = parse_requirement('a')
validate(r, ('a', None, None, 'a', None))
r = parse_requirement('a >= 1.2, <2.0,!=1.7')
validate(r, ('a', [('>=', '1.2'), ('<', '2.0'), ('!=', '1.7')], None,
'a >= 1.2, < 2.0, != 1.7', None))
r = parse_requirement('a [ab,cd , ef] >= 1.2, <2.0')
validate(r, ('a', [('>=', '1.2'), ('<', '2.0')], ['ab', 'cd', 'ef'],
'a >= 1.2, < 2.0', None))
r = parse_requirement('a[]')
validate(r, ('a', None, None, 'a', None))
r = parse_requirement('a (== 1.2.*, != 1.2.1.*)')
validate(r, ('a', [('==', '1.2.*'), ('!=', '1.2.1.*')], None,
'a == 1.2.*, != 1.2.1.*', None))
r = parse_requirement('a @ http://domain.com/path#abc=def')
validate(r, ('a', None, None, 'a', 'http://domain.com/path#abc=def'))
# See issue #148
r = parse_requirement('a >=3.6')
validate(r, ('a', [('>=', '3.6')], None, 'a >= 3.6', None))
r = parse_requirement('a >=3.6,')
validate(r, ('a', [('>=', '3.6')], None, 'a >= 3.6', None))
if False: # TODO re-enable
for e in ('*', ':*:', ':meta:', '-', '-abc'):
r = parse_requirement('a [%s]' % e)
validate(r, ('a', None, [e], 'a', None))
def test_write_exports(self):
exports = {
'foo': {
'v1': ExportEntry('v1', 'p1', 's1', []),
'v2': ExportEntry('v2', 'p2', 's2', ['f2=a', 'g2']),
},
'bar': {
'v3': ExportEntry('v3', 'p3', 's3', ['f3', 'g3=h']),
'v4': ExportEntry('v4', 'p4', 's4', ['f4', 'g4']),
},
}
fd, fn = tempfile.mkstemp()
try:
os.close(fd)
with open(fn, 'wb') as f:
write_exports(exports, f)
with open(fn, 'rb') as f:
actual = read_exports(f)
self.assertEqual(actual, exports)
finally:
os.remove(fn)
def test_get_extras(self):
cases = (
(['*'], ['i18n'], set(['i18n'])),
(['*', '-bar'], ['foo', 'bar'], set(['foo'])),
)
for requested, available, expected in cases:
actual = get_extras(requested, available)
self.assertEqual(actual, expected)
if __name__ == '__main__': # pragma: no cover
unittest.main()
| []
| []
| [
"APPVEYOR"
]
| [] | ["APPVEYOR"] | python | 1 | 0 | |
src/adservice/src/main/java/hipstershop/AdService.java | /*
* Copyright 2018, Google LLC.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package hipstershop;
import com.google.common.collect.ImmutableListMultimap;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Iterables;
import hipstershop.Demo.Ad;
import hipstershop.Demo.AdRequest;
import hipstershop.Demo.AdResponse;
import io.grpc.Server;
import io.grpc.ServerBuilder;
import io.grpc.StatusRuntimeException;
import io.grpc.health.v1.HealthCheckResponse.ServingStatus;
import io.grpc.services.*;
import io.grpc.stub.StreamObserver;
import io.opencensus.common.Duration;
import io.opencensus.contrib.grpc.metrics.RpcViews;
import io.opencensus.exporter.stats.stackdriver.StackdriverStatsConfiguration;
import io.opencensus.exporter.stats.stackdriver.StackdriverStatsExporter;
import io.opencensus.exporter.trace.jaeger.JaegerTraceExporter;
import io.opencensus.exporter.trace.stackdriver.StackdriverTraceConfiguration;
import io.opencensus.exporter.trace.stackdriver.StackdriverTraceExporter;
import io.opencensus.trace.AttributeValue;
import io.opencensus.trace.Span;
import io.opencensus.trace.Tracer;
import io.opencensus.trace.Tracing;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import java.util.Random;
import java.util.concurrent.TimeUnit;
import org.apache.logging.log4j.Level;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
public final class AdService {
private static final Logger logger = LogManager.getLogger(AdService.class);
private static final Tracer tracer = Tracing.getTracer();
private static int MAX_ADS_TO_SERVE = 2;
private Server server;
private HealthStatusManager healthMgr;
private static final AdService service = new AdService();
private void start() throws IOException {
int port = Integer.parseInt(System.getenv("PORT"));
healthMgr = new HealthStatusManager();
server =
ServerBuilder.forPort(port)
.addService(new AdServiceImpl())
.addService(healthMgr.getHealthService())
.build()
.start();
logger.info("Ad Service started, listening on " + port);
Runtime.getRuntime()
.addShutdownHook(
new Thread() {
@Override
public void run() {
// Use stderr here since the logger may have been reset by its JVM shutdown hook.
System.err.println("*** shutting down gRPC ads server since JVM is shutting down");
AdService.this.stop();
System.err.println("*** server shut down");
}
});
healthMgr.setStatus("", ServingStatus.SERVING);
}
private void stop() {
if (server != null) {
healthMgr.clearStatus("");
server.shutdown();
}
}
private static class AdServiceImpl extends hipstershop.AdServiceGrpc.AdServiceImplBase {
/**
* Retrieves ads based on context provided in the request {@code AdRequest}.
*
* @param req the request containing context.
* @param responseObserver the stream observer which gets notified with the value of {@code
* AdResponse}
*/
@Override
public void getAds(AdRequest req, StreamObserver<AdResponse> responseObserver) {
AdService service = AdService.getInstance();
Span span = tracer.getCurrentSpan();
try {
span.putAttribute("method", AttributeValue.stringAttributeValue("getAds"));
List<Ad> allAds = new ArrayList<>();
logger.info("received ad request (context_words=" + req.getContextKeysList() + ")");
if (req.getContextKeysCount() > 0) {
span.addAnnotation(
"Constructing Ads using context",
ImmutableMap.of(
"Context Keys",
AttributeValue.stringAttributeValue(req.getContextKeysList().toString()),
"Context Keys length",
AttributeValue.longAttributeValue(req.getContextKeysCount())));
for (int i = 0; i < req.getContextKeysCount(); i++) {
Collection<Ad> ads = service.getAdsByCategory(req.getContextKeys(i));
allAds.addAll(ads);
}
} else {
span.addAnnotation("No Context provided. Constructing random Ads.");
allAds = service.getRandomAds();
}
if (allAds.isEmpty()) {
// Serve random ads.
span.addAnnotation("No Ads found based on context. Constructing random Ads.");
allAds = service.getRandomAds();
}
AdResponse reply = AdResponse.newBuilder().addAllAds(allAds).build();
responseObserver.onNext(reply);
responseObserver.onCompleted();
} catch (StatusRuntimeException e) {
logger.log(Level.WARN, "GetAds Failed", e.getStatus());
responseObserver.onError(e);
}
}
}
private static final ImmutableListMultimap<String, Ad> adsMap = createAdsMap();
private Collection<Ad> getAdsByCategory(String category) {
return adsMap.get(category);
}
private static final Random random = new Random();
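  // Returns MAX_ADS_TO_SERVE ads drawn uniformly at random from the full ad
  // inventory; sampling is with replacement, so the same ad may appear twice.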
private List<Ad> getRandomAds() {
List<Ad> ads = new ArrayList<>(MAX_ADS_TO_SERVE);
Collection<Ad> allAds = adsMap.values();
for (int i = 0; i < MAX_ADS_TO_SERVE; i++) {
ads.add(Iterables.get(allAds, random.nextInt(allAds.size())));
}
return ads;
}
private static AdService getInstance() {
return service;
}
/** Await termination on the main thread since the grpc library uses daemon threads. */
private void blockUntilShutdown() throws InterruptedException {
if (server != null) {
server.awaitTermination();
}
}
private static ImmutableListMultimap<String, Ad> createAdsMap() {
Ad camera =
Ad.newBuilder()
.setRedirectUrl("/product/2ZYFJ3GM2N")
.setText("Film camera for sale. 50% off.")
.build();
Ad lens =
Ad.newBuilder()
.setRedirectUrl("/product/66VCHSJNUP")
.setText("Vintage camera lens for sale. 20% off.")
.build();
Ad recordPlayer =
Ad.newBuilder()
.setRedirectUrl("/product/0PUK6V6EV0")
.setText("Vintage record player for sale. 30% off.")
.build();
Ad bike =
Ad.newBuilder()
.setRedirectUrl("/product/9SIQT8TOJO")
.setText("City Bike for sale. 10% off.")
.build();
Ad baristaKit =
Ad.newBuilder()
.setRedirectUrl("/product/1YMWWN1N4O")
.setText("Home Barista kitchen kit for sale. Buy one, get second kit for free")
.build();
Ad airPlant =
Ad.newBuilder()
.setRedirectUrl("/product/6E92ZMYYFZ")
.setText("Air plants for sale. Buy two, get third one for free")
.build();
Ad terrarium =
Ad.newBuilder()
.setRedirectUrl("/product/L9ECAV7KIM")
.setText("Terrarium for sale. Buy one, get second one for free")
.build();
return ImmutableListMultimap.<String, Ad>builder()
.putAll("photography", camera, lens)
.putAll("vintage", camera, lens, recordPlayer)
.put("cycling", bike)
.put("cookware", baristaKit)
.putAll("gardening", airPlant, terrarium)
.build();
}
private static void initStackdriver() {
logger.info("Initialize StackDriver");
long sleepTime = 10; /* seconds */
int maxAttempts = 5;
boolean statsExporterRegistered = false;
boolean traceExporterRegistered = false;
for (int i = 0; i < maxAttempts; i++) {
try {
if (!traceExporterRegistered) {
StackdriverTraceExporter.createAndRegister(
StackdriverTraceConfiguration.builder().build());
traceExporterRegistered = true;
}
if (!statsExporterRegistered) {
StackdriverStatsExporter.createAndRegister(
StackdriverStatsConfiguration.builder()
.setExportInterval(Duration.create(60, 0))
.build());
statsExporterRegistered = true;
}
} catch (Exception e) {
if (i == (maxAttempts - 1)) {
logger.log(
Level.WARN,
"Failed to register Stackdriver Exporter."
+ " Tracing and Stats data will not reported to Stackdriver. Error message: "
+ e.toString());
} else {
logger.info("Attempt to register Stackdriver Exporter in " + sleepTime + " seconds ");
try {
Thread.sleep(TimeUnit.SECONDS.toMillis(sleepTime));
} catch (Exception se) {
logger.log(Level.WARN, "Exception while sleeping" + se.toString());
}
}
}
}
logger.info("StackDriver initialization complete.");
}
private static void initJaeger() {
String jaegerAddr = System.getenv("JAEGER_SERVICE_ADDR");
if (jaegerAddr != null && !jaegerAddr.isEmpty()) {
String jaegerUrl = String.format("http://%s/api/traces", jaegerAddr);
// Register Jaeger Tracing.
JaegerTraceExporter.createAndRegister(jaegerUrl, "adservice");
logger.info("Jaeger initialization complete.");
} else {
logger.info("Jaeger initialization disabled.");
}
}
/** Main launches the server from the command line. */
public static void main(String[] args) throws IOException, InterruptedException {
// Registers all RPC views.
RpcViews.registerAllGrpcViews();
new Thread(
new Runnable() {
public void run() {
initStackdriver();
}
})
.start();
// Register Jaeger
initJaeger();
// Start the RPC server. You shouldn't see any output from gRPC before this.
logger.info("AdService starting.");
final AdService service = AdService.getInstance();
service.start();
service.blockUntilShutdown();
}
}
| [
"\"PORT\"",
"\"JAEGER_SERVICE_ADDR\""
]
| []
| [
"PORT",
"JAEGER_SERVICE_ADDR"
]
| [] | ["PORT", "JAEGER_SERVICE_ADDR"] | java | 2 | 0 | |
genomebrowser/manage.py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'genomebrowser.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| []
| []
| []
| [] | [] | python | 0 | 0 | |
settings.go | package main
import (
"log"
"net/url"
"os"
"strconv"
)
// Settings Program settings definition
type Settings struct {
ServerHost string
ServerPort int
TargetUrl *url.URL
ReplaceUrl *url.URL
}
// Load settings from the environment variables
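// Required: TARGET_URL and REPLACE_URL (the process exits if either is missing
// or invalid). Optional: HOST and PORT; defaults are kept when unset or unparsable.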
func (s *Settings) Load() {
// required target url
targetUrl := os.Getenv("TARGET_URL")
if targetUrl == "" {
log.Fatal("TARGET_URL is required but missing in the env variables")
}
	parsed, err := url.Parse(targetUrl)
	if err != nil {
		log.Fatal("Cannot parse TARGET_URL as valid url")
	}
	s.TargetUrl = parsed
	// required replace url
	replaceUrl := os.Getenv("REPLACE_URL")
	if replaceUrl == "" {
		log.Fatal("REPLACE_URL is required but missing in the env variables")
	}
	parsed, err = url.Parse(replaceUrl)
	if err != nil {
		log.Fatal("Cannot parse REPLACE_URL as valid url")
	}
	s.ReplaceUrl = parsed
// optional params
host := os.Getenv("HOST")
if host != "" {
s.ServerHost = host
}
port, err := strconv.Atoi(os.Getenv("PORT"))
if err == nil {
s.ServerPort = port
}
}
| [
"\"TARGET_URL\"",
"\"REPLACE_URL\"",
"\"HOST\"",
"\"PORT\""
]
| []
| [
"PORT",
"TARGET_URL",
"REPLACE_URL",
"HOST"
]
| [] | ["PORT", "TARGET_URL", "REPLACE_URL", "HOST"] | go | 4 | 0 | |
run/grpc-ping/main.go | // Copyright 2019 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Sample grpc-ping acts as an intermediary to the ping service.
package main
import (
"log"
"net"
"os"
"google.golang.org/grpc"
pb "github.com/GoogleCloudPlatform/golang-samples/run/grpc-ping/pkg/api/v1"
)
func main() {
log.Printf("grpc-ping: starting server...")
port := os.Getenv("PORT")
if port == "" {
port = "8080"
log.Printf("Defaulting to port %s", port)
}
listener, err := net.Listen("tcp", ":"+port)
if err != nil {
log.Fatalf("net.Listen: %v", err)
}
grpcServer := grpc.NewServer()
pb.RegisterPingServiceServer(grpcServer, &pingService{})
if err = grpcServer.Serve(listener); err != nil {
log.Fatal(err)
}
}
// [END run_grpc_server]
// conn holds an open connection to the ping service.
var conn *grpc.ClientConn
func init() {
if os.Getenv("GRPC_PING_HOST") != "" {
var err error
conn, err = NewConn(os.Getenv("GRPC_PING_HOST"), os.Getenv("GRPC_PING_INSECURE") != "")
if err != nil {
log.Fatal(err)
}
} else {
log.Println("Starting without support for SendUpstream: configure with 'GRPC_PING_HOST' environment variable. E.g., example.com:443")
}
}
| [
"\"PORT\"",
"\"GRPC_PING_HOST\"",
"\"GRPC_PING_HOST\"",
"\"GRPC_PING_INSECURE\""
]
| []
| [
"PORT",
"GRPC_PING_HOST",
"GRPC_PING_INSECURE"
]
| [] | ["PORT", "GRPC_PING_HOST", "GRPC_PING_INSECURE"] | go | 3 | 0 | |
onnxruntime/python/backend/backend.py | # -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
# --------------------------------------------------------------------------
"""
Implements ONNX's backend API.
"""
from onnx import ModelProto
from onnx import helper
from onnx import version
from onnx.checker import check_model
from onnx.backend.base import Backend
from onnxruntime import InferenceSession, SessionOptions, get_device, get_available_providers
from onnxruntime.backend.backend_rep import OnnxRuntimeBackendRep
import unittest
import os
class OnnxRuntimeBackend(Backend):
"""
Implements
`ONNX's backend API <https://github.com/onnx/onnx/blob/master/docs/ImplementingAnOnnxBackend.md>`_
with *ONNX Runtime*.
The backend is mostly used when you need to switch between
multiple runtimes with the same API.
`Importing models from ONNX to Caffe2 <https://github.com/onnx/tutorials/blob/master/tutorials/OnnxCaffe2Import.ipynb>`_
shows how to use *caffe2* as a backend for a converted model.
Note: This is not the official Python API.
""" # noqa: E501
allowReleasedOpsetsOnly = bool(os.getenv('ALLOW_RELEASED_ONNX_OPSET_ONLY', '1') == '1')
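    # Typical usage (sketch, assuming the standard ONNX backend API; the model
    # filename is illustrative):
    #   import onnxruntime.backend as backend
    #   rep = backend.prepare("model.onnx", "CPU")   # -> OnnxRuntimeBackendRep
    #   outputs = rep.run(inputs)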
@classmethod
def is_compatible(cls, model, device=None, **kwargs):
"""
Return whether the model is compatible with the backend.
:param model: unused
:param device: None to use the default device or a string (ex: `'CPU'`)
:return: boolean
"""
if device is None:
device = get_device()
return cls.supports_device(device)
@classmethod
def is_opset_supported(cls, model):
"""
Return whether the opset for the model is supported by the backend.
When By default only released onnx opsets are allowed by the backend
To test new opsets env variable ALLOW_RELEASED_ONNX_OPSET_ONLY should be set to 0
:param model: Model whose opsets needed to be verified.
:return: boolean and error message if opset is not supported.
"""
if cls.allowReleasedOpsetsOnly:
for opset in model.opset_import:
domain = opset.domain if opset.domain else 'ai.onnx'
try:
key = (domain, opset.version)
if not (key in helper.OP_SET_ID_VERSION_MAP):
error_message = ("Skipping this test as only released onnx opsets are supported."
"To run this test set env variable ALLOW_RELEASED_ONNX_OPSET_ONLY to 0."
" Got Domain '{0}' version '{1}'.".format(domain, opset.version))
return False, error_message
except AttributeError:
                        # Some CI pipelines raise AttributeError when accessing
                        # helper.OP_SET_ID_VERSION_MAP. TODO: investigate those pipelines;
                        # until then, fall back to a simple version check when the error occurs.
                        if (domain == 'ai.onnx' and opset.version > 12) or (domain == 'ai.onnx.ml' and opset.version > 2):
error_message = ("Skipping this test as only released onnx opsets are supported."
"To run this test set env variable ALLOW_RELEASED_ONNX_OPSET_ONLY to 0."
" Got Domain '{0}' version '{1}'.".format(domain, opset.version))
return False, error_message
return True, ""
@classmethod
def supports_device(cls, device):
"""
Check whether the backend is compiled with particular device support.
In particular it's used in the testing suite.
"""
if device == 'CUDA':
device = 'GPU'
return device in get_device()
@classmethod
def prepare(cls, model, device=None, **kwargs):
"""
Load the model and creates a :class:`onnxruntime.InferenceSession`
ready to be used as a backend.
:param model: ModelProto (returned by `onnx.load`),
string for a filename or bytes for a serialized model
:param device: requested device for the computation,
None means the default one which depends on
the compilation settings
:param kwargs: see :class:`onnxruntime.SessionOptions`
:return: :class:`onnxruntime.InferenceSession`
"""
if isinstance(model, OnnxRuntimeBackendRep):
return model
elif isinstance(model, InferenceSession):
return OnnxRuntimeBackendRep(model)
elif isinstance(model, (str, bytes)):
options = SessionOptions()
for k, v in kwargs.items():
if hasattr(options, k):
setattr(options, k, v)
excluded_providers = os.getenv('ORT_ONNX_BACKEND_EXCLUDE_PROVIDERS', default="").split(',')
providers = [x for x in get_available_providers() if (x not in excluded_providers)]
inf = InferenceSession(model, sess_options=options, providers=providers)
# backend API is primarily used for ONNX test/validation. As such, we should disable session.run() fallback
# which may hide test failures.
inf.disable_fallback()
if device is not None and not cls.supports_device(device):
raise RuntimeError("Incompatible device expected '{0}', got '{1}'".format(device, get_device()))
return cls.prepare(inf, device, **kwargs)
else:
# type: ModelProto
# check_model serializes the model anyways, so serialize the model once here
# and reuse it below in the cls.prepare call to avoid an additional serialization
# only works with onnx >= 1.10.0 hence the version check
onnx_version = tuple(map(int, (version.version.split(".")[:3])))
onnx_supports_serialized_model_check = onnx_version >= (1, 10, 0)
bin_or_model = model.SerializeToString() if onnx_supports_serialized_model_check else model
check_model(bin_or_model)
opset_supported, error_message = cls.is_opset_supported(model)
if not opset_supported:
raise unittest.SkipTest(error_message)
# Now bin might be serialized, if it's not we need to serialize it otherwise we'll have
# an infinite recursive call
bin = bin_or_model
if not isinstance(bin, (str, bytes)):
bin = bin.SerializeToString()
return cls.prepare(bin, device, **kwargs)
@classmethod
def run_model(cls, model, inputs, device=None, **kwargs):
"""
Compute the prediction.
:param model: :class:`onnxruntime.InferenceSession` returned
by function *prepare*
:param inputs: inputs
:param device: requested device for the computation,
None means the default one which depends on
the compilation settings
:param kwargs: see :class:`onnxruntime.RunOptions`
:return: predictions
"""
rep = cls.prepare(model, device, **kwargs)
return rep.run(inputs, **kwargs)
@classmethod
def run_node(cls, node, inputs, device=None, outputs_info=None, **kwargs):
'''
This method is not implemented as it is much more efficient
to run a whole model than every node independently.
'''
raise NotImplementedError("It is much more efficient to run a whole model than every node independently.")
is_compatible = OnnxRuntimeBackend.is_compatible
prepare = OnnxRuntimeBackend.prepare
run = OnnxRuntimeBackend.run_model
supports_device = OnnxRuntimeBackend.supports_device
| []
| []
| [
"ALLOW_RELEASED_ONNX_OPSET_ONLY",
"ORT_ONNX_BACKEND_EXCLUDE_PROVIDERS"
]
| [] | ["ALLOW_RELEASED_ONNX_OPSET_ONLY", "ORT_ONNX_BACKEND_EXCLUDE_PROVIDERS"] | python | 2 | 0 | |
e2e/setup.go | package e2e
import (
net "net/url"
"os"
)
func zkUrl() string {
return "zk://" + os.Getenv("ZK_HOSTS")
}
func consulUrl() string {
return "consul://" + os.Getenv("CONSUL_HOSTS")
}
func etcdUrl() string {
return "etcd://" + os.Getenv("ETCD_HOSTS")
}
const (
testRoot = "unit-tests/backend_test/"
)
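// kvstores builds the backend URLs (Consul, etcd, ZooKeeper) from the
// CONSUL_HOSTS, ETCD_HOSTS and ZK_HOSTS environment variables and panics if
// any of them fails to parse.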
func kvstores() []*net.URL {
urls := []*net.URL{}
for _, u := range []string{
consulUrl(),
etcdUrl(),
zkUrl(),
} {
url, err := net.Parse(u)
if err != nil {
panic(err)
}
urls = append(urls, url)
}
return urls
}
| [
"\"ZK_HOSTS\"",
"\"CONSUL_HOSTS\"",
"\"ETCD_HOSTS\""
]
| []
| [
"ETCD_HOSTS",
"CONSUL_HOSTS",
"ZK_HOSTS"
]
| [] | ["ETCD_HOSTS", "CONSUL_HOSTS", "ZK_HOSTS"] | go | 3 | 0 | |
app/app/settings.py | """
Django settings for app project.
Generated by 'django-admin startproject' using Django 3.2.5.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
from pathlib import Path
import os
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'django-insecure-^g=zse&)53(93+8wf4&fbk4#^82os=cd=r9z3uuj4137n*zeud'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
# custom apps
'rest_framework',
'rest_framework.authtoken',
'user',
'core'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'app.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'app.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
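# Connection details come from the environment (typically injected by
# docker-compose or the deployment platform): DB_HOST, DB_NAME, DB_USER, DB_PASS.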
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'HOST': os.environ.get('DB_HOST'),
'NAME': os.environ.get('DB_NAME'),
'USER': os.environ.get('DB_USER'),
'PASSWORD': os.environ.get('DB_PASS'),
}
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
AUTH_USER_MODEL = 'core.User'
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
| []
| []
| [
"DB_PASS",
"DB_USER",
"DB_NAME",
"DB_HOST"
]
| [] | ["DB_PASS", "DB_USER", "DB_NAME", "DB_HOST"] | python | 4 | 0 | |
tzone.go | package main
import (
"fmt"
"io/ioutil"
"os"
"path"
"strconv"
"strings"
"time"
"github.com/fatih/color"
"github.com/mitchellh/go-homedir"
"github.com/mmcquillan/joda"
"github.com/mmcquillan/matcher"
"github.com/tkuchiki/go-timezone"
terminal "github.com/wayneashleyberry/terminal-dimensions"
"gopkg.in/yaml.v2"
)
func main() {
// list function
match, _, _ := matcher.Matcher("<bin> list ", strings.Join(os.Args, " "))
if match {
tz := timezone.GetAllTimezones()
for k, v := range tz {
fmt.Printf("%s\n", k)
for _, t := range v {
fmt.Printf(" - %s\n", t)
}
}
os.Exit(0)
}
// init zones
var zones []Zone
// capture input
match, _, values := matcher.Matcher("<bin> [zones] [--date] [--24]", strings.Join(os.Args, " "))
if !match || values["zones"] == "help" {
fmt.Println("tzone list")
fmt.Println("tzone [zones] [--date] [--24]")
fmt.Println("")
fmt.Println("examples:")
fmt.Println(" tzone UTC")
fmt.Println(" tzone UTC,Local")
fmt.Println(" tzone America/New_York")
fmt.Println(" tzone America/New_York:Matt")
fmt.Println(" tzone America/New_York:Matt:8:17")
fmt.Println(" tzone UTC,America/New_York:Matt:8:17")
os.Exit(0)
}
// set 24
block := 8
format := "hha"
if os.Getenv("TZ_24") == "true" || values["24"] == "true" {
block = 6
format = "H"
}
// set date
date := false
if os.Getenv("TZ_DATE") == "true" || values["date"] == "true" {
date = true
}
// users file
home, err := homedir.Dir()
if err != nil {
fmt.Println("ERROR: No home dir - " + err.Error())
}
file := path.Join(home, ".tzone")
// set zones
if values["zones"] != "" {
for _, tz := range strings.Split(values["zones"], ",") {
zones = append(zones, splitInput(tz))
}
} else if _, err := os.Stat(file); err == nil {
		tzFile, err := ioutil.ReadFile(file)
		if err != nil {
			fmt.Println("ERROR: Cannot read " + file + " - " + err.Error())
		}
		err = yaml.Unmarshal(tzFile, &zones)
		if err != nil {
			fmt.Println("ERROR: Unmarshal error - " + err.Error())
		}
} else if os.Getenv("TZ_ZONES") != "" {
for _, tz := range strings.Split(os.Getenv("TZ_ZONES"), ",") {
zones = append(zones, splitInput(tz))
}
} else {
zones = append(zones, splitInput("UTC"))
zones = append(zones, splitInput("Local"))
}
// max name
name := 0
for i, z := range zones {
if z.Name == "" {
zones[i].Name = z.TZ
}
if len(z.Name) > name {
name = len(z.Name)
}
}
// spacing
width, _ := terminal.Width()
remWidth := int(width) - (name + 2)
full := (remWidth - (remWidth % block)) / block
half := (full - (full % 2)) / 2
// colors
inactive := color.New(color.BgWhite).Add(color.FgBlack)
active := color.New(color.BgCyan).Add(color.FgBlack)
now := color.New(color.BgGreen).Add(color.FgBlack)
nope := color.New(color.BgRed)
info := color.New(color.FgRed)
// set time
n := time.Now().UTC()
// output
fmt.Printf("\n")
for _, z := range zones {
offset, match := findOffset(z.TZ)
if match {
if z.Highlight {
info.Printf(" %s%s ", strings.Repeat(" ", name-len(z.Name)), z.Name)
} else {
fmt.Printf(" %s%s ", strings.Repeat(" ", name-len(z.Name)), z.Name)
}
for i := -half + 1; i <= half; i++ {
t := n.Add(time.Second * time.Duration((i*3600)+offset))
if i == 0 {
if date {
now.Printf(" %s - %s ", t.Format(joda.Format("MM/dd")), t.Format(joda.Format(format)))
} else {
now.Printf(" %s ", t.Format(joda.Format(format)))
}
} else if t.Hour() >= z.Start && t.Hour() <= z.End {
active.Printf(" %s ", t.Format(joda.Format(format)))
} else {
inactive.Printf(" %s ", t.Format(joda.Format(format)))
}
fmt.Printf(" ")
}
} else {
fmt.Printf(" %s%s ", strings.Repeat(" ", name-len(z.Name)), z.Name)
nope.Printf(" Cannot find timezone: %s ", z.TZ)
}
fmt.Printf("\n\n")
}
}
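// splitInput parses a zone spec of the form "TZ[:Name[:Start[:End]]]", e.g.
// "America/New_York:Matt:8:17"; prefixing the name with '@' highlights the row.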
func splitInput(input string) (z Zone) {
// cleanup
input = strings.TrimSpace(input)
// default
z = Zone{
TZ: input,
Name: input,
Start: 25,
End: 25,
Highlight: false,
}
// do we split
if strings.Contains(input, ":") {
p := strings.Split(input, ":")
switch len(p) {
case 2:
z.TZ = strings.TrimSpace(p[0])
z.Name = strings.TrimSpace(p[1])
case 3:
z.TZ = strings.TrimSpace(p[0])
z.Name = strings.TrimSpace(p[1])
if val, err := strconv.ParseInt(p[2], 10, 32); err == nil {
z.Start = int(val)
}
case 4:
z.TZ = strings.TrimSpace(p[0])
z.Name = strings.TrimSpace(p[1])
if val, err := strconv.ParseInt(p[2], 10, 32); err == nil {
z.Start = int(val)
}
if val, err := strconv.ParseInt(p[3], 10, 32); err == nil {
z.End = int(val)
}
}
}
if strings.HasPrefix(z.Name, "@") {
z.Name = strings.Replace(z.Name, "@", "", -1)
z.Highlight = true
}
// return
return z
}
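// findOffset resolves a timezone name (including shorthands such as "Eastern"
// or "Pacific") to its current UTC offset in seconds, trying the standard
// library first and falling back to the go-timezone package.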
func findOffset(tz string) (offset int, match bool) {
// init
offset = 0
match = false
// shorthand timezones
tzShorthand := strings.ToUpper(tz)
switch tzShorthand {
case "LOCAL":
tz = "Local"
case "EASTERN":
tz = "America/New_York"
case "CENTRAL":
tz = "America/Chicago"
case "MOUNTAIN":
tz = "America/Denver"
case "PACIFIC":
tz = "America/Los_Angeles"
}
// first, std timezone lib
loc, err := time.LoadLocation(tz)
if err == nil {
match = true
n := time.Now().UTC().In(loc)
_, offset = n.Zone()
}
// second, tz lib
if !match {
offset, err = timezone.GetOffset(tz)
if err == nil {
match = true
}
}
// return
return offset, match
}
type Zone struct {
TZ string `yaml:"tz"`
Name string `yaml:"name"`
Start int `yaml:"start"`
End int `yaml:"end"`
Highlight bool `yaml:"highlight"`
}
| [
"\"TZ_24\"",
"\"TZ_DATE\"",
"\"TZ_ZONES\"",
"\"TZ_ZONES\""
]
| []
| [
"TZ_24",
"TZ_DATE",
"TZ_ZONES"
]
| [] | ["TZ_24", "TZ_DATE", "TZ_ZONES"] | go | 3 | 0 | |
main.py |
# Copyright 2020 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import logging
import random
from flask import Flask, request
from google.cloud import bigquery
import datetime
from concurrent.futures import ThreadPoolExecutor
logging.basicConfig(level=os.environ.get("LOGLEVEL", "INFO"))
logger = logging.getLogger(__name__)
app = Flask(__name__)
moves = ['R', 'L', 'F']
client = bigquery.Client()
executor = ThreadPoolExecutor()
table_name = "allegro-hackathon12-2186.battle_ds.events"
@app.route("/", methods=['GET'])
def index():
return "Let the battle begin!"
@app.route("/", methods=['POST'])
def move():
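    # Strategy sketch: read the arena state, sometimes flee forward after being
    # hit, throw ("T") when an opponent is within three cells in the facing
    # direction, otherwise occasionally advance and default to turning right.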
logger.info(request.json)
state = request.json
me_href = state["_links"]["self"]["href"]
dims_w = state["arena"]["dims"][0]
dims_h = state["arena"]["dims"][1]
arena = dict()
insert_rows = []
ts = datetime.datetime.now().timestamp()
for player_name, player_state in state["arena"]["state"].items():
pos_x = player_state["x"]
pos_y = player_state["y"]
arena[(pos_x, pos_y)] = player_name
if player_name == me_href:
me_x = pos_x
me_y = pos_y
me_d = player_state["direction"]
me_was_hit = player_state["wasHit"]
#insert_rows.append({
# 'timestamp': ts,
# 'player': player_name,
# **player_state,
# })
#executor.submit(client.insert_rows_json, table_name, insert_rows)
if me_was_hit: # run!
if random.random() < 0.3:
return "F"
# check if somebody is on the line
if me_d == "N":
if (me_x, me_y - 1) in arena or (me_x, me_y - 2) in arena or (me_x, me_y - 3) in arena:
return "T"
elif me_d == "E":
if (me_x+1, me_y) in arena or (me_x+2, me_y) in arena or (me_x+3, me_y) in arena:
return "T"
elif me_d == "S":
if (me_x, me_y + 1) in arena or (me_x, me_y + 2) in arena or (me_x, me_y + 3) in arena:
return "T"
else:
        if (me_x-1, me_y) in arena or (me_x-2, me_y) in arena or (me_x-3, me_y) in arena:
return "T"
if random.random() < 0.2:
return "F"
return "R" # round turret
if __name__ == "__main__":
    app.run(debug=False, host='0.0.0.0', port=int(os.environ.get('PORT', 8080)))
| []
| []
| [
"PORT",
"LOGLEVEL"
]
| [] | ["PORT", "LOGLEVEL"] | python | 2 | 0 | |
acceptance-tests/src/test/java/tech/pegasys/web3signer/tests/comparison/CompareApisAcceptanceTestBase.java | /*
* Copyright 2020 ConsenSys AG.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package tech.pegasys.web3signer.tests.comparison;
import static tech.pegasys.web3signer.dsl.lotus.FilecoinKeyType.BLS;
import static tech.pegasys.web3signer.dsl.lotus.FilecoinKeyType.SECP256K1;
import tech.pegasys.web3signer.core.signing.KeyType;
import tech.pegasys.web3signer.dsl.lotus.FilecoinKey;
import tech.pegasys.web3signer.dsl.lotus.LotusNode;
import tech.pegasys.web3signer.dsl.signer.SignerConfigurationBuilder;
import tech.pegasys.web3signer.dsl.utils.MetadataFileHelpers;
import tech.pegasys.web3signer.tests.AcceptanceTestBase;
import java.nio.file.Path;
import java.util.Map;
import org.apache.tuweni.bytes.Bytes;
import org.junit.jupiter.api.io.TempDir;
public class CompareApisAcceptanceTestBase extends AcceptanceTestBase {
protected static final LotusNode LOTUS_NODE =
new LotusNode(Integer.parseInt(System.getenv("LOTUS_PORT")));
protected static final int NO_OF_BLS_KEYS = 2;
protected static final int NO_OF_SECP_KEYS = 2;
protected static Map<String, FilecoinKey> addressMap =
LOTUS_NODE.createKeys(NO_OF_BLS_KEYS, NO_OF_SECP_KEYS);
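  // These addresses are not created on the Lotus node; they exercise lookups of
  // unknown keys in the comparison tests.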
protected static Map<String, FilecoinKey> nonExistentAddressMap =
Map.of(
"t3q7sj7rgvvlfpc7gx7z7jeco5x3q3aa4g6s54w3rl5alzdb6xa422seznjmtp7agboegcvrakcv22eo5bjlna",
new FilecoinKey(
BLS, Bytes.fromBase64String("NlWGbwCt8rEK7OTDYat3jy+3tj60cER81cIDUSEnFjU=")),
"t3rzhwtyxwmfbgikcddna3bv3eedn3meyt75gc6urmunbju26asfhaycsim6oc5qvyqbldziq53l3ujfpprhfa",
new FilecoinKey(
BLS, Bytes.fromBase64String("tFzDgbfTT983FdhnZ8xZjr0JdP37DcijmVm+XvurhFY=")),
"t1jcaxt7yoonwcvllj52kjzh4buo7gjmzemm3c3ny",
new FilecoinKey(
SECP256K1, Bytes.fromBase64String("5airIxsTE4wslOvXDcHoTnZE2ZWYGw/ZMwJQY0p7Pi4=")),
"t1te5vep7vlsxoh5vqz3fqlm76gewzpd63juum6jq",
new FilecoinKey(
SECP256K1, Bytes.fromBase64String("0oKQu6xyg0bOCaqNqpHULzxDa4VDQu1D19iArDL8+JU=")));
protected static final MetadataFileHelpers metadataFileHelpers = new MetadataFileHelpers();
@TempDir protected Path testDirectory;
protected void initAndStartSigner(final boolean initKeystoreDirectory) {
if (initKeystoreDirectory) {
initSignerKeystoreDirectory();
}
final SignerConfigurationBuilder builder =
new SignerConfigurationBuilder().withKeyStoreDirectory(testDirectory).withMode("filecoin");
startSigner(builder.build());
}
private void initSignerKeystoreDirectory() {
addressMap.forEach(
(fcAddress, key) ->
metadataFileHelpers.createUnencryptedYamlFileAt(
keyConfigFile(fcAddress),
key.getPrivateKeyHex(),
key.getType() == BLS ? KeyType.BLS : KeyType.SECP256K1));
}
private Path keyConfigFile(final String prefix) {
return testDirectory.resolve(prefix + ".yaml");
}
}
| [
"\"LOTUS_PORT\""
]
| []
| [
"LOTUS_PORT"
]
| [] | ["LOTUS_PORT"] | java | 1 | 0 | |
skaterblog/wsgi.py | """
WSGI config for skaterblog project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
from mezzanine.utils.conf import real_project_name
os.environ.setdefault("DJANGO_SETTINGS_MODULE",
"%s.settings" % real_project_name("skaterblog"))
application = get_wsgi_application()
| []
| []
| []
| [] | [] | python | 0 | 0 | |
mkdocs/plugins.py | # coding: utf-8
"""
Implements the plugin API for MkDocs.
"""
from __future__ import unicode_literals
import pkg_resources
import logging
from collections import OrderedDict
from mkdocs.config.base import Config
log = logging.getLogger('mkdocs.plugins')
EVENTS = (
'config', 'pre_build', 'files', 'nav', 'env', 'pre_template', 'template_context',
'post_template', 'pre_page', 'page_read_source', 'page_markdown',
'page_content', 'page_context', 'post_page', 'post_build', 'serve'
)
def get_plugins():
""" Return a dict of all installed Plugins by name. """
plugins = pkg_resources.iter_entry_points(group='mkdocs.plugins')
return dict((plugin.name, plugin) for plugin in plugins)
class BasePlugin(object):
"""
Plugin base class.
All plugins should subclass this class.
"""
config_scheme = ()
config = {}
def load_config(self, options, config_file_path=None):
""" Load config from a dict of options. Returns a tuple of (errors, warnings)."""
self.config = Config(schema=self.config_scheme, config_file_path=config_file_path)
self.config.load_dict(options)
return self.config.validate()
class PluginCollection(OrderedDict):
"""
A collection of plugins.
In addition to being a dict of Plugin instances, each event method is registered
upon being added. All registered methods for a given event can then be run in order
by calling `run_event`.
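    Example (plugin name and class are illustrative): after
    ``plugins['search'] = SearchPlugin()`` the plugin's ``on_*`` methods are
    registered, and ``config = plugins.run_event('config', config)`` passes the
    value through every registered handler in order.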
"""
def __init__(self, *args, **kwargs):
super(PluginCollection, self).__init__(*args, **kwargs)
self.events = {x: [] for x in EVENTS}
def _register_event(self, event_name, method):
""" Register a method for an event. """
self.events[event_name].append(method)
def __setitem__(self, key, value, **kwargs):
if not isinstance(value, BasePlugin):
            raise TypeError(
                '{0}.{1} only accepts values which are instances of {2}.{3} '
                'subclasses'.format(self.__class__.__module__, self.__class__.__name__,
                                    BasePlugin.__module__, BasePlugin.__name__))
super(PluginCollection, self).__setitem__(key, value, **kwargs)
# Register all of the event methods defined for this Plugin.
for event_name in (x for x in dir(value) if x.startswith('on_')):
method = getattr(value, event_name)
if callable(method):
self._register_event(event_name[3:], method)
def run_event(self, name, item, **kwargs):
"""
Run all registered methods of an event.
`item` is the object to be modified and returned by the event method.
All other keywords are variables for context, but would not generally
be modified by the event method.
"""
for method in self.events[name]:
result = method(item, **kwargs)
# keep item if method returned `None`
if result is not None:
item = result
return item
| []
| []
| []
| [] | [] | python | null | null | null |
configs/sscod/coco/exp2_caseA_curcon.py | import os
import pandas as pd
import numpy as np
data_root = '/data/coco/'
df = pd.read_csv('configs/sscod/coco/coco_classes_caseA.csv')
seen_classes = np.where(df['take'].values != 0)[0] + 1
unseen_classes = np.where(df['take'].values == 0)[0] + 1
if os.environ.get('EONC', '1') == '1':
used_classes_for_eval = seen_classes
else:
used_classes_for_eval = unseen_classes
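# EONC toggle: unset or '1' evaluates on the seen classes; EONC=0 evaluates on
# the held-out unseen classes.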
# model settings
model = dict(
type='SSCOD',
pretrained='torchvision://resnet50',
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(1, 2, 3),
frozen_stages=1,
norm_eval=True,
norm_cfg=dict(type='BN', requires_grad=True),
style='pytorch'),
neck=dict(
type='FPN',
in_channels=[512, 1024, 2048],
out_channels=256,
start_level=0,
conv_cfg=dict(type='ConvWS'),
norm_cfg=dict(type='GN', num_groups=32, requires_grad=True),
add_extra_convs=True,
extra_convs_on_inputs=False,
num_outs=5),
bbox_head=dict(
type='ATSS_COD_Head',
stacked_obj_convs=4,
embed_channels=256,
exp_type=2,
unseen_classID=unseen_classes,
classwise_loss=None,
pairwise_loss=dict(
type='CurContrastLoss', in_channels=256,
embed_channels=None, scale=1.0, margin=0.5,
easy_margin=False, ignore_class0=True, loss_weight=1.0),
embed_norm_cfg=None,
num_classes=81,
in_channels=256,
stacked_convs=4,
feat_channels=256,
octave_base_scale=8,
scales_per_octave=1,
anchor_ratios=[1.0],
anchor_strides=[8, 16, 32, 64, 128],
target_means=[.0, .0, .0, .0],
target_stds=[0.1, 0.1, 0.2, 0.2],
conv_cfg=dict(type='ConvWS'),
norm_cfg=dict(type='GN', num_groups=32, requires_grad=True),
loss_cls=dict(
type='FocalLoss', use_sigmoid=True,
gamma=2.0, alpha=0.25, loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=2.0),
loss_centerness=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0)))
# training and testing settings
train_cfg = dict(
assigner=dict(type='ATSSAssigner', topk=9),
allowed_border=-1,
pos_weight=-1,
debug=False)
test_cfg = dict(
nms_pre=1000,
min_bbox_size=0,
score_thr=0.05,
nms=dict(type='nms', iou_thr=0.5),
max_per_img=100,
codet=dict(
multiply_obj_score=False,
max_pairs=100,
matching_thr=0.54))
# dataset settings
dataset_type = 'VOCDataset'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='ObjDetAugmentation', policy='v0'),
dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(1333, 800),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
imgs_per_gpu=4,
workers_per_gpu=4,
train=dict(
type='CocoContrastiveDataset',
ann_file=data_root + 'annotations/instances_train2017.json',
img_prefix=data_root + 'images/train2017/',
pipeline=train_pipeline,
pipeline_aug=train_pipeline,
pair_itself=False),
val=dict(
type='Coco_COD_Dataset',
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'images/val2017/',
pipeline=test_pipeline),
test=dict(
type='Coco_COD_Dataset',
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'images/val2017/',
pipeline=test_pipeline,
used_class_ids=used_classes_for_eval,
test_mode=True))
# optimizer
optimizer = dict(type='SGD', lr=4e-2, momentum=0.9, weight_decay=4e-5)
optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
# learning policy
lr_config = dict(
policy='Cosine', min_lr=4e-4, by_epoch=False,
warmup='linear', warmup_iters=500, warmup_ratio=1.0 / 3)
checkpoint_config = dict(interval=1)
# yapf:disable
log_config = dict(
interval=50,
hooks=[
dict(type='TextLoggerHook'),
dict(type='TensorboardLoggerHook')
])
# yapf:enable
# runtime settings
total_epochs = 12
dist_params = dict(backend='nccl')
log_level = 'INFO'
load_from = None
resume_from = None
workflow = [('train', 1)]
find_unused_parameters = True
| []
| []
| [
"EONC"
]
| [] | ["EONC"] | python | 1 | 0 | |
leetcode/101-200/0124.Binary-Tree-Maximum-Path-Sum/Solution_test.go | package Solution
import (
"fmt"
"reflect"
"runtime"
"strings"
"testing"
"github.com/stretchr/testify/assert"
)
// Solution func Info
type SolutionFuncType func(node *TreeNode) int
var SolutionFuncList = []SolutionFuncType{
maxPathSum_1,
}
// Test case info struct
type Case struct {
name string
input *TreeNode
expect int
}
// Test case
var cases = []Case{
{
name: "TestCase 1",
input: &TreeNode{
Val: 1,
Left: &TreeNode{Val: 2},
Right: &TreeNode{Val: 3},
},
expect: 6,
},
{
name: "TestCase 2",
input: &TreeNode{
-10,
&TreeNode{Val: 9},
&TreeNode{
20,
&TreeNode{Val: 15},
&TreeNode{Val: 7},
},
},
expect: 42,
},
}
// TestSolution Run test case for all solutions
func TestSolution(t *testing.T) {
ast := assert.New(t)
for _, f := range SolutionFuncList {
funcName := strings.Split(runtime.FuncForPC(reflect.ValueOf(f).Pointer()).Name(), ".")[1]
for _, c := range cases {
t.Run(fmt.Sprintf("%s %s", funcName, c.name), func(t *testing.T) {
got := f(c.input)
ast.Equal(c.expect, got,
"func: %v case: %v ", funcName, c.name)
})
}
}
}
| []
| []
| []
| [] | [] | go | null | null | null |
var/spack/repos/builtin/packages/xios/package.py | # Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
#
from spack import *
import os
class Xios(Package):
"""XML-IO-SERVER library for IO management of climate models."""
homepage = "https://forge.ipsl.jussieu.fr/ioserver/wiki"
version('1.0', revision=910,
svn='http://forge.ipsl.jussieu.fr/ioserver/svn/XIOS/branchs/xios-1.0')
version('develop', svn='http://forge.ipsl.jussieu.fr/ioserver/svn/XIOS/trunk')
variant('mode', values=('debug', 'dev', 'prod'), default='dev',
description='Build for debugging, development or production')
# NOTE: oasis coupler could be supported with a variant
# Use spack versions of blitz and netcdf for compatibility
# with recent compilers and optimised platform libraries:
patch('bld_extern_1.0.patch', when='@:1.0')
patch('bld_extern_1.x.patch', when='@1.1:')
# Workaround bug #17782 in llvm, where reading a double
# followed by a character is broken (e.g. duration '1d'):
patch('llvm_bug_17782.patch', when='@1.1: %clang')
depends_on('netcdf+mpi')
depends_on('netcdf-fortran')
depends_on('hdf5+mpi')
depends_on('mpi')
depends_on('boost')
depends_on('blitz')
depends_on('perl', type='build')
depends_on('perl-uri', type='build')
depends_on('gmake', type='build')
@when('%clang')
def patch(self):
"""Fix type references that are ambiguous for clang."""
for dirpath, dirnames, filenames in os.walk('src'):
for filename in filenames:
filepath = os.path.join(dirpath, filename)
# Use boost definition of type shared_ptr:
filter_file(r'([^:/])shared_ptr<',
r'\1boost::shared_ptr<', filepath)
# Use type long for position in output stream:
filter_file(r'oss.tellp\(\) *- *startPos',
r'(long)oss.tellp() - startPos', filepath)
def xios_env(self):
file = join_path('arch', 'arch-SPACK.env')
touch(file)
def xios_path(self):
file = join_path('arch', 'arch-SPACK.path')
spec = self.spec
paths = {'NETCDF_INC_DIR': spec['netcdf'].prefix.include,
'NETCDF_LIB_DIR': spec['netcdf'].prefix.lib,
'HDF5_INC_DIR': spec['hdf5'].prefix.include,
'HDF5_LIB_DIR': spec['hdf5'].prefix.lib}
text = r"""
NETCDF_INCDIR="-I {NETCDF_INC_DIR}"
NETCDF_LIBDIR="-L {NETCDF_LIB_DIR}"
NETCDF_LIB="-lnetcdff -lnetcdf"
MPI_INCDIR=""
MPI_LIBDIR=""
MPI_LIB=""
HDF5_INCDIR="-I {HDF5_INC_DIR}"
HDF5_LIBDIR="-L {HDF5_LIB_DIR}"
HDF5_LIB="-lhdf5_hl -lhdf5"
OASIS_INCDIR=""
OASIS_LIBDIR=""
OASIS_LIB=""
"""
with open(file, 'w') as f:
f.write(text.format(**paths))
def xios_fcm(self):
file = join_path('arch', 'arch-SPACK.fcm')
spec = self.spec
param = dict()
param['MPICXX'] = spec['mpi'].mpicxx
param['MPIFC'] = spec['mpi'].mpifc
param['CC'] = self.compiler.cc
param['FC'] = self.compiler.fc
param['BOOST_INC_DIR'] = spec['boost'].prefix.include
param['BOOST_LIB_DIR'] = spec['boost'].prefix.lib
param['BLITZ_INC_DIR'] = spec['blitz'].prefix.include
param['BLITZ_LIB_DIR'] = spec['blitz'].prefix.lib
if spec.satisfies('%clang platform=darwin'):
param['LIBCXX'] = '-lc++'
else:
param['LIBCXX'] = '-lstdc++'
if any(map(spec.satisfies, ('%gcc', '%intel', '%clang'))):
text = r"""
%CCOMPILER {MPICXX}
%FCOMPILER {MPIFC}
%LINKER {MPIFC}
%BASE_CFLAGS -ansi -w -D_GLIBCXX_USE_CXX11_ABI=0 \
-I{BOOST_INC_DIR} -I{BLITZ_INC_DIR}
%PROD_CFLAGS -O3 -DBOOST_DISABLE_ASSERTS
%DEV_CFLAGS -g -O2
%DEBUG_CFLAGS -g
%BASE_FFLAGS -D__NONE__
%PROD_FFLAGS -O3
%DEV_FFLAGS -g -O2
%DEBUG_FFLAGS -g
%BASE_INC -D__NONE__
%BASE_LD -L{BOOST_LIB_DIR} -L{BLITZ_LIB_DIR} -lblitz {LIBCXX}
%CPP {CC} -E
%FPP {CC} -E -P -x c
%MAKE gmake
""".format(**param)
elif spec.satisfies('%cce'):
# In the CC compiler prior to cce/8.3.7,
# optimisation must be reduced to avoid a bug,
# as reported by Mike Rezny at the UK Met Office:
            if spec.satisfies('%[email protected]:'):
param.update({'CC_OPT_DEV': '-O2', 'CC_OPT_PROD': '-O3'})
else:
param.update({'CC_OPT_DEV': '-O1', 'CC_OPT_PROD': '-O1'})
text = r"""
%CCOMPILER {MPICXX}
%FCOMPILER {MPIFC}
%LINKER {MPIFC}
%BASE_CFLAGS -DMPICH_SKIP_MPICXX -h msglevel_4 -h zero -h gnu \
-I{BOOST_INC_DIR} -I{BLITZ_INC_DIR}
%PROD_CFLAGS {CC_OPT_PROD} -DBOOST_DISABLE_ASSERTS
%DEV_CFLAGS {CC_OPT_DEV}
%DEBUG_CFLAGS -g
%BASE_FFLAGS -em -m 4 -e0 -eZ
%PROD_FFLAGS -O3
%DEV_FFLAGS -G2
%DEBUG_FFLAGS -g
%BASE_INC -D__NONE__
%BASE_LD -D__NONE__ -L{BOOST_LIB_DIR} -L{BLITZ_LIB_DIR} -lblitz
%CPP cpp
%FPP cpp -P -CC
%MAKE gmake
""".format(**param)
else:
raise InstallError('Unsupported compiler.')
with open(file, 'w') as f:
f.write(text)
def install(self, spec, prefix):
env['CC'] = spec['mpi'].mpicc
env['CXX'] = spec['mpi'].mpicxx
env['F77'] = spec['mpi'].mpif77
env['FC'] = spec['mpi'].mpifc
options = ['--full',
'--%s' % spec.variants['mode'].value,
'--arch', 'SPACK',
'--netcdf_lib', 'netcdf4_par',
'--job', str(make_jobs)]
self.xios_env()
self.xios_path()
self.xios_fcm()
make_xios = Executable('./make_xios')
make_xios(*options)
mkdirp(spec.prefix)
install_tree('bin', spec.prefix.bin)
install_tree('lib', spec.prefix.lib)
install_tree('inc', spec.prefix.include)
install_tree('etc', spec.prefix.etc)
install_tree('cfg', spec.prefix.cfg)
@run_after('install')
@on_package_attributes(run_tests=True)
def check_build(self):
mpirun = os.getenv('MPIRUN')
if mpirun is None:
mpirun = 'mpiexec'
mpiexec = Executable(mpirun)
with working_dir('inputs'):
try:
mpiexec('-n', '2', join_path('..', 'bin', 'test_client.exe'))
except Exception:
raise InstallError(
'Test failed; defining MPIRUN variable may help.')
| []
| []
| [
"MPIRUN"
]
| [] | ["MPIRUN"] | python | 1 | 0 | |
starport/pkg/xos/env.go | package xos
import (
"errors"
"os"
"strings"
)
// IsInPath checks if binpath is in system path.
func IsInPath(binpath string) error {
paths := strings.Split(os.Getenv("PATH"), ":")
for _, path := range paths {
if path == binpath {
return nil
}
}
return errors.New("bin path is not in PATH")
}
| [
"\"PATH\""
]
| []
| [
"PATH"
]
| [] | ["PATH"] | go | 1 | 0 | |
experiment/run_exp.py | import sys
import requests
import os
# Used for debugging gpu errors
# os.environ['CUDA_LAUNCH_BLOCKING'] = '1'
import pandas as pd
import networkx as nx
import utils as u
import torch
import torch.distributed as dist
import numpy as np
import time
import datetime
import random
from copy import deepcopy
from functools import partial
from experiment import Experiment
from custom_labeler import Custom_Labeler
from tlp_labeler import Tlp_Labeler
# taskers
import link_pred_tasker as lpt
# models
from models import egcn_components as mls
from models.decoder import WSDM_Decoder
from models.gcn import GCN
from models.gat import GAT
from models import egcn_h
from models import egcn_h_old
from models import egcn_o
from models import egcn_o_old
from models.gclstm import GCLSTM
from models.tgat import TGAT
from models.tgatneighborfinder import NeighborFinder as TGAT_NeighborFinder
from models.tgn import TGN
from models.tgn_utils.utils import get_neighbor_finder as TGN_get_neighbor_finder
from models.tgn_utils.utils import (
compute_time_statistics as TGN_compute_time_statistics,
)
import splitter as sp
import Cross_Entropy as ce
def random_param_value(param, param_min, param_max, type="int"):
    if param is None or str(param).lower() == "none":
if type == "int":
return random.randrange(param_min, param_max + 1)
elif type == "logscale":
interval = np.logspace(np.log10(param_min), np.log10(param_max), num=100)
return np.random.choice(interval, 1)[0]
else:
return random.uniform(param_min, param_max)
else:
return param
def prepare_args(args):
heuristics = [
"cn",
"aa",
"jaccard",
"newton",
"ccpa",
"random_heuristic",
"random_adaptive",
]
static_models = ["gcn", "gat", "random"]
discrete_models = ["egcn_o", "egcn_h", "gclstm", "egcn_h_old", "egcn_o_old"]
continuous_models = ["tgat", "tgn"]
args.heuristic = args.model in heuristics
    if args.model not in static_models + discrete_models + continuous_models:
raise NotImplementedError("Model {} not found".format(args.model))
elif args.model in static_models or args.model in heuristics:
args.temporal_granularity = "static"
elif args.model in discrete_models:
args.temporal_granularity = "discrete"
elif args.model in continuous_models:
args.temporal_granularity = "continuous"
if (
args.num_hist_steps in ["expanding", "static"]
and args.temporal_granularity != "static"
):
raise ValueError(
"An expanding or static time window can only be used with static temporal granularity"
)
if hasattr(args, "gcn_parameters"):
if args.gcn_parameters["layer_2_feats_same_as_l1"]:
args.gcn_parameters["layer_2_feats"] = args.gcn_parameters["layer_1_feats"]
if (
"lstm_l2_feats_name_as_l1" in args.gcn_parameters.keys()
) and args.gcn_parameters["layer_2_feats_same_as_l1"]:
args.gcn_parameters["layer_2_feats"] = args.gcn_parameters["layer_1_feats"]
if hasattr(args, "cont_gcn_parameters"):
if args.cont_gcn_parameters["layer_2_feats_same_as_l1"]:
args.cont_gcn_parameters["layer_2_feats"] = args.cont_gcn_parameters[
"layer_1_feats"
]
if (
"lstm_l2_feats_name_as_l1" in args.cont_gcn_parameters.keys()
) and args.cont_gcn_parameters["layer_2_feats_same_as_l1"]:
args.cont_gcn_parameters["layer_2_feats"] = args.cont_gcn_parameters[
"layer_1_feats"
]
return args
def build_tasker(
args, dataset, temporal_granularity, custom_labeler=None, tlp_labeler=None
):
if args.task == "link_pred":
return lpt.Link_Pred_Tasker(
args, dataset, temporal_granularity, custom_labeler, tlp_labeler
)
elif args.task == "edge_cls":
return ect.Edge_Cls_Tasker(args, dataset)
elif args.task == "node_cls":
return nct.Node_Cls_Tasker(args, dataset)
elif args.task == "static_node_cls":
return nct.Static_Node_Cls_Tasker(args, dataset)
else:
raise NotImplementedError("still need to implement the other tasks")
def build_custom_labeler(settype, args, continuous_dataset):
return Custom_Labeler(args, continuous_dataset, settype)
def build_tlp_labeler(args, continuous_dataset):
return Tlp_Labeler(args, continuous_dataset)
def build_gcn(args, tasker, dataset, splitter, feats_per_node, model=None):
    if model is None:
model = args.model
gcn_args = u.Namespace(args.gcn_parameters)
gcn_args.feats_per_node = feats_per_node
if model == "simplegcn": # Same as 'gcn' only manually implemented
gcn = mls.Sp_GCN(gcn_args, activation=torch.nn.RReLU()).to(args.device)
elif model == "gcn": # GCN but the PyGeometric implementation
gcn = GCN(gcn_args, activation=torch.nn.RReLU()).to(args.device)
elif model == "gat":
gcn = GAT(gcn_args, activation=torch.nn.RReLU()).to(args.device)
elif model == "seal":
gcn = SEAL(gcn_args)
elif model == "gclstm":
gcn = GCLSTM(gcn_args, activation=torch.nn.RReLU()).to(args.device)
elif model == "skipgcn":
gcn = mls.Sp_Skip_GCN(gcn_args, activation=torch.nn.RReLU()).to(args.device)
elif model == "skipfeatsgcn":
gcn = mls.Sp_Skip_NodeFeats_GCN(gcn_args, activation=torch.nn.RReLU()).to(
args.device
)
elif model == "tgat":
force_random_edge_features = (
hasattr(args, "force_random_edge_features")
and args.force_random_edge_features == True
)
neighborhood_finder = TGAT_NeighborFinder(dataset)
edge_features, node_features = u.get_initial_features_continuous(
args, gcn_args, dataset, force_random_edge_features
)
print(
"edge feature and node features size",
edge_features.shape,
node_features.shape,
)
args.gcn_parameters["layer_2_feats"] = node_features.shape[1]
gcn = TGAT(
gcn_args,
neighborhood_finder,
node_features,
edge_features,
num_layers=gcn_args.num_layers,
n_head=gcn_args.attention_heads,
drop_out=gcn_args.dropout,
device=args.device,
).to(args.device)
elif model == "tgn":
# Default values
n_neighbors = 20
uniform = False # Uniform_neighborhood_finder_sampling
message_dim = 100
memory_update_at_end = (
False # Update memory at the beginning or at the end of the batch
)
embedding_module = "graph_attention" # choices=["graph_attention", "graph_sum", "identity", "time"]
message_function = "identity" # choices=['identity', 'mlp']
aggregator = "last"
memory_updater = "gru" # choices=['gru', 'rnn']
use_destination_embedding_in_message = False
use_source_embedding_in_message = False
neighborhood_finder = TGN_get_neighbor_finder(dataset, uniform)
force_random_edge_features = (
hasattr(args, "force_random_edge_features")
and args.force_random_edge_features == True
)
edge_features, node_features = u.get_initial_features_continuous(
args, gcn_args, dataset, force_random_edge_features
)
args.gcn_parameters["layer_2_feats"] = node_features.shape[1]
memory_dim = node_features.shape[1]
# Compute time statistics
sources = dataset.edges["idx"][:, dataset.cols.source]
destinations = dataset.edges["idx"][:, dataset.cols.target]
timestamps = dataset.edges["idx"][:, dataset.cols.time]
(
mean_time_shift_src,
std_time_shift_src,
mean_time_shift_dst,
std_time_shift_dst,
) = TGN_compute_time_statistics(sources, destinations, timestamps)
gcn = TGN(
neighbor_finder=neighborhood_finder,
node_features=node_features,
edge_features=edge_features,
device=args.device,
n_layers=gcn_args.num_layers,
n_heads=gcn_args.attention_heads,
dropout=gcn_args.dropout,
use_memory=gcn_args.use_memory,
message_dimension=message_dim,
memory_dimension=memory_dim,
memory_update_at_start=not memory_update_at_end,
embedding_module_type=embedding_module,
message_function=message_function,
aggregator_type=aggregator,
memory_updater_type=memory_updater,
n_neighbors=n_neighbors,
mean_time_shift_src=mean_time_shift_src,
std_time_shift_src=std_time_shift_src,
mean_time_shift_dst=mean_time_shift_dst,
std_time_shift_dst=std_time_shift_dst,
use_destination_embedding_in_message=use_destination_embedding_in_message,
use_source_embedding_in_message=use_source_embedding_in_message,
).to(args.device)
elif model == "random":
gcn = mls.Random(gcn_args, args.device).to(args.device)
else:
assert args.num_hist_steps > 0, "more than one step is necessary to train LSTM"
if model == "lstmA":
gcn = mls.Sp_GCN_LSTM_A(gcn_args, activation=torch.nn.RReLU()).to(
args.device
)
elif model == "gruA":
gcn = mls.Sp_GCN_GRU_A(gcn_args, activation=torch.nn.RReLU()).to(
args.device
)
elif model == "lstmB":
gcn = mls.Sp_GCN_LSTM_B(gcn_args, activation=torch.nn.RReLU()).to(
args.device
)
elif model == "gruB":
gcn = mls.Sp_GCN_GRU_B(gcn_args, activation=torch.nn.RReLU()).to(
args.device
)
elif model == "egcn_h":
gcn = egcn_h.EGCN(gcn_args, activation=torch.nn.RReLU(), device=args.device)
elif model == "egcn_o":
gcn = egcn_o.EGCN(gcn_args, activation=torch.nn.RReLU(), device=args.device)
elif model == "egcn_h_old":
gcn = egcn_h_old.EGCN(
gcn_args, activation=torch.nn.RReLU(), device=args.device
)
elif model == "egcn_o_old":
gcn = egcn_o_old.EGCN(
gcn_args, activation=torch.nn.RReLU(), device=args.device
)
elif model == "skipfeatsegcn_h":
gcn = egcn_h.EGCN(
gcn_args,
activation=torch.nn.RReLU(),
device=args.device,
skipfeats=True,
)
else:
raise NotImplementedError("simple Model not found")
return gcn, args
def build_classifier(args, tasker):
time_encoder_dim = args.decoder_time_encoder_dim
encoder_out_feats = args.gcn_parameters["layer_2_feats"]
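    # Node classification scores a single node embedding; link prediction concatenates
    # two node embeddings, so the decoder input width doubles.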
if "node_cls" == args.task or "static_node_cls" == args.task:
mult = 1
else:
mult = 2
if "gru" in args.model or ("lstm" in args.model and not args.model == "gclstm"):
in_feats = encoder_out_feats * mult
elif args.model == "skipfeatsgcn" or args.model == "skipfeatsegcn_h":
in_feats = (encoder_out_feats + args.gcn_parameters["feats_per_node"]) * mult
else:
in_feats = encoder_out_feats * mult
if hasattr(args, "has_time_query") and args.has_time_query == True:
time_encoder = mls.TimeEncode(time_encoder_dim).to(args.device)
# 2x because we encode both the delta until query start (start time)
# and the query duration (duration time). These are both each fed into the mlp
nodes_feats = in_feats
continuous_dataset = tasker.tlp_labeler.cdata
if continuous_dataset.name == "wsdm-A":
edge_type_feats = continuous_dataset.edge_type_features
max_vals = edge_type_feats.max()
in_feats_etype = (max_vals + 1).sum()
# Should be 659 for WSDM-A
assert in_feats_etype == 659
else:
in_feats_etype = continuous_dataset.type_max_val + 1
out_feats_etype = args.decoder_edge_type_emb_dim # 90
time_one_hot_feats = 7 + 24 # one_hot days and one_hot hours
in_feats_time = time_encoder_dim + time_one_hot_feats
out_feats_time = args.decoder_time_emb_dim # 30
decoder = WSDM_Decoder(
args,
time_encoder=time_encoder,
out_features=tasker.num_classes,
nodes_feats=nodes_feats,
in_feats_etype=in_feats_etype,
out_feats_etype=out_feats_etype,
in_feats_time=in_feats_time,
out_feats_time=out_feats_time,
).to(args.device)
return decoder
else:
return mls.Classifier(
args, in_features=in_feats, out_features=tasker.num_classes
).to(args.device)
# Return list of args ready for use that the framework iterates through for the grid search
# Each args in the list is the args used for each run of the grid search
def build_grid(all_args):
lists_not_included_in_grid_search = [
"class_weights",
"comments",
"link_features",
"node_features",
]
args_dict = vars(all_args)
# Gather parameters for permutation
for_permutation = {}
for key in args_dict:
if (
type(args_dict[key]) is list
and not key in lists_not_included_in_grid_search
):
for_permutation[key] = args_dict[key]
elif type(args_dict[key]) is dict:
d = args_dict[key]
for inner_key in d:
if type(d[inner_key]) is list:
for_permutation["{}.{}".format(key, inner_key)] = d[inner_key]
# Convenience
# Putting learning rate at the end, it will be ordered by permutate to be the outermost parameter in the grid search
# Thus for continuous models, the training of the encoder happens intermittently, rather than all at once in the beginning
if all_args.model in ["tgn", "tgat"]:
lr = for_permutation.pop("learning_rate")
for_permutation["learning_rate"] = lr
args_list = []
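    # Recursively expand every list-valued parameter into its cartesian product;
    # each leaf call appends one fully resolved args dict (one grid cell).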
def permutate(for_permutation, args_dict, permutated):
if for_permutation == {}:
# Add grid arg to show which grid cell this is
args_dict["grid"] = permutated
args_list.append(args_dict)
else:
new_for_permutation = deepcopy(for_permutation)
param_name, param_values = new_for_permutation.popitem()
for param in param_values:
new_args_dict = deepcopy(args_dict)
new_permutated = deepcopy(permutated)
new_permutated[param_name] = param
if "." in param_name:
key, inner_key = param_name.split(".")
new_args_dict[key][inner_key] = param
else:
new_args_dict[param_name] = param
permutate(new_for_permutation, new_args_dict, new_permutated)
permutate(for_permutation, args_dict, {})
assert len(args_list) == u.prod(
[len(param_list) for param_list in for_permutation.values()]
)
return [u.Namespace(args) for args in args_list]
def read_data_master(args, dataset_name=None):
if not dataset_name:
dataset_name = args.data
master = pd.read_csv(os.path.join("config", "data_master.csv"), index_col=0)
if not dataset_name in master:
error_mssg = "Dataset not found in data master. Dataset name {}.\n".format(
dataset_name
)
error_mssg += "Available datasets are as follows:\n"
error_mssg += "\n".join(master.keys())
raise ValueError(error_mssg)
meta_info = master[dataset_name]
args.data_filepath = os.path.join("data", meta_info["filename"])
args.snapshot_size = int(meta_info["snapshot size"])
args.query_horizon = int(meta_info["query horizon"])
args.train_proportion = float(meta_info["train proportion"])
args.val_proportion = float(meta_info["val proportion"])
steps_acc = meta_info["steps accounted"]
try:
args.steps_accounted = int(steps_acc)
except ValueError:
args.steps_accounted = None
if meta_info["node encoding"] == "2 hot":
args.use_2_hot_node_feats = True
args.use_1_hot_node_feats = False
args.use_1_log_hot_node_feats = False
elif meta_info["node encoding"] == "1 log hot":
args.use_2_hot_node_feats = False
args.use_1_hot_node_feats = False
args.use_1_log_hot_node_feats = True
else:
args.use_2_hot_node_feats = False
args.use_1_hot_node_feats = True
args.use_1_log_hot_node_feats = False
return args
def run_experiment(args):
### Seed, rank and cuda
global rank, wsize, use_cuda
# if hasattr(args, 'ncores') and type(args.ncores) == type(1) and args.ncores >= 1:
# print(args.ncores)
# torch.set_num_threads(16)
args.use_cuda = torch.cuda.is_available() and args.use_cuda
args.device = "cpu"
if args.use_cuda:
args.device = "cuda"
print("use CUDA:", args.use_cuda, "- device:", args.device)
try:
dist.init_process_group(backend="mpi")
rank = dist.get_rank()
wsize = dist.get_world_size()
print(
"Hello from process {} (out of {})".format(
dist.get_rank(), dist.get_world_size()
)
)
if args.use_cuda:
torch.cuda.set_device(rank)
print("using the device {}".format(torch.cuda.current_device()))
except:
rank = 0
wsize = 1
print(
(
"MPI backend not preset. Set process rank to {} (out of {})".format(
rank, wsize
)
)
)
    if args.seed is None or str(args.seed).lower() == "none":
seed = 123 + rank
else:
seed = args.seed
np.random.seed(seed)
random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
args.seed = seed
args.rank = rank
args.wsize = wsize
### Load datasets and build everything
args = read_data_master(args)
args = prepare_args(args)
exp = Experiment(
args,
build_tasker,
build_gcn,
build_classifier,
build_custom_labeler,
build_tlp_labeler,
)
trainer = exp.build_trainer()
trainer.train()
# Use custom_labeler to evaluate final results
# It is a bit wasteful to reload the entire experiment, but it is simple.
if (
hasattr(args, "custom_labeler")
and args.custom_labeler == True
and args.save_predictions
):
settype_l = ["initial", "intermediate", "test"]
for settype in settype_l:
# Ensure args are reset before each run and load
args = read_data_master(args)
args = prepare_args(args)
build_custom_labeler_part = partial(build_custom_labeler, settype)
exp = Experiment(
args,
build_tasker,
build_gcn,
build_classifier,
build_custom_labeler_part,
build_tlp_labeler,
eval_only=True,
)
trainer = exp.build_trainer()
trainer.eval()
def notify(args, text):
if args.notify:
print("notify " + text)
notify_text = u.get_experiment_notification(args) + " " + text
print("Notifying of completion")
with open("../slack_incoming_hook.txt", "r") as f:
slack_hook = f.read()
try:
# Send a notification to a slack channel.
r = requests.post(slack_hook, json={"text": notify_text})
if r.status_code != requests.codes.ok:
print("Notification failed, status code {}".format(r.status_code))
except requests.exceptions.ConnectionError as e:
print("could not connect, is there any internet?")
if __name__ == "__main__":
parser = u.create_parser()
args = u.parse_args(parser)
config_folder = "/".join(args["config_file"].split("/")[:-1])
master_args = u.read_master_args(config_folder + "/master.yaml")
all_args = u.Namespace({**master_args, **args})
args = all_args
# Assign the requested random hyper parameters
# args = build_random_hyper_params(args) #Replaced by grid
exp_args_list = build_grid(all_args)
exp_durations = []
start_time_tot = time.time()
remove_microsec = lambda x: str(x).split(".")[0]
cell_tot = len(exp_args_list)
cell_num = 0
start_notify_done = False
print("Total number of runs", cell_tot)
try:
for i, exp_args in enumerate(exp_args_list):
cell_num = i + 1
print("Grid cell {}/{} args {}".format(cell_num, cell_tot, exp_args.grid))
if not u.skip_cell(exp_args):
if not start_notify_done:
notify(args, "{}/{} started".format(cell_num, cell_tot))
start_notify_done = True
# Initiate log to signal to other cells that this cell is taken
# Useful if cells are run as different processes and preprocessing takes time.
u.add_log_lock(exp_args)
start = time.time()
run_experiment(exp_args)
if exp_args.one_cell:
print("Exiting after one cell")
break
end = time.time()
exp_durations.append(end - start)
else:
print("SKIPPING CELL " + u.get_gridcell(exp_args))
except Exception as e:
notify(args, "{}/{} crashed {}".format(cell_num, cell_tot, str(e)))
raise
except:
notify(args, "{}/{} crashed {}".format(cell_num, cell_tot, sys.exc_info()[0]))
raise
end_time_tot = time.time()
time_tot = end_time_tot - start_time_tot
duration = remove_microsec(str(datetime.timedelta(seconds=time_tot)))
notify(args, "{}/{} complete, duration {}".format(cell_num, cell_tot, duration))
| []
| []
| [
"CUDA_LAUNCH_BLOCKING"
]
| [] | ["CUDA_LAUNCH_BLOCKING"] | python | 1 | 0 | |
tutorial/manage.py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
"""Run administrative tasks."""
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'tutotrial.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| []
| []
| []
| [] | [] | python | 0 | 0 | |
src-py/getUniqueReads.py | import click
import pysam
@click.command()
@click.option('--bf', help='path for bamfile')
def subsample(bf):
bam = pysam.AlignmentFile(bf, 'rb')
wBam = pysam.AlignmentFile(bf[:-4]+'_unique.bam', 'wb', template=bam)
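    # Keep only uniquely mapped reads (NH tag == 1) and write them to the new BAM file.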
for line in bam:
if(line.get_tag('NH') == 1):
wBam.write(line)
bam.close()
wBam.close()
if __name__=="__main__":
subsample()
| []
| []
| []
| [] | [] | python | null | null | null |
runtests.py | #!/usr/bin/env python
# -*- coding: UTF-8
from __future__ import unicode_literals, absolute_import
import os
import sys
import django
from django.conf import settings
from django.test.utils import get_runner
def run_tests(*test_args):
if not test_args:
test_args = ['tests']
os.environ['DJANGO_SETTINGS_MODULE'] = 'tests.settings'
django.setup()
TestRunner = get_runner(settings)
test_runner = TestRunner()
failures = test_runner.run_tests(test_args)
sys.exit(bool(failures))
if __name__ == '__main__':
run_tests(*sys.argv[1:])
| []
| []
| [
"DJANGO_SETTINGS_MODULE"
]
| [] | ["DJANGO_SETTINGS_MODULE"] | python | 1 | 0 | |
website1/wsgi.py | """
WSGI config for website1 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'website1.settings')
application = get_wsgi_application()
| []
| []
| []
| [] | [] | python | 0 | 0 | |
tests/test_cppcheck.py | #!/usr/bin/env python
# Copyright 2020-2021 Cyberbotics Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test quality of the source code using Cppcheck."""
import unittest
import os
import multiprocessing
from distutils.spawn import find_executable
class TestCppCheck(unittest.TestCase):
"""Unit test for CppCheck errors."""
def setUp(self):
"""Set up called before each test."""
self.OPENDR_HOME = os.environ['OPENDR_HOME']
self.reportFilename = os.path.join(self.OPENDR_HOME, 'tests', 'cppcheck_report.txt')
self.extensions = ['c', 'h', 'cpp', 'hpp', 'cc', 'hh', 'c++', 'h++']
def test_cppcheck_is_correctly_installed(self):
"""Test Cppcheck is correctly installed."""
self.assertTrue(
find_executable('cppcheck') is not None,
msg='Cppcheck is not installed on this computer.'
)
def run_cppcheck(self, command):
"""Run Cppcheck command and check for errors."""
curdir = os.getcwd()
os.chdir(self.OPENDR_HOME)
if os.path.isfile(self.reportFilename):
os.remove(self.reportFilename)
os.system(command) # warning: on Windows, the length of command is limited to 8192 characters
if os.path.isfile(self.reportFilename):
with open(self.reportFilename, 'r') as reportFile:
reportText = reportFile.read()
self.assertTrue(
not reportText,
msg='Cppcheck detected some errors:\n\n%s' % reportText
)
os.remove(self.reportFilename)
os.chdir(curdir)
def add_source_files(self, sourceDirs, skippedDirs, skippedfiles=[]):
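        """Build the Cppcheck source arguments, restricted to modified files when tests/sources/modified_files.txt exists."""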
command = ''
modified_files = os.path.join(self.OPENDR_HOME, 'tests', 'sources', 'modified_files.txt')
if os.path.isfile(modified_files):
with open(modified_files, 'r') as file:
for line in file:
line = line.strip()
extension = os.path.splitext(line)[1][1:].lower()
if extension not in self.extensions:
continue
for sourceDir in sourceDirs:
if line.startswith(sourceDir):
shouldSkip = False
for skipped in skippedDirs + skippedfiles:
if line.startswith(skipped):
shouldSkip = True
break
if not shouldSkip:
command += ' \"' + line + '\"'
for source in skippedfiles:
command += ' --suppress=\"*:' + source + '\"'
else:
for source in skippedfiles:
command += ' --suppress=\"*:' + source + '\"'
for source in skippedDirs:
command += ' -i\"' + source + '\"'
for source in sourceDirs:
command += ' \"' + source + '\"'
return command
def test_sources_with_cppcheck(self):
"""Test Webots with Cppcheck."""
sourceDirs = [
'src/c',
]
skippedDirs = [
]
includeDirs = [
'include/'
]
command = 'cppcheck --enable=warning,style,performance,portability --inconclusive --force -q'
command += ' -j %s' % str(multiprocessing.cpu_count())
command += ' --inline-suppr --suppress=invalidPointerCast --suppress=useStlAlgorithm --suppress=uninitMemberVar '
command += ' --suppress=noCopyConstructor --suppress=noOperatorEq --suppress=strdupCalled'
# command += ' --xml ' # Uncomment this line to get more information on the errors
command += ' --output-file=\"' + self.reportFilename + '\"'
for include in includeDirs:
command += ' -I\"' + include + '\"'
sources = self.add_source_files(sourceDirs, skippedDirs)
if not sources:
return
command += sources
self.run_cppcheck(command)
def test_projects_with_cppcheck(self):
"""Test projects with Cppcheck."""
sourceDirs = [
'projects/c_api/src'
]
skippedDirs = [
]
skippedfiles = [
]
command = 'cppcheck --enable=warning,style,performance,portability --inconclusive --force -q '
command += '--inline-suppr --suppress=invalidPointerCast --suppress=useStlAlgorithm -UKROS_COMPILATION '
command += '--suppress=strdupCalled --suppress=ctuOneDefinitionRuleViolation '
# command += '--xml ' # Uncomment this line to get more information on the errors
command += '--std=c++03 --output-file=\"' + self.reportFilename + '\"'
sources = self.add_source_files(sourceDirs, skippedDirs, skippedfiles)
if not sources:
return
command += sources
self.run_cppcheck(command)
if __name__ == '__main__':
unittest.main()
| []
| []
| [
"OPENDR_HOME"
]
| [] | ["OPENDR_HOME"] | python | 1 | 0 | |
OracleWebLogic/samples/12212-domain-online-config/container-scripts/jms-config.py | from java.io import FileInputStream
from java.util import Properties
from java.lang import Thread
import java.lang
import os
import string
propInputStream = FileInputStream("/u01/oracle/jms-config.properties")
configProps = Properties()
configProps.load(propInputStream)
#Read Properties
##############################
# 1 - Connecting details - read from system arguments
##############################
domainname = os.environ.get('DOMAIN_NAME', 'base_domain')
admin_name = os.environ.get('ADMIN_NAME', 'AdminServer')
domainhome = os.environ.get('DOMAIN_HOME', '/u01/oracle/user_projects/domains/' + domainname)
adminport = os.environ.get('ADMIN_PORT', '7001')
username = os.environ.get('ADMIN_USER', 'weblogic')
password = os.environ.get('ADMIN_PASSWORD', 'welcome1')
print('admin_name : [%s]' % admin_name);
print('admin_user : [%s]' % username);
print('admin_password : [%s]' % password);
print('admin_port : [%s]' % adminport);
print('domain_home : [%s]' % domainhome);
clusterName = os.environ.get("CLUSTER_NAME", "DockerCluster")
migratableTargetName = configProps.get("migratabletarget.name")
#machineName = configProps.get("machine.name")
# 2 - JMSServer details
jmsServerName = configProps.get("jms.server.name")
storeName = configProps.get("store.name")
storePath = configProps.get("store.path")
# 3 - SystemModule Details
systemModuleName = configProps.get("system.module.name")
# 4 - ConnectionFactory Details
connectionFactoryName = configProps.get("connection.factory.name")
ConnectionFactoryJNDIName = configProps.get("connection.factory.jndi.name")
# 5 - SubDeployment, Queue & Topic Details
SubDeploymentName = configProps.get("sub.deployment.name")
queueName = configProps.get("queue.name")
queueJNDIName = configProps.get("queue.jndi.name")
topicName = configProps.get("topic.name")
topicJNDIName = configProps.get("topic.jndi.name")
# Connect to the AdminServer
# ==========================
#Connection to the Server
print 'connection to Weblogic Admin Server'
connect(username,password,"t3://localhost:7001")
#Print Server Information
domainConfig()
serverNames = cmo.getServers()
machineNames = cmo.getMachines()
domainRuntime()
runningServer = ''
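# Report the runtime state of every server and attempt to start any that appear to be down.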
for server in serverNames:
name = server.getName()
print 'server : '+name
try:
cd('/ServerRuntimes/'+name)
except Exception, e:
print 'Server :'+name +' seems to be down '
print 'Starting Server '+name
start(name,'Server')
Thread.sleep(5000)
continue
serverState = cmo.getState()
if serverState == "RUNNING":
print 'Server ' + name + ' is :\033[1;32m' + serverState + '\033[0m'
elif serverState == "STARTING":
print 'Server ' + name + ' is :\033[1;33m' + serverState + '\033[0m'
elif serverState == "UNKNOWN":
print 'Server ' + name + ' is :\033[1;34m' + serverState + '\033[0m'
else:
print 'Server ' + name + ' is :\033[1;31m' + serverState + '\033[0m'
for server in serverNames:
name = server.getName()
try:
if 'admin' in name.lower():
continue
#if name == 'AdminServer' or name == 'Admin' or name:
# continue
cd('/ServerRuntimes/'+name)
runningServer = name
break
except Exception, e:
print 'Server :'+name+'seems to be down'
print 'Running Server '+runningServer
domainConfig()
clusterAddress=''
numberofservers=0
i=1;
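# Build the cluster address string from each managed server's dockerNAP public host and port (the admin server is skipped).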
for server in serverNames:
name = server.getName()
try:
numberofservers=i
if 'admin' in name.lower():
continue
print 'server '+name
cd('/')
print 'going to dockerNAP'
cd('/Servers/'+name+'/NetworkAccessPoints/dockerNAP')
print 'in dockerNAP'
portNumber = cmo.getPublicPort()
# portNumber = server.getListenPort()
print 'portNumber '+ `portNumber`
host = cmo.getPublicAddress()
print 'host ' + `host`
if i > 1:
clusterAddress = clusterAddress + ','
clusterAddress = clusterAddress + host + ':' + `portNumber`
i= i + 1
except Exception, e:
print 'Error creating up cluster Address'
print 'Cluster Address '+clusterAddress
machineName=''
i=1;
for machine in machineNames:
name = machine.getName()
try:
print 'machine '+name
if i > 1:
machineName = machineName + ','
machineName = machineName + `name`
i= i + 1
except Exception, e:
print 'Error creating up machine names'
print 'Candidate machines '+machineName
#Cleanup
###########################
cd('/')
edit()
print 'Removing JMS System Module, JMS Server & FileStore....'
startEdit()
cd('/')
cmo.destroyJMSSystemResource(getMBean('/JMSSystemResources/'+systemModuleName))
cmo.destroyJMSServer(getMBean('/JMSServers/'+jmsServerName))
cmo.destroyFileStore(getMBean('/FileStores/'+storeName))
activate()
#Create Migratable Target
##############################
#print 'Setting Migration Basis as Consensus...'
#cd('/')
#startEdit()
#The following required server restart. So, moved into ../config_NAP.py
#cd('/Clusters/'+clusterName)
#cmo.setMigrationBasis('consensus')
#cmo.setClusterAddress(clusterAddress)
machineArray = []
for machine in machineNames:
name = machine.getName()
machineArray.append(ObjectName('com.bea:Name='+name+',Type=Machine'))
for p in machineArray: print p
#print 'Candidate Machine array ' + machineArray
#set('CandidateMachinesForMigratableServers',jarray.array(machineArray, ObjectName))
#activate()
#List of all servers will be stored in the list
#print 'Creating Migratable Target...'
#candidateServerList = []
#for server_loop1 in serverNames:
# name = server_loop1.getName()
# if 'admin' in name.lower():
# continue
# candidateServerList.append(ObjectName('com.bea:Name='+name+',Type=Server'))
#
#for p in candidateServerList: print p
#cd('/')
#startEdit()
#ref = getMBean('/MigratableTargets/' + migratableTargetName)
#if(ref != None):
# print '########## Migratable Target already exists with name '+ migratableTargetName
#else:
# cmo.createMigratableTarget(migratableTargetName)
#cd('/MigratableTargets/'+migratableTargetName)
#cmo.setCluster(getMBean('/Clusters/'+clusterName))
#cmo.setUserPreferredServer(getMBean('/Servers/'+runningServer))
#cmo.setMigrationPolicy('exactly-once')
#set('ConstrainedCandidateServers',jarray.array(candidateServerList, ObjectName))
#cmo.setNumberOfRestartAttempts(6)
#cmo.setNonLocalPostAllowed(false)
#cmo.setRestartOnFailure(false)
#cmo.setPostScriptFailureFatal(true)
#cmo.setSecondsBetweenRestarts(30)
#activate()
#creating FileStore
############################
print 'Creating JMS FileStore....'
cd('/')
startEdit()
ref = getMBean('/FileStores/' + storeName)
if(ref != None):
print '########## File Store already exists with name '+ storeName
else:
cmo.createFileStore(storeName)
print '===> Created FileStore - ' + storeName
Thread.sleep(10)
cd('/FileStores/'+storeName)
cmo.setDirectory(storePath)
print 'Running Server '+runningServer
#set('Targets',jarray.array([ObjectName('com.bea:Name='+runningServer+' (migratable),Type=MigratableTarget')], ObjectName))
# set('Targets',jarray.array([ObjectName('com.bea:Name='+migratableTargetName+',Type=MigratableTarget')], ObjectName))
#activate()
#Creating JMS Server
############################
print 'Creating JMS Server....'
startEdit()
cd('/')
ref = getMBean('/JMSServers/' + jmsServerName)
if(ref != None):
print '########## JMS Server already exists with name '+ jmsServerName
else:
cmo.createJMSServer(jmsServerName)
print '===> Created JMS Server - ' + jmsServerName
Thread.sleep(10)
cd('/JMSServers/'+jmsServerName)
cmo.setPersistentStore(getMBean('/FileStores/'+storeName))
#set('Targets',jarray.array([ObjectName('com.bea:Name='+runningServer+' (migratable),Type=MigratableTarget')], ObjectName))
# set('Targets',jarray.array([ObjectName('com.bea:Name='+migratableTargetName+',Type=MigratableTarget')], ObjectName))
activate()
#Creating JMS Module
#########################
print 'Creating JMS Module....in cluster: '+clusterName
startEdit()
cd('/')
ref = getMBean('/JMSSystemResources/' + systemModuleName)
if(ref != None):
print '########## JMS System Module Already exists with name '+ systemModuleName
else:
cmo.createJMSSystemResource(systemModuleName)
print '===> Created JMS System Module - ' + systemModuleName
cd('/JMSSystemResources/'+systemModuleName)
set('Targets',jarray.array([ObjectName('com.bea:Name='+clusterName+',Type=Cluster')], ObjectName))
activate()
#Creating JMS SubDeployment
############################
print 'Creating JMS SubDeployment....'
startEdit()
ref = getMBean('/JMSSystemResources/'+systemModuleName+'/SubDeployments/'+SubDeploymentName)
if(ref != None):
print '########## JMS SubDeployment Already exists with name '+ SubDeploymentName + 'in module '+systemModuleName
else:
cmo.createSubDeployment(SubDeploymentName)
print '===> Created JMS SubDeployment - ' + systemModuleName
cd('/JMSSystemResources/'+systemModuleName+'/SubDeployments/'+SubDeploymentName)
set('Targets',jarray.array([ObjectName('com.bea:Name='+jmsServerName+',Type=JMSServer')], ObjectName))
activate()
#Creating JMS Connection Factory
###############################
print 'Creating JMS Connection Factory....'
startEdit()
ref = getMBean('/JMSSystemResources/'+systemModuleName+'/JMSResource/'+systemModuleName+'/ConnectionFactories/'+connectionFactoryName)
if(ref != None):
print '########## JMS Connection Factory Already exists with name '+ connectionFactoryName + 'in module '+systemModuleName
else:
cd('/JMSSystemResources/'+systemModuleName+'/JMSResource/'+systemModuleName)
cmo.createConnectionFactory(connectionFactoryName)
print '===> Created Connection Factory - ' + connectionFactoryName
cd('/JMSSystemResources/'+systemModuleName+'/JMSResource/'+systemModuleName+'/ConnectionFactories/'+connectionFactoryName)
cmo.setJNDIName(ConnectionFactoryJNDIName)
cd('/JMSSystemResources/'+systemModuleName+'/JMSResource/'+systemModuleName+'/ConnectionFactories/'+connectionFactoryName+'/SecurityParams/'+connectionFactoryName)
cmo.setAttachJMSXUserId(false)
cd('/JMSSystemResources/'+systemModuleName+'/JMSResource/'+systemModuleName+'/ConnectionFactories/'+connectionFactoryName+'/ClientParams/'+connectionFactoryName)
cmo.setClientIdPolicy('Restricted')
cmo.setSubscriptionSharingPolicy('Exclusive')
cmo.setMessagesMaximum(10)
cd('/JMSSystemResources/'+systemModuleName+'/JMSResource/'+systemModuleName+'/ConnectionFactories/'+connectionFactoryName+'/TransactionParams/'+connectionFactoryName)
cmo.setXAConnectionFactoryEnabled(true)
cd('/JMSSystemResources/'+systemModuleName+'/SubDeployments/'+SubDeploymentName)
set('Targets',jarray.array([ObjectName('com.bea:Name='+jmsServerName+',Type=JMSServer')], ObjectName))
cd('/JMSSystemResources/'+systemModuleName+'/JMSResource/'+systemModuleName+'/ConnectionFactories/'+connectionFactoryName)
cmo.setSubDeploymentName(''+SubDeploymentName)
activate()
#Creating JMS Distributed Queue
##################################
print 'Creating JMS Distributed Queue....'
startEdit()
ref = getMBean('/JMSSystemResources/'+systemModuleName+'/JMSResource/'+systemModuleName+'/UniformDistributedQueues/'+queueName)
if(ref != None):
print '########## JMS Queue Already exists with name '+ queueName + 'in module '+systemModuleName
else:
cd('/JMSSystemResources/'+systemModuleName+'/JMSResource/'+systemModuleName)
cmo.createUniformDistributedQueue(queueName)
print '===> Created Distributed Queue - ' + queueName
cd('/JMSSystemResources/'+systemModuleName+'/JMSResource/'+systemModuleName+'/UniformDistributedQueues/'+queueName)
cmo.setJNDIName(queueJNDIName)
cd('/JMSSystemResources/'+systemModuleName+'/SubDeployments/'+SubDeploymentName)
set('Targets',jarray.array([ObjectName('com.bea:Name='+jmsServerName+',Type=JMSServer')], ObjectName))
cd('/JMSSystemResources/'+systemModuleName+'/JMSResource/'+systemModuleName+'/UniformDistributedQueues/'+queueName)
cmo.setSubDeploymentName(''+SubDeploymentName)
activate()
#Creating JMS Distributed Topic
#################################
print 'Creating JMS Distributed Topic....'
startEdit()
ref = getMBean('/JMSSystemResources/'+systemModuleName+'/JMSResource/'+systemModuleName+'/UniformDistributedTopics/'+topicName)
if(ref != None):
print '########## JMS Topic Already exists with name '+ topicName + 'in module '+systemModuleName
else:
cd('/JMSSystemResources/'+systemModuleName+'/JMSResource/'+systemModuleName)
cmo.createUniformDistributedTopic(topicName)
print '===> Created Distributed Topic - ' + topicName
cd('/JMSSystemResources/'+systemModuleName+'/JMSResource/'+systemModuleName+'/UniformDistributedTopics/'+topicName)
cmo.setJNDIName(topicJNDIName)
cmo.setForwardingPolicy('Replicated')
cd('/JMSSystemResources/'+systemModuleName+'/SubDeployments/'+SubDeploymentName)
set('Targets',jarray.array([ObjectName('com.bea:Name='+jmsServerName+',Type=JMSServer')], ObjectName))
cd('/JMSSystemResources/'+systemModuleName+'/JMSResource/'+systemModuleName+'/UniformDistributedTopics/'+topicName)
cmo.setSubDeploymentName(''+SubDeploymentName)
activate()
disconnect()
exit()
print '###### Completed configuration of all required JMS Objects ##############'
| []
| []
| [
"ADMIN_USER",
"ADMIN_NAME",
"DOMAIN_HOME",
"DOMAIN_NAME",
"ADMIN_PORT",
"ADMIN_PASSWORD",
"CLUSTER_NAME"
]
| [] | ["ADMIN_USER", "ADMIN_NAME", "DOMAIN_HOME", "DOMAIN_NAME", "ADMIN_PORT", "ADMIN_PASSWORD", "CLUSTER_NAME"] | python | 7 | 0 | |
tests/fields/test_float_fields.py | import pytest
from invana_py.ogm.exceptions import FieldValidationError
from invana_py.ogm.fields import StringProperty, FloatProperty, LongProperty
from invana_py.ogm.models import VertexModel
from invana_py.connector.data_types import FloatType
from invana_py import InvanaGraph
import os
gremlin_url = os.environ.get("GREMLIN_SERVER_URL", "ws://megamind-ws:8182/gremlin")
graph = InvanaGraph(gremlin_url)
DEFAULT_USERNAME = "rrmerugu"
DEFAULT_POINTS_VALUE = 5
class Star(VertexModel):
graph = graph
properties = {
'name': StringProperty(min_length=3, max_length=30, trim_whitespaces=True),
'distance_from_earth_float': FloatProperty(default=DEFAULT_POINTS_VALUE, min_value=5,
max_value=1989000000000000 * 100),
}
class TestFloatField:
def test_field(self):
graph.g.V().drop()
star = Star.objects.create(name="Sun", distance_from_earth_float=FloatType(1989000000000000))
assert isinstance(star.properties.distance_from_earth_float, FloatType)
def test_field_max_value(self):
graph.g.V().drop()
with pytest.raises(FieldValidationError) as exec_info:
Star.objects.create(name="Sun", distance_from_earth_float=FloatType(1989000000000000000000000000000 * 10000))
assert "max_value for field" in exec_info.value.__str__()
def test_field_min_value(self):
graph.g.V().drop()
with pytest.raises(FieldValidationError) as exec_info:
Star.objects.create(name="Sun", distance_from_earth_float=2)
assert "min_value for field " in exec_info.value.__str__()
def test_field_default(self):
graph.g.V().drop()
star = Star.objects.create(name="Ravi")
assert star.properties.distance_from_earth_float == DEFAULT_POINTS_VALUE
| []
| []
| [
"GREMLIN_SERVER_URL"
]
| [] | ["GREMLIN_SERVER_URL"] | python | 1 | 0 | |
reminders.py | from application import handlers, Application
import os
app = Application(handlers, os.environ, debug=True)
db = app.db
celery = app.celery()
import tasks
login_session = app.login_session
client_id = app.google_client_id | []
| []
| []
| [] | [] | python | 0 | 0 | |
nmigen_boards/ice40_hx1k_blink_evn.py | import os
import subprocess
from nmigen.build import *
from nmigen.vendor.lattice_ice40 import *
from .resources import *
__all__ = ["ICE40HX1KBlinkEVNPlatform"]
class ICE40HX1KBlinkEVNPlatform(LatticeICE40Platform):
device = "iCE40HX1K"
package = "VQ100"
default_clk = "clk3p3"
resources = [
Resource("clk3p3", 0, Pins("13", dir="i"), Clock(3.3e6),
Attrs(GLOBAL=True, IO_STANDARD="SB_LVCMOS")),
*LEDResources(pins="59 56 53 51", attrs=Attrs(IO_STANDARD="SB_LVCMOS")),
Resource("touch", 0, Pins("60"), Attrs(IO_STANDARD="SB_LVCMOS")),
Resource("touch", 1, Pins("57"), Attrs(IO_STANDARD="SB_LVCMOS")),
Resource("touch", 2, Pins("54"), Attrs(IO_STANDARD="SB_LVCMOS")),
Resource("touch", 3, Pins("52"), Attrs(IO_STANDARD="SB_LVCMOS")),
*SPIFlashResources(0,
cs="49", clk="48", mosi="45", miso="46",
attrs=Attrs(IO_STANDARD="SB_LVCMOS")
),
]
connectors = [
Connector("pmod", 1, "10 9 8 7 - - 4 3 2 1 - -"), # J1
Connector("pmod", 5, "40 42 62 64 - - 37 41 63 45 - -"), # J5
Connector("pmod", 6, "25 24 21 20 - - 26 27 28 33 - -"), # J6
Connector("pmod", 11, "49 45 46 48 - -"), # J11
Connector("pmod", 12, "59 56 53 51 - -"), # J12
]
def toolchain_program(self, products, name):
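        """Program the board with iCEburn; set the ICEBURN environment variable to use a different binary."""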
iceburn = os.environ.get("ICEBURN", "iCEburn")
with products.extract("{}.bin".format(name)) as bitstream_filename:
subprocess.check_call([iceburn, "-evw", bitstream_filename])
if __name__ == "__main__":
from .test.blinky import *
ICE40HX1KBlinkEVNPlatform().build(Blinky(), do_program=True)
| []
| []
| [
"ICEBURN"
]
| [] | ["ICEBURN"] | python | 1 | 0 | |
schoolofnet/intensivaomicroservicos/order/queue/queue.go | package queue
import (
"fmt"
"os"
"github.com/streadway/amqp"
)
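// Connect dials the AMQP broker at the address in the AMQP_URL environment variable and returns an open channel.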
func Connect() (*amqp.Channel, error) {
connection, err := amqp.Dial(os.Getenv("AMQP_URL"))
if err != nil {
return nil, err
}
channel, err := connection.Channel()
if err != nil {
return nil, err
}
return channel, nil
}
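// Consuming declares a durable queue with the given name and forwards each message body to the in channel from a background goroutine.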
func Consuming(name string, in chan []byte, channel *amqp.Channel) error {
queue, err := channel.QueueDeclare(name, true, false, false, false, nil)
if err != nil {
fmt.Println(err)
return err
}
messages, err := channel.Consume(queue.Name, "checkout", true, false, false, false, nil)
if err != nil {
fmt.Println(err)
return err
}
go func() {
for message := range messages {
in <- message.Body
}
}()
return nil
}
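// Notify publishes the JSON-encoded body to the given exchange with an empty routing key.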
func Notify(exchange string, body []byte, channel *amqp.Channel) error {
err := channel.Publish(exchange, "", false, false, amqp.Publishing{
ContentType: "application/json",
Body: body,
})
return err
}
| [
"\"AMQP_URL\""
]
| []
| [
"AMQP_URL"
]
| [] | ["AMQP_URL"] | go | 1 | 0 | |
test/unit/test_allgamescoiny_things.py | import pytest
import sys
import os
os.environ['SENTINEL_CONFIG'] = os.path.normpath(os.path.join(os.path.dirname(__file__), '../test_sentinel.conf'))
sys.path.append(os.path.normpath(os.path.join(os.path.dirname(__file__), '../../lib')))
@pytest.fixture
def valid_allgamescoin_address(network='mainnet'):
return 'yYe8KwyaUu5YswSYmB3q3ryx8XTUu9y7Ui' if (network == 'testnet') else 'XpjStRH8SgA6PjgebtPZqCa9y7hLXP767n'
@pytest.fixture
def invalid_allgamescoin_address(network='mainnet'):
return 'yYe8KwyaUu5YswSYmB3q3ryx8XTUu9y7Uj' if (network == 'testnet') else 'XpjStRH8SgA6PjgebtPZqCa9y7hLXP767m'
@pytest.fixture
def current_block_hash():
return '000001c9ba1df5a1c58a4e458fb6febfe9329b1947802cd60a4ae90dd754b534'
@pytest.fixture
def mn_list():
from masternode import Masternode
masternodelist_full = {
u'701854b26809343704ab31d1c45abc08f9f83c5c2bd503a9d5716ef3c0cda857-1': u' ENABLED 70201 yjaFS6dudxUTxYPTDB9BYd1Nv4vMJXm3vK 1474157572 82842 1474152618 71111 52.90.74.124:19999',
u'f68a2e5d64f4a9be7ff8d0fbd9059dcd3ce98ad7a19a9260d1d6709127ffac56-1': u' ENABLED 70201 yUuAsYCnG5XrjgsGvRwcDqPhgLUnzNfe8L 1474157732 1590425 1474155175 71122 [2604:a880:800:a1::9b:0]:19999',
u'656695ed867e193490261bea74783f0a39329ff634a10a9fb6f131807eeca744-1': u' ENABLED 70201 yepN97UoBLoP2hzWnwWGRVTcWtw1niKwcB 1474157704 824622 1474152571 71110 178.62.203.249:19999',
}
mnlist = [Masternode(vin, mnstring) for (vin, mnstring) in masternodelist_full.items()]
return mnlist
@pytest.fixture
def mn_status_good():
# valid masternode status enabled & running
status = {
"vin": "CTxIn(COutPoint(f68a2e5d64f4a9be7ff8d0fbd9059dcd3ce98ad7a19a9260d1d6709127ffac56, 1), scriptSig=)",
"service": "[2604:a880:800:a1::9b:0]:19999",
"pubkey": "yUuAsYCnG5XrjgsGvRwcDqPhgLUnzNfe8L",
"status": "Masternode successfully started"
}
return status
@pytest.fixture
def mn_status_bad():
# valid masternode but not running/waiting
status = {
"vin": "CTxIn(COutPoint(0000000000000000000000000000000000000000000000000000000000000000, 4294967295), coinbase )",
"service": "[::]:0",
"status": "Node just started, not yet activated"
}
return status
# ========================================================================
def test_valid_allgamescoin_address():
from allgamescoinlib import is_valid_allgamescoin_address
main = valid_allgamescoin_address()
test = valid_allgamescoin_address('testnet')
assert is_valid_allgamescoin_address(main) is True
assert is_valid_allgamescoin_address(main, 'mainnet') is True
assert is_valid_allgamescoin_address(main, 'testnet') is False
assert is_valid_allgamescoin_address(test) is False
assert is_valid_allgamescoin_address(test, 'mainnet') is False
assert is_valid_allgamescoin_address(test, 'testnet') is True
def test_invalid_allgamescoin_address():
from allgamescoinlib import is_valid_allgamescoin_address
main = invalid_allgamescoin_address()
test = invalid_allgamescoin_address('testnet')
assert is_valid_allgamescoin_address(main) is False
assert is_valid_allgamescoin_address(main, 'mainnet') is False
assert is_valid_allgamescoin_address(main, 'testnet') is False
assert is_valid_allgamescoin_address(test) is False
assert is_valid_allgamescoin_address(test, 'mainnet') is False
assert is_valid_allgamescoin_address(test, 'testnet') is False
def test_deterministic_masternode_elections(current_block_hash, mn_list):
from allgamescoinlib import elect_mn
winner = elect_mn(block_hash=current_block_hash, mnlist=mn_list)
assert winner == 'f68a2e5d64f4a9be7ff8d0fbd9059dcd3ce98ad7a19a9260d1d6709127ffac56-1'
winner = elect_mn(block_hash='00000056bcd579fa3dc9a1ee41e8124a4891dcf2661aa3c07cc582bfb63b52b9', mnlist=mn_list)
assert winner == '656695ed867e193490261bea74783f0a39329ff634a10a9fb6f131807eeca744-1'
def test_parse_masternode_status_vin():
from allgamescoinlib import parse_masternode_status_vin
status = mn_status_good()
vin = parse_masternode_status_vin(status['vin'])
assert vin == 'f68a2e5d64f4a9be7ff8d0fbd9059dcd3ce98ad7a19a9260d1d6709127ffac56-1'
status = mn_status_bad()
vin = parse_masternode_status_vin(status['vin'])
assert vin is None
def test_hash_function():
import allgamescoinlib
sb_data_hex = '7b226576656e745f626c6f636b5f686569676874223a2037323639362c20227061796d656e745f616464726573736573223a2022795965384b77796155753559737753596d42337133727978385854557539793755697c795965384b77796155753559737753596d4233713372797838585455753979375569222c20227061796d656e745f616d6f756e7473223a202232352e37353030303030307c32352e3735303030303030222c202274797065223a20327d'
sb_hash = '7ae8b02730113382ea75cbb1eecc497c3aa1fdd9e76e875e38617e07fb2cb21a'
hex_hash = "%x" % allgamescoinlib.hashit(sb_data_hex)
assert hex_hash == sb_hash
def test_blocks_to_seconds():
import allgamescoinlib
from decimal import Decimal
precision = Decimal('0.001')
assert Decimal(allgamescoinlib.blocks_to_seconds(0)) == Decimal(0.0)
assert Decimal(allgamescoinlib.blocks_to_seconds(2)).quantize(precision) \
== Decimal(314.4).quantize(precision)
assert int(allgamescoinlib.blocks_to_seconds(16616)) == 2612035
| []
| []
| [
"SENTINEL_CONFIG"
]
| [] | ["SENTINEL_CONFIG"] | python | 1 | 0 | |
baostock2ora.py | import baostock as bs
import pandas as pd
import os
import cx_Oracle
import time
from datetime import datetime
from sqlalchemy import create_engine, types
# http://baostock.com/baostock/index.php/A%E8%82%A1K%E7%BA%BF%E6%95%B0%E6%8D%AE
#os.environ['TNS_ADMIN'] = 'd:/oracle/product/11.2.0/client/network/admin/'
os.environ['TNS_ADMIN'] = '/home/opc/Wallet_atp/'
#engine = create_engine('oracle://mytomcatapp1:AAbb##444AAbb##444@atp_high',max_identifier_length=128)
engine = create_engine('oracle://mytomcatapp1:TGByhn#258@atp_high',max_identifier_length=30)
print(engine)
#engine = create_engine(
#'oracle+cx_oracle://scott:tiger@RACDB12PDB1', max_identifier_length=30)
# con = cx_Oracle.connect(p_username, p_password, p_service)
'''
p_username = 'admin'
p_password = 'AAbb##444AAbb##444'
p_service = 'atp_high'
con = cx_Oracle.connect(p_username, p_password, p_service)
'''
class Downloader(object):
def __init__(self,
date_start='1990-01-01',
date_end='2020-03-23'):
self._bs = bs
bs.login()
self.date_start = date_start
# self.date_end = datetime.datetime.now().strftime("%Y-%m-%d")
self.date_end = date_end
self.fields = "date,code,open,high,low,close,volume,amount," \
"adjustflag,turn,tradestatus,pctChg,peTTM," \
"pbMRQ,psTTM,pcfNcfTTM,isST"
def exit(self):
bs.logout()
def get_codes_by_date(self, date):
print(date)
stock_rs = bs.query_all_stock(date)
stock_df = stock_rs.get_data()
print(stock_df)
with engine.connect() as connection:
connection.execute("delete from dm_baostock")
stock_df2 = stock_df.copy(deep=True)
stock_df.columns = ['CODE', 'TRADESTATUS', 'CODE_NAME']
stock_df['CODE'] = stock_df['CODE'].apply(
lambda x: str(x[0:2]).upper()+x[3:9])
stock_df.to_sql('dm_baostock', engine, index=False, if_exists='append', dtype={
'CODE': types.VARCHAR(length=8),
'TRADESTATUS': types.INTEGER()})
return stock_df2
def run(self):
stock_df = self.get_codes_by_date(self.date_end)
#return
for index, row in stock_df.iterrows():
print(f'processing {index} {row["code"]} {row["code_name"]}')
break
start_time=time.time()
#code = "sh.600037"
#code = "sz.002007"
#df_code = bs.query_history_k_data_plus(code, self.fields,
# adjustflag:复权类型,默认不复权:3;1:后复权;2:前复权。
df_code = bs.query_history_k_data_plus(row["code"], self.fields,
start_date=self.date_start,
end_date=self.date_end,
frequency="d", adjustflag="2").get_data()
#frequency="d", adjustflag="3").get_data()
#print('query_history_k_data_plus respond error_code:'+rs.error_code)
#print('query_history_k_data_plus respond error_msg :'+rs.error_msg)
#df_code = rs.get_data()
# code_name = row["code_name"].replace('*', '')
code = row["code"].replace('.', '').upper()
#print(code)
#code = code[0:2].upper()+code[3:9]
df_code.columns = ['RQ', 'CODE', 'OPEN', 'HIGH', 'LOW', 'CLOSE', 'VOLUME', 'AMOUNT',
'ADJUSTFLAG', 'TURN', 'TRADESTATUS', 'PCTCHG', 'PETTM', 'PBMRQ',
'PSTTM', 'PCFNCFTTM', 'ISST']
#print(df_code.columns)
df_code['RQ'] = pd.to_datetime(
df_code['RQ'], format='%Y-%m-%d')
df_code['CODE'] = code
# df_code['CODE'].apply(
# lambda x: str(x[0:2]).upper()+x[3:9])
df_code['VOLUME'].replace('', '0', inplace=True)
df_code['AMOUNT'].replace('', '0', inplace=True)
df_code['TURN'].replace('', '0', inplace=True)
df_code['PCTCHG'].replace('', '0', inplace=True)
df_code['PETTM'].replace('', '0', inplace=True)
df_code['PBMRQ'].replace('', '0', inplace=True)
df_code['PSTTM'].replace('', '0', inplace=True)
df_code['PCFNCFTTM'].replace('', '0', inplace=True)
df_code['ISST'].replace('', '0', inplace=True)
convert_dict = {'CODE': str,
'OPEN': float,
'HIGH': float,
'LOW': float,
'CLOSE': float,
'VOLUME': int,
'AMOUNT': float,
'ADJUSTFLAG': int,
'TURN': float, 'TRADESTATUS': int, 'PCTCHG': float, 'PETTM': float, 'PBMRQ': float,
'PSTTM': float, 'PCFNCFTTM': float, 'ISST': int
}
#print(df_code.head())
df_code = df_code.astype(convert_dict)
print(df_code)
#print(df_code.dtypes)
print(df_code.shape)
df_code.to_sql('hq_baostock', engine, index=False, if_exists='append', dtype={
'CODE': types.VARCHAR(length=8),
'ISST': types.INTEGER()})
end_time=time.time()
print('elapsed '+str(end_time-start_time))
#break
'''
, dtype={
'DATE': types.DateTime(),
'CODE': types.VARCHAR(length=9),
'OPEN': types.Float(precision=4, asdecimal=True),
'HIGH': types.Float(precision=4, asdecimal=True),
'LOW': types.Float(precision=4, asdecimal=True),
'CLOSE': types.Float(precision=4, asdecimal=True),
'VOLUME': types.INTEGER(),
'AMOUNT': types.Float(precision=4, asdecimal=True),
'ADJUSTFLAG': types.INTEGER(),
'TURN': types.Float(precision=6, asdecimal=True),
'TRADESTATUS': types.INTEGER(),
'PCTCHG': types.Float(precision=6, asdecimal=True),
'PETTM': types.Float(precision=6, asdecimal=True),
'PBMRQ': types.Float(precision=6, asdecimal=True),
'PSTTM': types.Float(precision=6, asdecimal=True),
'PCFNCFTTM': types.Float(precision=6, asdecimal=True),
'ISST': types.INTEGER()})
'''
#break
# df_code.to_csv(
# f'{self.output_dir}/{row["code"]}.{code_name}.csv', index=False)
#self.exit()
if __name__ == '__main__':
# Fetch daily K-line (candlestick) data for all stocks
#now=datetime.now()
#t = now.strftime("%Y-%m-%d")
#downloader = Downloader(date_start=t, date_end=t)
downloader = Downloader(date_start='1990-12-19', date_end='2020-06-12')
downloader.run()
| []
| []
| [
"TNS_ADMIN"
]
| [] | ["TNS_ADMIN"] | python | 1 | 0 | |
lib/exabgp/application/healthcheck.py | #!/usr/bin/env python
"""Healthchecker for exabgp.
This program is to be used as a process for exabgp. It will announce
some VIP depending on the state of a health check implemented by a
third-party program wrapped by this program.
To use, declare this program as a process in your
:file:`/etc/exabgp/exabgp.conf`::
neighbor 192.0.2.1 {
router-id 192.0.2.2;
local-as 64496;
peer-as 64497;
}
process watch-haproxy {
run python -m exabgp healthcheck --cmd "curl -sf http://127.0.0.1/healthcheck" --label haproxy;
}
process watch-mysql {
run python -m exabgp healthcheck --cmd "mysql -u check -e 'SELECT 1'" --label mysql;
}
Use :option:`--help` to get options accepted by this program. A
configuration file is also possible. Such a configuration file looks
like this::
debug
name = haproxy
interval = 10
fast-interval = 1
command = curl -sf http://127.0.0.1/healthcheck
The left-part of each line is the corresponding long option.
When using label for loopback selection, the provided value should
match the beginning of the label without the interface prefix. In the
example above, this means that you should have addresses on lo
labelled ``lo:haproxy1``, ``lo:haproxy2``, etc.
"""
from __future__ import print_function
from __future__ import unicode_literals
import sys
import os
import subprocess
import re
import logging
import logging.handlers
import argparse
import signal
import time
import collections
logger = logging.getLogger("healthcheck")
try:
# Python 3.3+ or backport
from ipaddress import ip_address as _ip_address # pylint: disable=F0401
def ip_address(x):
try:
x = x.decode('ascii')
except AttributeError:
pass
return _ip_address(x)
except ImportError:
try:
# Python 2.6, 2.7, 3.2
from ipaddr import IPAddress as ip_address
except ImportError:
sys.stderr.write(
'\n'
'This program requires the python module ipaddress (for python 3.3+) or ipaddr (for python 2.6, 2.7, 3.2)\n'
'Please pip install one of them with one of the following commands.\n'
'> pip install ipaddress\n'
'> pip install ipaddr\n'
'\n'
)
sys.exit(1)
def enum(*sequential):
"""Create a simple enumeration."""
return type(str("Enum"), (), dict(zip(sequential, sequential)))
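# Illustrative behaviour (not part of the original file): each member of the
# generated class maps to its own name, e.g.
#   states = enum("UP", "DOWN")
#   states.UP == "UP"   # True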
def parse():
"""Parse arguments"""
formatter = argparse.RawDescriptionHelpFormatter
parser = argparse.ArgumentParser(description=sys.modules[__name__].__doc__,
formatter_class=formatter)
g = parser.add_mutually_exclusive_group()
g.add_argument("--debug", "-d", action="store_true",
default=False,
help="enable debugging")
g.add_argument("--silent", "-s", action="store_true",
default=False,
help="don't log to console")
g.add_argument("--syslog-facility", "-sF", metavar="FACILITY",
nargs='?',
const="daemon",
default="daemon",
help=("log to syslog using FACILITY, "
"default FACILITY is daemon"))
g.add_argument("--no-syslog", action="store_true",
help="disable syslog logging")
parser.add_argument("--name", "-n", metavar="NAME",
help="name for this healthchecker")
parser.add_argument("--config", "-F", metavar="FILE", type=open,
help="read configuration from a file")
parser.add_argument("--pid", "-p", metavar="FILE",
type=argparse.FileType('w'),
help="write PID to the provided file")
parser.add_argument("--user", metavar="USER",
help="set user after setting loopback addresses")
parser.add_argument("--group", metavar="GROUP",
help="set group after setting loopback addresses")
g = parser.add_argument_group("checking healthiness")
g.add_argument("--interval", "-i", metavar='N',
default=5,
type=float,
help="wait N seconds between each healthcheck")
g.add_argument("--fast-interval", "-f", metavar='N',
default=1,
type=float, dest="fast",
help=("when a state change is about to occur, "
"wait N seconds between each healthcheck"))
g.add_argument("--timeout", "-t", metavar='N',
default=5,
type=int,
help="wait N seconds for the check command to execute")
g.add_argument("--rise", metavar='N',
default=3,
type=int,
help="check N times before considering the service up")
g.add_argument("--fall", metavar='N',
default=3,
type=int,
help="check N times before considering the service down")
g.add_argument("--disable", metavar='FILE',
type=str,
help="if FILE exists, the service is considered disabled")
g.add_argument("--command", "--cmd", "-c", metavar='CMD',
type=str,
help="command to use for healthcheck")
g = parser.add_argument_group("advertising options")
g.add_argument("--next-hop", "-N", metavar='IP',
type=ip_address,
help="self IP address to use as next hop")
g.add_argument("--ip", metavar='IP',
type=ip_address, dest="ips", action="append",
help="advertise this IP address")
g.add_argument("--no-ip-setup",
action="store_false", dest="ip_setup",
help="don't setup missing IP addresses")
g.add_argument("--label", default=None,
help="use the provided label to match loopback addresses")
g.add_argument("--start-ip", metavar='N',
type=int, default=0,
help="index of the first IP in the list of IP addresses")
g.add_argument("--up-metric", metavar='M',
type=int, default=100,
help="first IP gets the metric M when the service is up")
g.add_argument("--down-metric", metavar='M',
type=int, default=1000,
help="first IP gets the metric M when the service is down")
g.add_argument("--disabled-metric", metavar='M',
type=int, default=500,
help=("first IP gets the metric M "
"when the service is disabled"))
g.add_argument("--increase", metavar='M',
type=int, default=1,
help=("for each additional IP address, "
"increase metric value by M"))
g.add_argument("--community", metavar="COMMUNITY",
type=str, default=None,
help="announce IPs with the supplied community")
g.add_argument("--as-path", metavar="ASPATH",
type=str, default=None,
help="announce IPs with the supplied as-path")
g.add_argument("--withdraw-on-down", action="store_true",
help=("Instead of increasing the metric on health failure, "
"withdraw the route"))
g = parser.add_argument_group("reporting")
g.add_argument("--execute", metavar='CMD',
type=str, action="append",
help="execute CMD on state change")
g.add_argument("--up-execute", metavar='CMD',
type=str, action="append",
help="execute CMD when the service becomes available")
g.add_argument("--down-execute", metavar='CMD',
type=str, action="append",
help="execute CMD when the service becomes unavailable")
g.add_argument("--disabled-execute", metavar='CMD',
type=str, action="append",
help="execute CMD when the service is disabled")
options = parser.parse_args()
if options.config is not None:
# A configuration file has been provided. Read each line and
# build an equivalent command line.
args = sum(["--{0}".format(l.strip()).split("=", 1)
for l in options.config.readlines()
if not l.strip().startswith("#") and l.strip()], [])
args = [x.strip() for x in args]
args.extend(sys.argv[1:])
options = parser.parse_args(args)
return options
def setup_logging(debug, silent, name, syslog_facility, syslog):
"""Setup logger"""
def syslog_address():
"""Return a sensible syslog address"""
if sys.platform == "darwin":
return "/var/run/syslog"
if sys.platform.startswith("freebsd"):
return "/var/run/log"
if sys.platform.startswith("linux"):
return "/dev/log"
raise EnvironmentError("Unable to guess syslog address for your "
"platform, try to disable syslog")
logger.setLevel(debug and logging.DEBUG or logging.INFO)
enable_syslog = syslog and not debug
# To syslog
if enable_syslog:
facility = getattr(logging.handlers.SysLogHandler,
"LOG_{0}".format(syslog_facility.upper()))
sh = logging.handlers.SysLogHandler(address=str(syslog_address()),
facility=facility)
if name:
healthcheck_name = "healthcheck-{0}".format(name)
else:
healthcheck_name = "healthcheck"
sh.setFormatter(logging.Formatter(
"{0}[{1}]: %(message)s".format(
healthcheck_name,
os.getpid())))
logger.addHandler(sh)
# To console
toconsole = (hasattr(sys.stderr, "isatty") and
sys.stderr.isatty() and # pylint: disable=E1101
not silent)
if toconsole:
ch = logging.StreamHandler()
ch.setFormatter(logging.Formatter(
"%(levelname)s[%(name)s] %(message)s"))
logger.addHandler(ch)
def loopback_ips(label):
"""Retrieve loopback IP addresses"""
logger.debug("Retrieve loopback IP addresses")
addresses = []
if sys.platform.startswith("linux"):
# Use "ip" (ifconfig is not able to see all addresses)
ipre = re.compile(r"^(?P<index>\d+):\s+(?P<name>\S+)\s+inet6?\s+"
r"(?P<ip>[\da-f.:]+)/(?P<netmask>\d+)\s+.*")
labelre = re.compile(r".*\s+lo:(?P<label>\S+).*")
cmd = subprocess.Popen("/sbin/ip -o address show dev lo".split(),
shell=False, stdout=subprocess.PIPE)
else:
# Try with ifconfig
ipre = re.compile(r"^inet6?\s+(alias\s+)?(?P<ip>[\da-f.:]+)\s+"
r"(?:netmask 0x(?P<netmask>[0-9a-f]+)|"
r"prefixlen (?P<mask>\d+)).*")
cmd = subprocess.Popen("/sbin/ifconfig lo0".split(), shell=False,
stdout=subprocess.PIPE)
labelre = re.compile(r"")
for line in cmd.stdout:
line = line.decode("ascii", "ignore").strip()
mo = ipre.match(line)
if not mo:
continue
ip = ip_address(mo.group("ip"))
if not ip.is_loopback:
if label:
lmo = labelre.match(line)
if not lmo or not lmo.group("label").startswith(label):
continue
addresses.append(ip)
logger.debug("Loopback addresses: %s", addresses)
return addresses
def setup_ips(ips, label):
"""Setup missing IP on loopback interface"""
existing = set(loopback_ips(label))
toadd = set(ips) - existing
for ip in toadd:
logger.debug("Setup loopback IP address %s", ip)
with open(os.devnull, "w") as fnull:
cmd = ["ip", "address", "add", str(ip), "dev", "lo"]
if label:
cmd += ["label", "lo:{0}".format(label)]
subprocess.check_call(
cmd, stdout=fnull, stderr=fnull)
# If we setup IPs we should also remove them on SIGTERM
def sigterm_handler(signum, frame): # pylint: disable=W0612,W0613
remove_ips(ips, label)
signal.signal(signal.SIGTERM, sigterm_handler)
def remove_ips(ips, label):
"""Remove added IP on loopback interface"""
existing = set(loopback_ips(label))
# Union of the IPs we set up and the IPs currently configured on loopback ("|" is a set union)
toremove = set(ips) | existing
for ip in toremove:
logger.debug("Remove loopback IP address %s", ip)
with open(os.devnull, "w") as fnull:
# We specify the prefix length due to ip addr warnings about wildcard deletion
cmd = ["ip", "address", "delete", str(ip) + "/32", "dev", "lo"]
if label:
cmd += ["label", "lo:{0}".format(label)]
try:
subprocess.check_call(
cmd, stdout=fnull, stderr=fnull)
except subprocess.CalledProcessError:
logger.warn("Unable to remove loopback IP address %s - is \
healthcheck running as root?", str(ip))
sys.exit(0)
def drop_privileges(user, group):
"""Drop privileges to specified user and group"""
if group is not None:
import grp
gid = grp.getgrnam(group).gr_gid
logger.debug("Dropping privileges to group {0}/{1}".format(group, gid))
try:
os.setresgid(gid, gid, gid)
except AttributeError:
os.setregid(gid, gid)
if user is not None:
import pwd
uid = pwd.getpwnam(user).pw_uid
logger.debug("Dropping privileges to user {0}/{1}".format(user, uid))
try:
os.setresuid(uid, uid, uid)
except AttributeError:
os.setreuid(uid, uid)
def check(cmd, timeout):
"""Check the return code of the given command.
:param cmd: command to execute. If :keyword:`None`, no command is executed.
:param timeout: how much time we should wait for command completion.
:return: :keyword:`True` if the command was successful or
:keyword:`False` if not or if the timeout was triggered.
"""
if cmd is None:
return True
class Alarm(Exception):
"""Exception to signal an alarm condition."""
pass
def alarm_handler(number, frame): # pylint: disable=W0613
"""Handle SIGALRM signal."""
raise Alarm()
logger.debug("Checking command %s", repr(cmd))
p = subprocess.Popen(cmd, shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
preexec_fn=os.setpgrp)
if timeout:
signal.signal(signal.SIGALRM, alarm_handler)
signal.alarm(timeout)
try:
stdout = None
stdout, _ = p.communicate()
if timeout:
signal.alarm(0)
if p.returncode != 0:
logger.warn("Check command was unsuccessful: %s",
p.returncode)
if stdout.strip():
logger.info("Output of check command: %s", stdout)
return False
logger.debug(
"Command was executed successfully %s %s", p.returncode, stdout)
return True
except Alarm:
logger.warn("Timeout (%s) while running check command %s",
timeout, cmd)
os.killpg(p.pid, signal.SIGKILL)
return False
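# Illustrative calls (hypothetical shell commands, not part of the original file):
#   check("curl -sf http://127.0.0.1/healthcheck", timeout=5)  # True if curl exits 0
#   check("false", timeout=5)                                   # False, non-zero exit code
#   check(None, timeout=5)                                      # True, nothing to run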
def loop(options):
"""Main loop."""
states = enum(
"INIT", # Initial state
"DISABLED", # Disabled state
"RISING", # Checks are currently succeeding.
"FALLING", # Checks are currently failing.
"UP", # Service is considered as up.
"DOWN", # Service is considered as down.
)
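# Typical transitions (illustrative sketch of the FSM implemented in one() below):
#   INIT -> RISING -> UP      after `--rise` consecutive successful checks
#   UP   -> FALLING -> DOWN   after `--fall` consecutive failed checks
#   any  -> DISABLED          while the `--disable` file exists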
def exabgp(target):
"""Communicate new state to ExaBGP"""
if target not in (states.UP, states.DOWN, states.DISABLED):
return
logger.info("send announces for %s state to ExaBGP", target)
metric = vars(options).get("{0}_metric".format(str(target).lower()))
for ip in options.ips:
if options.withdraw_on_down:
command = "announce" if target is states.UP else "withdraw"
else:
command = "announce"
announce = "route {0}/{1} next-hop {2}".format(
str(ip),
ip.max_prefixlen,
options.next_hop or "self")
if command == "announce":
announce = "{0} med {1}".format(announce, metric)
if options.community:
announce = "{0} community [ {1} ]".format(
announce,
options.community)
if options.as_path:
announce = "{0} as-path [ {1} ]".format(
announce,
options.as_path)
logger.debug("exabgp: %s %s", command, announce)
print("{0} {1}".format(command, announce))
metric += options.increase
sys.stdout.flush()
def trigger(target):
"""Trigger a state change and execute the appropriate commands"""
# Shortcut for RISING->UP and FALLING->DOWN
if target == states.RISING and options.rise <= 1:
target = states.UP
elif target == states.FALLING and options.fall <= 1:
target = states.DOWN
# Log and execute commands
logger.debug("Transition to %s", str(target))
cmds = []
cmds.extend(vars(options).get("{0}_execute".format(
str(target).lower()), []) or [])
cmds.extend(vars(options).get("execute", []) or [])
for cmd in cmds:
logger.debug("Transition to %s, execute `%s`",
str(target), cmd)
env = os.environ.copy()
env.update({"STATE": str(target)})
with open(os.devnull, "w") as fnull:
subprocess.call(
cmd, shell=True, stdout=fnull, stderr=fnull, env=env)
return target
def one(checks, state):
"""Execute one loop iteration."""
disabled = (options.disable is not None and
os.path.exists(options.disable))
successful = disabled or check(options.command, options.timeout)
# FSM
if state != states.DISABLED and disabled:
state = trigger(states.DISABLED)
elif state == states.INIT:
if successful and options.rise <= 1:
state = trigger(states.UP)
elif successful:
state = trigger(states.RISING)
checks = 1
else:
state = trigger(states.FALLING)
checks = 1
elif state == states.DISABLED:
if not disabled:
state = trigger(states.INIT)
elif state == states.RISING:
if successful:
checks += 1
if checks >= options.rise:
state = trigger(states.UP)
else:
state = trigger(states.FALLING)
checks = 1
elif state == states.FALLING:
if not successful:
checks += 1
if checks >= options.fall:
state = trigger(states.DOWN)
else:
state = trigger(states.RISING)
checks = 1
elif state == states.UP:
if not successful:
state = trigger(states.FALLING)
checks = 1
elif state == states.DOWN:
if successful:
state = trigger(states.RISING)
checks = 1
else:
raise ValueError("Unhandled state: {0}".format(str(state)))
# Send announces. We announce them on a regular basis in case
# we lose connection with a peer.
exabgp(state)
return checks, state
checks = 0
state = states.INIT
while True:
checks, state = one(checks, state)
# How much we should sleep?
if state in (states.FALLING, states.RISING):
time.sleep(options.fast)
else:
time.sleep(options.interval)
def main():
"""Entry point."""
options = parse()
setup_logging(options.debug, options.silent, options.name,
options.syslog_facility, not options.no_syslog)
if options.pid:
options.pid.write("{0}\n".format(os.getpid()))
options.pid.close()
try:
# Setup IP to use
options.ips = options.ips or loopback_ips(options.label)
if not options.ips:
logger.error("No IP found")
sys.exit(1)
if options.ip_setup:
setup_ips(options.ips, options.label)
drop_privileges(options.user, options.group)
options.ips = collections.deque(options.ips)
options.ips.rotate(-options.start_ip)
options.ips = list(options.ips)
# Main loop
loop(options)
except Exception as e: # pylint: disable=W0703
logger.exception("Uncaught exception: %s", e)
sys.exit(1)
if __name__ == "__main__":
main()
| []
| []
| []
| [] | [] | python | 0 | 0 | |
contrib/devtools/security-check.py | #!/usr/bin/env python
# Copyright (c) 2015-2017 The Crowncoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
Perform basic ELF security checks on a series of executables.
Exit status will be 0 if successful, and the program will be silent.
Otherwise the exit status will be 1 and it will log which executables failed which checks.
Needs `readelf` (for ELF) and `objdump` (for PE).
'''
from __future__ import division,print_function,unicode_literals
import subprocess
import sys
import os
READELF_CMD = os.getenv('READELF', '/usr/bin/readelf')
OBJDUMP_CMD = os.getenv('OBJDUMP', '/usr/bin/objdump')
NONFATAL = {'HIGH_ENTROPY_VA'} # checks which are non-fatal for now but only generate a warning
def check_ELF_PIE(executable):
'''
Check for position independent executable (PIE), allowing for address space randomization.
'''
p = subprocess.Popen([READELF_CMD, '-h', '-W', executable], stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
(stdout, stderr) = p.communicate()
if p.returncode:
raise IOError('Error opening file')
ok = False
for line in stdout.split(b'\n'):
line = line.split()
if len(line)>=2 and line[0] == b'Type:' and line[1] == b'DYN':
ok = True
return ok
def get_ELF_program_headers(executable):
'''Return type and flags for ELF program headers'''
p = subprocess.Popen([READELF_CMD, '-l', '-W', executable], stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
(stdout, stderr) = p.communicate()
if p.returncode:
raise IOError('Error opening file')
in_headers = False
count = 0
headers = []
for line in stdout.split(b'\n'):
if line.startswith(b'Program Headers:'):
in_headers = True
if line == b'':
in_headers = False
if in_headers:
if count == 1: # header line
ofs_typ = line.find(b'Type')
ofs_offset = line.find(b'Offset')
ofs_flags = line.find(b'Flg')
ofs_align = line.find(b'Align')
if ofs_typ == -1 or ofs_offset == -1 or ofs_flags == -1 or ofs_align == -1:
raise ValueError('Cannot parse readelf -lW output')
elif count > 1:
typ = line[ofs_typ:ofs_offset].rstrip()
flags = line[ofs_flags:ofs_align].rstrip()
headers.append((typ, flags))
count += 1
return headers
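# Illustrative `readelf -l -W` excerpt that the column-offset parsing above relies on
# (sample layout, not captured from a real binary):
#
#   Program Headers:
#     Type           Offset   VirtAddr           PhysAddr           FileSiz  MemSiz   Flg Align
#     LOAD           0x000000 0x0000000000400000 0x0000000000400000 0x000a48 0x000a48 R E 0x200000
#     GNU_STACK      0x000000 0x0000000000000000 0x0000000000000000 0x000000 0x000000 RW  0x10
#     GNU_RELRO      0x000df0 0x0000000000600df0 0x0000000000600df0 0x000210 0x000210 R   0x1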
def check_ELF_NX(executable):
'''
Check that no sections are writable and executable (including the stack)
'''
have_wx = False
have_gnu_stack = False
for (typ, flags) in get_ELF_program_headers(executable):
if typ == b'GNU_STACK':
have_gnu_stack = True
if b'W' in flags and b'E' in flags: # section is both writable and executable
have_wx = True
return have_gnu_stack and not have_wx
def check_ELF_RELRO(executable):
'''
Check for read-only relocations.
GNU_RELRO program header must exist
Dynamic section must have BIND_NOW flag
'''
have_gnu_relro = False
for (typ, flags) in get_ELF_program_headers(executable):
# Note: not checking flags == 'R': here as linkers set the permission differently
# This does not affect security: the permission flags of the GNU_RELRO program header are ignored, the PT_LOAD header determines the effective permissions.
# However, the dynamic linker needs to write to this area so these are RW.
# Glibc itself takes care of mprotecting this area R after relocations are finished.
# See also http://permalink.gmane.org/gmane.comp.gnu.binutils/71347
if typ == b'GNU_RELRO':
have_gnu_relro = True
have_bindnow = False
p = subprocess.Popen([READELF_CMD, '-d', '-W', executable], stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
(stdout, stderr) = p.communicate()
if p.returncode:
raise IOError('Error opening file')
for line in stdout.split(b'\n'):
tokens = line.split()
if len(tokens)>1 and tokens[1] == b'(BIND_NOW)' or (len(tokens)>2 and tokens[1] == b'(FLAGS)' and b'BIND_NOW' in tokens[2]):
have_bindnow = True
return have_gnu_relro and have_bindnow
def check_ELF_Canary(executable):
'''
Check for use of stack canary
'''
p = subprocess.Popen([READELF_CMD, '--dyn-syms', '-W', executable], stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
(stdout, stderr) = p.communicate()
if p.returncode:
raise IOError('Error opening file')
ok = False
for line in stdout.split(b'\n'):
if b'__stack_chk_fail' in line:
ok = True
return ok
def get_PE_dll_characteristics(executable):
'''
Get PE DllCharacteristics bits.
Returns a tuple (arch,bits) where arch is 'i386:x86-64' or 'i386'
and bits is the DllCharacteristics value.
'''
p = subprocess.Popen([OBJDUMP_CMD, '-x', executable], stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
(stdout, stderr) = p.communicate()
if p.returncode:
raise IOError('Error opening file')
arch = ''
bits = 0
for line in stdout.decode('utf-8', 'ignore').split('\n'):  # objdump output is bytes under Python 3
tokens = line.split()
if len(tokens)>=2 and tokens[0] == 'architecture:':
arch = tokens[1].rstrip(',')
if len(tokens)>=2 and tokens[0] == 'DllCharacteristics':
bits = int(tokens[1],16)
return (arch,bits)
IMAGE_DLL_CHARACTERISTICS_HIGH_ENTROPY_VA = 0x0020
IMAGE_DLL_CHARACTERISTICS_DYNAMIC_BASE = 0x0040
IMAGE_DLL_CHARACTERISTICS_NX_COMPAT = 0x0100
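# Illustrative bit test (hypothetical DllCharacteristics value, not read from a real PE):
#   bits = 0x0160  # HIGH_ENTROPY_VA | DYNAMIC_BASE | NX_COMPAT
#   (bits & IMAGE_DLL_CHARACTERISTICS_DYNAMIC_BASE) == IMAGE_DLL_CHARACTERISTICS_DYNAMIC_BASE  # True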
def check_PE_DYNAMIC_BASE(executable):
'''PIE: DllCharacteristics bit 0x40 signifies dynamicbase (ASLR)'''
(arch,bits) = get_PE_dll_characteristics(executable)
reqbits = IMAGE_DLL_CHARACTERISTICS_DYNAMIC_BASE
return (bits & reqbits) == reqbits
# On 64 bit, must support high-entropy 64-bit address space layout randomization in addition to DYNAMIC_BASE
# to have secure ASLR.
def check_PE_HIGH_ENTROPY_VA(executable):
'''PIE: DllCharacteristics bit 0x20 signifies high-entropy ASLR'''
(arch,bits) = get_PE_dll_characteristics(executable)
if arch == 'i386:x86-64':
reqbits = IMAGE_DLL_CHARACTERISTICS_HIGH_ENTROPY_VA
else: # Unnecessary on 32-bit
assert(arch == 'i386')
reqbits = 0
return (bits & reqbits) == reqbits
def check_PE_NX(executable):
'''NX: DllCharacteristics bit 0x100 signifies nxcompat (DEP)'''
(arch,bits) = get_PE_dll_characteristics(executable)
return (bits & IMAGE_DLL_CHARACTERISTICS_NX_COMPAT) == IMAGE_DLL_CHARACTERISTICS_NX_COMPAT
CHECKS = {
'ELF': [
('PIE', check_ELF_PIE),
('NX', check_ELF_NX),
('RELRO', check_ELF_RELRO),
('Canary', check_ELF_Canary)
],
'PE': [
('DYNAMIC_BASE', check_PE_DYNAMIC_BASE),
('HIGH_ENTROPY_VA', check_PE_HIGH_ENTROPY_VA),
('NX', check_PE_NX)
]
}
def identify_executable(executable):
with open(executable, 'rb') as f:
magic = f.read(4)
if magic.startswith(b'MZ'):
return 'PE'
elif magic.startswith(b'\x7fELF'):
return 'ELF'
return None
if __name__ == '__main__':
retval = 0
for filename in sys.argv[1:]:
try:
etype = identify_executable(filename)
if etype is None:
print('%s: unknown format' % filename)
retval = 1
continue
failed = []
warning = []
for (name, func) in CHECKS[etype]:
if not func(filename):
if name in NONFATAL:
warning.append(name)
else:
failed.append(name)
if failed:
print('%s: failed %s' % (filename, ' '.join(failed)))
retval = 1
if warning:
print('%s: warning %s' % (filename, ' '.join(warning)))
except IOError:
print('%s: cannot open' % filename)
retval = 1
sys.exit(retval)
| []
| []
| [
"OBJDUMP",
"READELF"
]
| [] | ["OBJDUMP", "READELF"] | python | 2 | 0 | |
datastore/cursors.go | package datastore
import (
"encoding/base64"
"errors"
"os"
"github.com/huysamen/gcpkit/utils/encryption"
"cloud.google.com/go/datastore"
)
func NewCursorCodecs() (encoder func(*datastore.Cursor) (string, error), decoder func(string) (*datastore.Cursor, error)) {
key := encryption.NewEncryptionKey(os.Getenv("GCPKIT_DATASTORE_CURSOR_SECRET"))
enc := func(cursor *datastore.Cursor) (string, error) {
if cursor == nil {
return "", errors.New("nil cursor")
}
encrypted, err := encryption.Encrypt([]byte(cursor.String()), key)
if err != nil {
return "", err
}
return base64.URLEncoding.EncodeToString(encrypted), nil
}
dec := func(encoded string) (*datastore.Cursor, error) {
if encoded == "" {
return nil, errors.New("cursor not valid")
}
decoded, err := base64.URLEncoding.DecodeString(encoded)
if err != nil {
return nil, err
}
decrypted, err := encryption.Decrypt(decoded, key)
if err != nil {
return nil, err
}
cursor, err := datastore.DecodeCursor(string(decrypted))
if err != nil {
return nil, err
}
return &cursor, nil
}
return enc, dec
}
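// Illustrative usage (a sketch; assumes a datastore query iterator `it` and that
// GCPKIT_DATASTORE_CURSOR_SECRET is set — none of this is part of the original file):
//
//	enc, dec := NewCursorCodecs()
//	cur, _ := it.Cursor()
//	token, _ := enc(&cur) // opaque, encrypted page token that is safe to expose to clients
//	next, _ := dec(token) // recover the *datastore.Cursor on the next request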
| [
"\"GCPKIT_DATASTORE_CURSOR_SECRET\""
]
| []
| [
"GCPKIT_DATASTORE_CURSOR_SECRET"
]
| [] | ["GCPKIT_DATASTORE_CURSOR_SECRET"] | go | 1 | 0 | |
src/main/python/systemds/context/systemds_context.py | # -------------------------------------------------------------
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# -------------------------------------------------------------
__all__ = ["SystemDSContext"]
import copy
import json
import os
import socket
import threading
import time
from glob import glob
from queue import Empty, Queue
from subprocess import PIPE, Popen
from threading import Thread
from time import sleep
from typing import Dict, Iterable, Sequence, Tuple, Union
import numpy as np
import pandas as pd
from py4j.java_gateway import GatewayParameters, JavaGateway
from py4j.protocol import Py4JNetworkError
from systemds.operator import Frame, Matrix, OperationNode, Scalar, Source
from systemds.script_building import OutputType
from systemds.utils.consts import VALID_INPUT_TYPES
from systemds.utils.helpers import get_module_dir
class SystemDSContext(object):
"""A context with a connection to a java instance with which SystemDS operations are executed.
The java process is started and is running using a random tcp port for instruction parsing."""
java_gateway: JavaGateway
def __init__(self, port: int = -1):
"""Starts a new instance of SystemDSContext, in which the connection to a JVM systemds instance is handled
Any new instance of this SystemDS Context would start a separate new JVM.
Standard out and standard error form the JVM is also handled in this class, filling up Queues,
that can be read from to get the printed statements from the JVM.
"""
command = self.__build_startup_command()
process, port = self.__try_startup(command, port)
# Handle Std out from the subprocess.
self.__stdout = Queue()
self.__stderr = Queue()
self.__stdout_thread = Thread(target=self.__enqueue_output, args=(
process.stdout, self.__stdout), daemon=True)
self.__stderr_thread = Thread(target=self.__enqueue_output, args=(
process.stderr, self.__stderr), daemon=True)
self.__stdout_thread.start()
self.__stderr_thread.start()
# Py4j connect to the started process.
gwp = GatewayParameters(port=port, eager_load=True)
self.java_gateway = JavaGateway(
gateway_parameters=gwp, java_process=process)
def get_stdout(self, lines: int = -1):
"""Getter for the stdout of the java subprocess
The output is taken from the stdout queue and returned in a new list.
:param lines: The number of lines to try to read from the stdout queue.
default -1 prints all current lines in the queue.
"""
if lines == -1 or self.__stdout.qsize() < lines:
return [self.__stdout.get() for x in range(self.__stdout.qsize())]
else:
return [self.__stdout.get() for x in range(lines)]
def get_stderr(self, lines: int = -1):
"""Getter for the stderr of the java subprocess
The output is taken from the stderr queue and returned in a new list.
:param lines: The number of lines to try to read from the stderr queue.
default -1 prints all current lines in the queue.
"""
if lines == -1 or self.__stderr.qsize() < lines:
return [self.__stderr.get() for x in range(self.__stderr.qsize())]
else:
return [self.__stderr.get() for x in range(lines)]
def exception_and_close(self, e: Exception):
"""
Method for printing exception, printing stdout and error, while also closing the context correctly.
:param e: the exception thrown
"""
# e = sys.exc_info()[0]
message = "Exception Encountered! closing JVM\n"
message += "standard out :\n" + "\n".join(self.get_stdout())
message += "standard error :\n" + "\n".join(self.get_stderr())
message += "Exception : " + str(e)
self.close()
raise RuntimeError(message)
def __try_startup(self, command, port, rep=0):
""" Try to perform startup of system.
:param command: The command to execute for starting JMLC content
:param port: The port to try to connect to.
:param rep: The number of repeated tries to startup the jvm.
"""
if port == -1:
assignedPort = self.__get_open_port()
elif rep == 0:
assignedPort = port
else:
assignedPort = self.__get_open_port()
fullCommand = []
fullCommand.extend(command)
fullCommand.append(str(assignedPort))
process = Popen(fullCommand, stdout=PIPE, stdin=PIPE, stderr=PIPE)
try:
self.__verify_startup(process)
return process, assignedPort
except Exception as e:
self.close()
if rep > 3:
raise Exception(
"Failed to start SystemDS context with " + str(rep) + " repeated tries")
else:
rep += 1
print("Failed to startup JVM process, retrying: " + str(rep))
sleep(0.5)
return self.__try_startup(command, port, rep)
def __verify_startup(self, process):
first_stdout = process.stdout.readline()
if(not b"GatewayServer Started" in first_stdout):
stderr = process.stderr.readline().decode("utf-8")
if(len(stderr) > 1):
raise Exception(
"Exception in startup of GatewayServer: " + stderr)
outputs = []
outputs.append(first_stdout.decode("utf-8"))
max_tries = 10
for i in range(max_tries):
next_line = process.stdout.readline()
if(b"GatewayServer Started" in next_line):
print("WARNING: Stdout corrupted by prints: " + str(outputs))
print("Startup success")
break
else:
outputs.append(next_line)
if (i == max_tries-1):
raise Exception("Error in startup of systemDS gateway process: \n gateway StdOut: " + str(
outputs) + " \n gateway StdErr" + process.stderr.readline().decode("utf-8"))
def __build_startup_command(self):
command = ["java", "-cp"]
root = os.environ.get("SYSTEMDS_ROOT")
if root == None:
# If there is no systemds install default to use the PIP packaged java files.
root = os.path.join(get_module_dir(), "systemds-java")
# nt means its Windows
cp_separator = ";" if os.name == "nt" else ":"
if os.environ.get("SYSTEMDS_ROOT") != None:
lib_cp = os.path.join(root, "target", "lib", "*")
systemds_cp = os.path.join(root, "target", "SystemDS.jar")
classpath = cp_separator.join([lib_cp, systemds_cp])
command.append(classpath)
files = glob(os.path.join(root, "conf", "log4j*.properties"))
if len(files) > 1:
print(
"WARNING: Multiple logging files found selecting: " + files[0])
if len(files) == 0:
print("WARNING: No log4j file found at: "
+ os.path.join(root, "conf")
+ " therefore using default settings")
else:
command.append("-Dlog4j.configuration=file:" + files[0])
else:
lib_cp = os.path.join(root, "lib", "*")
command.append(lib_cp)
command.append("org.apache.sysds.api.PythonDMLScript")
return command
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.close()
# no errors to handle to allow continuation
return None
def close(self):
"""Close the connection to the java process and do necessary cleanup."""
if(self.__stdout_thread.is_alive()):
self.__stdout_thread.join(0)
if(self.__stdout_thread.is_alive()):
self.__stderr_thread.join(0)
pid = self.java_gateway.java_process.pid
if self.java_gateway.java_gateway_server is not None:
try:
self.java_gateway.shutdown(True)
except Py4JNetworkError as e:
if "Gateway is not connected" not in str(e):
self.java_gateway.java_process.kill()
os.kill(pid, 14)
def __enqueue_output(self, out, queue):
"""Method for handling the output from java.
It moves the string handling into a separate thread, since 'out.readline' is a blocking call.
"""
for line in iter(out.readline, b""):
queue.put(line.decode("utf-8").strip())
def __get_open_port(self):
"""Get a random available port.
and hope that no other process steals it while we wait for the JVM to startup
"""
# https://stackoverflow.com/questions/2838244/get-open-tcp-port-in-python
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind(("", 0))
s.listen(1)
port = s.getsockname()[1]
s.close()
return port
def full(self, shape: Tuple[int, int], value: Union[float, int]) -> 'Matrix':
"""Generates a matrix completely filled with a value
:param sds_context: SystemDS context
:param shape: shape (rows and cols) of the matrix TODO tensor
:param value: the value to fill all cells with
:return: the OperationNode representing this operation
"""
unnamed_input_nodes = [value]
named_input_nodes = {'rows': shape[0], 'cols': shape[1]}
return Matrix(self, 'matrix', unnamed_input_nodes, named_input_nodes)
def seq(self, start: Union[float, int], stop: Union[float, int] = None,
step: Union[float, int] = 1) -> 'Matrix':
"""Create a single column vector with values from `start` to `stop` and an increment of `step`.
If no stop is defined and only one parameter is given, then start will be 0 and the parameter will be interpreted as
stop.
:param sds_context: SystemDS context
:param start: the starting value
:param stop: the maximum value
:param step: the step size
:return: the OperationNode representing this operation
"""
if stop is None:
stop = start
start = 0
unnamed_input_nodes = [start, stop, step]
return Matrix(self, 'seq', unnamed_input_nodes)
def rand(self, rows: int, cols: int,
min: Union[float, int] = None, max: Union[float, int] = None, pdf: str = "uniform",
sparsity: Union[float, int] = None, seed: Union[float, int] = None,
lambd: Union[float, int] = 1) -> 'Matrix':
"""Generates a matrix filled with random values
:param sds_context: SystemDS context
:param rows: number of rows
:param cols: number of cols
:param min: min value for cells
:param max: max value for cells
:param pdf: "uniform"/"normal"/"poisson" distribution
:param sparsity: fraction of non-zero cells
:param seed: random seed
:param lambd: lambda value for the "poisson" distribution
:return:
"""
available_pdfs = ["uniform", "normal", "poisson"]
if rows < 0:
raise ValueError("In rand statement, can only assign rows a long (integer) value >= 0 "
"-- attempted to assign value: {r}".format(r=rows))
if cols < 0:
raise ValueError("In rand statement, can only assign cols a long (integer) value >= 0 "
"-- attempted to assign value: {c}".format(c=cols))
if pdf not in available_pdfs:
raise ValueError("The pdf passed is invalid! given: {g}, expected: {e}".format(
g=pdf, e=available_pdfs))
pdf = '\"' + pdf + '\"'
named_input_nodes = {
'rows': rows, 'cols': cols, 'pdf': pdf, 'lambda': lambd}
if min is not None:
named_input_nodes['min'] = min
if max is not None:
named_input_nodes['max'] = max
if sparsity is not None:
named_input_nodes['sparsity'] = sparsity
if seed is not None:
named_input_nodes['seed'] = seed
return Matrix(self, 'rand', [], named_input_nodes=named_input_nodes)
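# Illustrative usage (assumed API surface, not part of the original module):
#   with SystemDSContext() as sds:
#       m = sds.rand(rows=5, cols=3, min=0.0, max=1.0, sparsity=0.5, seed=42)
#       print(m.compute())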
def read(self, path: os.PathLike, **kwargs: Dict[str, VALID_INPUT_TYPES]) -> OperationNode:
""" Read a file from disk. Supported types include:
CSV, Matrix Market (coordinate), Text (i,j,v), SystemDS Binary
See: http://apache.github.io/systemds/site/dml-language-reference#readwrite-built-in-functions for more details
:return: an Operation Node, containing the read data.
"""
mdt_filepath = path + ".mtd"
if os.path.exists(mdt_filepath):
with open(mdt_filepath) as jspec_file:
mtd = json.load(jspec_file)
kwargs["data_type"] = mtd["data_type"]
data_type = kwargs.get("data_type", None)
file_format = kwargs.get("format", None)
if data_type == "matrix":
kwargs["data_type"] = f'"{data_type}"'
return Matrix(self, "read", [f'"{path}"'], named_input_nodes=kwargs)
elif data_type == "frame":
kwargs["data_type"] = f'"{data_type}"'
if isinstance(file_format, str):
kwargs["format"] = f'"{kwargs["format"]}"'
return Frame(self, "read", [f'"{path}"'], named_input_nodes=kwargs)
elif data_type == "scalar":
kwargs["data_type"] = f'"{data_type}"'
output_type = OutputType.from_str(kwargs.get("value_type", None))
kwargs["value_type"] = f'"{output_type.name}"'
return Scalar(self, "read", [f'"{path}"'], named_input_nodes=kwargs, output_type=output_type)
print("WARNING: Unknown data type for read; please add a .mtd file or specify the data type in the arguments")
return OperationNode(self, "read", [f'"{path}"'], named_input_nodes=kwargs)
def scalar(self, v: Dict[str, VALID_INPUT_TYPES]) -> 'Scalar':
""" Construct a scalar value; it can contain str, float, double, integers and booleans.
:return: An `OperationNode` containing the scalar value.
"""
if type(v) is str:
if not ((v[0] == '"' and v[-1] == '"') or (v[0] == "'" and v[-1] == "'")):
v = f'"{v}"'
# output type assign simply assigns the given variable to the value
# therefore the output type is assign.
return Scalar(self, v, assign=True, output_type=OutputType.from_str(v))
def from_numpy(self, mat: np.array,
*args: Sequence[VALID_INPUT_TYPES],
**kwargs: Dict[str, VALID_INPUT_TYPES]) -> Matrix:
"""Generate DAGNode representing matrix with data given by a numpy array, which will be sent to SystemDS
when needed.
:param mat: the numpy array
:param args: unnamed parameters
:param kwargs: named parameters
"""
unnamed_params = ['\'./tmp/{file_name}\'']
if len(mat.shape) == 2:
named_params = {'rows': mat.shape[0], 'cols': mat.shape[1]}
elif len(mat.shape) == 1:
named_params = {'rows': mat.shape[0], 'cols': 1}
else:
# TODO Support tensors.
raise ValueError("Only two dimensional arrays supported")
unnamed_params.extend(args)
named_params.update(kwargs)
return Matrix(self, 'read', unnamed_params, named_params, local_data=mat)
def from_pandas(self, df: pd.DataFrame,
*args: Sequence[VALID_INPUT_TYPES], **kwargs: Dict[str, VALID_INPUT_TYPES]) -> Frame:
"""Generate DAGNode representing frame with data given by a pandas dataframe, which will be sent to SystemDS
when needed.
:param df: the pandas dataframe
:param args: unnamed parameters
:param kwargs: named parameters
"""
unnamed_params = ["'./tmp/{file_name}'"]
if len(df.shape) == 2:
named_params = {'rows': df.shape[0], 'cols': df.shape[1]}
elif len(df.shape) == 1:
named_params = {'rows': df.shape[0], 'cols': 1}
else:
# TODO Support tensors.
raise ValueError("Only two dimensional arrays supported")
unnamed_params.extend(args)
named_params["data_type"] = '"frame"'
self._pd_dataframe = df
named_params.update(kwargs)
return Frame(self, "read", unnamed_params, named_params, local_data=df)
def federated(self, addresses: Iterable[str],
ranges: Iterable[Tuple[Iterable[int], Iterable[int]]], *args,
**kwargs: Dict[str, VALID_INPUT_TYPES]) -> Matrix:
"""Create federated matrix object.
:param sds_context: the SystemDS context
:param addresses: addresses of the federated workers
:param ranges: for each federated worker a pair of begin and end index of their held matrix
:param args: unnamed params
:param kwargs: named params
:return: the OperationNode representing this operation
"""
addresses_str = 'list(' + \
','.join(map(lambda s: f'"{s}"', addresses)) + ')'
ranges_str = 'list('
for begin, end in ranges:
ranges_str += f'list({",".join(map(str, begin))}), list({",".join(map(str, end))}),'
ranges_str = ranges_str[:-1]
ranges_str += ')'
named_params = {'addresses': addresses_str, 'ranges': ranges_str}
named_params.update(kwargs)
return Matrix(self, 'federated', args, named_params)
def source(self, path: str, name: str, print_imported_methods: bool = False):
"""Import methods from a given dml file.
The importing is done thorugh the DML command source, and adds all defined methods from
the script to the Source object returned in python. This gives the flexibility to call the methods
directly on the object returned.
In systemds a method called func_01 can then be imported using
```python
res = self.sds.source("PATH_TO_FILE", "UNIQUE_NAME").func_01().compute(verbose = True)
```
:param path: The absolute or relative path to the file to import
:param name: The name to give the imported file in the script, this name must be unique
:param print_imported_methods: boolean specifying if the imported methods should be printed.
"""
return Source(self, path, name, print_imported_methods)
| []
| []
| [
"SYSTEMDS_ROOT"
]
| [] | ["SYSTEMDS_ROOT"] | python | 1 | 0 | |
src/backend/common/environment/environment.py | import enum
import os
from typing import Optional
from backend.common.environment.tasks import TasksRemoteConfig
@enum.unique
class EnvironmentMode(enum.Enum):
LOCAL = "local"
REMOTE = "remote"
# Mostly GAE env variables
# See https://cloud.google.com/appengine/docs/standard/python3/runtime#environment_variables
class Environment(object):
@staticmethod
def is_dev() -> bool:
return os.environ.get("GAE_ENV") == "localdev"
@staticmethod
def is_prod() -> bool:
env = os.environ.get("GAE_ENV")
return env is not None and env.startswith("standard")
@staticmethod
def service() -> Optional[str]:
return os.environ.get("GAE_SERVICE", None)
@staticmethod
def service_for_current_service() -> str:
# Get current service - otherwise, fallback on default service
service = Environment.service()
return service if service else "default"
@staticmethod
def project() -> Optional[str]:
return os.environ.get("GOOGLE_CLOUD_PROJECT", None)
@staticmethod
def log_level() -> Optional[str]:
return os.environ.get("TBA_LOG_LEVEL")
@staticmethod
def tasks_mode() -> EnvironmentMode:
return EnvironmentMode(os.environ.get("TASKS_MODE", "local"))
@staticmethod
def tasks_remote_config() -> Optional[TasksRemoteConfig]:
remote_config_ngrok_url = os.environ.get("TASKS_REMOTE_CONFIG_NGROK_URL", None)
if not remote_config_ngrok_url:
return None
return TasksRemoteConfig(ngrok_url=remote_config_ngrok_url)
@staticmethod
def ndb_log_level() -> Optional[str]:
return os.environ.get("NDB_LOG_LEVEL")
@staticmethod
def redis_url() -> Optional[str]:
return os.environ.get("REDIS_CACHE_URL")
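# Illustrative call sites (hypothetical, not part of the original module):
#   if Environment.is_dev():                        # GAE_ENV == "localdev"
#       redis_url = Environment.redis_url()         # REDIS_CACHE_URL, may be None locally
#   if Environment.tasks_mode() == EnvironmentMode.REMOTE:
#       remote = Environment.tasks_remote_config()  # needs TASKS_REMOTE_CONFIG_NGROK_URL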
| []
| []
| [
"TBA_LOG_LEVEL",
"NDB_LOG_LEVEL",
"REDIS_CACHE_URL",
"GAE_SERVICE",
"GAE_ENV",
"TASKS_MODE",
"GOOGLE_CLOUD_PROJECT",
"TASKS_REMOTE_CONFIG_NGROK_URL"
]
| [] | ["TBA_LOG_LEVEL", "NDB_LOG_LEVEL", "REDIS_CACHE_URL", "GAE_SERVICE", "GAE_ENV", "TASKS_MODE", "GOOGLE_CLOUD_PROJECT", "TASKS_REMOTE_CONFIG_NGROK_URL"] | python | 8 | 0 | |
WatchDogs_Visualisation/oldApps/tweet-map/venv2/lib/python3.7/site-packages/dash/dash.py | from __future__ import print_function
import itertools
import os
import random
import sys
import collections
import importlib
import json
import pkgutil
import threading
import warnings
import re
import logging
from functools import wraps
import plotly
import dash_renderer
import flask
from flask import Flask, Response
from flask_compress import Compress
from .dependencies import Event, Input, Output, State
from .resources import Scripts, Css
from .development.base_component import Component
from . import exceptions
from ._utils import AttributeDict as _AttributeDict
from ._utils import interpolate_str as _interpolate
from ._utils import format_tag as _format_tag
from ._utils import generate_hash as _generate_hash
from . import _watch
from ._utils import get_asset_path as _get_asset_path
from . import _configs
_default_index = '''<!DOCTYPE html>
<html>
<head>
{%metas%}
<title>{%title%}</title>
{%favicon%}
{%css%}
</head>
<body>
{%app_entry%}
<footer>
{%config%}
{%scripts%}
</footer>
</body>
</html>'''
_app_entry = '''
<div id="react-entry-point">
<div class="_dash-loading">
Loading...
</div>
</div>
'''
_re_index_entry = re.compile(r'{%app_entry%}')
_re_index_config = re.compile(r'{%config%}')
_re_index_scripts = re.compile(r'{%scripts%}')
_re_index_entry_id = re.compile(r'id="react-entry-point"')
_re_index_config_id = re.compile(r'id="_dash-config"')
_re_index_scripts_id = re.compile(r'src=".*dash[-_]renderer.*"')
# pylint: disable=too-many-instance-attributes
# pylint: disable=too-many-arguments, too-many-locals
class Dash(object):
def __init__(
self,
name='__main__',
server=None,
static_folder='static',
assets_folder=None,
assets_url_path='/assets',
assets_ignore='',
include_assets_files=True,
url_base_pathname=None,
assets_external_path=None,
requests_pathname_prefix=None,
routes_pathname_prefix=None,
compress=True,
meta_tags=None,
index_string=_default_index,
external_scripts=None,
external_stylesheets=None,
suppress_callback_exceptions=None,
components_cache_max_age=None,
**kwargs):
# pylint-disable: too-many-instance-attributes
if 'csrf_protect' in kwargs:
warnings.warn('''
`csrf_protect` is no longer used,
CSRF protection has been removed as it is no longer
necessary.
See https://github.com/plotly/dash/issues/141 for details.
''', DeprecationWarning)
name = name if server is None else server.name
self._assets_folder = assets_folder or os.path.join(
flask.helpers.get_root_path(name), 'assets'
)
self._assets_url_path = assets_url_path
# allow users to supply their own flask server
self.server = server or Flask(name, static_folder=static_folder)
if 'assets' not in self.server.blueprints:
self.server.register_blueprint(
flask.Blueprint('assets', 'assets',
static_folder=self._assets_folder,
static_url_path=assets_url_path))
env_configs = _configs.env_configs()
url_base_pathname, routes_pathname_prefix, requests_pathname_prefix = \
_configs.pathname_configs(
url_base_pathname,
routes_pathname_prefix,
requests_pathname_prefix,
environ_configs=env_configs)
self.url_base_pathname = url_base_pathname
self.config = _AttributeDict({
'suppress_callback_exceptions': _configs.get_config(
'suppress_callback_exceptions',
suppress_callback_exceptions, env_configs, False
),
'routes_pathname_prefix': routes_pathname_prefix,
'requests_pathname_prefix': requests_pathname_prefix,
'include_assets_files': _configs.get_config(
'include_assets_files',
include_assets_files,
env_configs,
True),
'assets_external_path': _configs.get_config(
'assets_external_path', assets_external_path, env_configs, ''),
'components_cache_max_age': int(_configs.get_config(
'components_cache_max_age', components_cache_max_age,
env_configs, 2678400))
})
# list of dependencies
self.callback_map = {}
self._index_string = ''
self.index_string = index_string
self._meta_tags = meta_tags or []
self._favicon = None
if compress:
# gzip
Compress(self.server)
@self.server.errorhandler(exceptions.PreventUpdate)
def _handle_error(error):
"""Handle a halted callback and return an empty 204 response"""
print(error, file=sys.stderr)
return ('', 204)
# static files from the packages
self.css = Css()
self.scripts = Scripts()
self._external_scripts = external_scripts or []
self._external_stylesheets = external_stylesheets or []
self.assets_ignore = assets_ignore
self.registered_paths = collections.defaultdict(set)
# urls
self.routes = []
self._add_url(
'{}_dash-layout'.format(self.config['routes_pathname_prefix']),
self.serve_layout)
self._add_url(
'{}_dash-dependencies'.format(
self.config['routes_pathname_prefix']),
self.dependencies)
self._add_url(
'{}_dash-update-component'.format(
self.config['routes_pathname_prefix']),
self.dispatch,
['POST'])
self._add_url((
'{}_dash-component-suites'
'/<string:package_name>'
'/<path:path_in_package_dist>').format(
self.config['routes_pathname_prefix']),
self.serve_component_suites)
self._add_url(
'{}_dash-routes'.format(self.config['routes_pathname_prefix']),
self.serve_routes)
self._add_url(
self.config['routes_pathname_prefix'],
self.index)
self._add_url(
'{}_reload-hash'.format(self.config['routes_pathname_prefix']),
self.serve_reload_hash)
# catch-all for front-end routes, used by dcc.Location
self._add_url(
'{}<path:path>'.format(self.config['routes_pathname_prefix']),
self.index)
self._add_url(
'{}_favicon.ico'.format(self.config['routes_pathname_prefix']),
self._serve_default_favicon)
self.server.before_first_request(self._setup_server)
self._layout = None
self._cached_layout = None
self._dev_tools = _AttributeDict({
'serve_dev_bundles': False,
'hot_reload': False,
'hot_reload_interval': 3000,
'hot_reload_watch_interval': 0.5,
'hot_reload_max_retry': 8
})
# add a handler for components suites errors to return 404
self.server.errorhandler(exceptions.InvalidResourceError)(
self._invalid_resources_handler)
self._assets_files = []
# hot reload
self._reload_hash = None
self._hard_reload = False
self._lock = threading.RLock()
self._watch_thread = None
self._changed_assets = []
self.logger = logging.getLogger(name)
self.logger.addHandler(logging.StreamHandler(stream=sys.stdout))
def _add_url(self, name, view_func, methods=('GET',)):
self.server.add_url_rule(
name,
view_func=view_func,
endpoint=name,
methods=list(methods))
# record the url in Dash.routes so that it can be accessed later
# e.g. for adding authentication with flask_login
self.routes.append(name)
@property
def layout(self):
return self._layout
def _layout_value(self):
if isinstance(self._layout, collections.Callable):
self._cached_layout = self._layout()
else:
self._cached_layout = self._layout
return self._cached_layout
@layout.setter
def layout(self, value):
if (not isinstance(value, Component) and
not isinstance(value, collections.Callable)):
raise Exception(
''
'Layout must be a dash component '
'or a function that returns '
'a dash component.')
self._layout = value
layout_value = self._layout_value()
# pylint: disable=protected-access
self.css._update_layout(layout_value)
self.scripts._update_layout(layout_value)
@property
def index_string(self):
return self._index_string
@index_string.setter
def index_string(self, value):
checks = (
(_re_index_entry.search(value), 'app_entry'),
(_re_index_config.search(value), 'config',),
(_re_index_scripts.search(value), 'scripts'),
)
missing = [missing for check, missing in checks if not check]
if missing:
raise Exception(
'Did you forget to include {} in your index string ?'.format(
', '.join('{%' + x + '%}' for x in missing)
)
)
self._index_string = value
def serve_layout(self):
layout = self._layout_value()
# TODO - Set browser cache limit - pass hash into frontend
return flask.Response(
json.dumps(layout,
cls=plotly.utils.PlotlyJSONEncoder),
mimetype='application/json'
)
def _config(self):
config = {
'url_base_pathname': self.url_base_pathname,
'requests_pathname_prefix': self.config['requests_pathname_prefix']
}
if self._dev_tools.hot_reload:
config['hot_reload'] = {
'interval': self._dev_tools.hot_reload_interval,
'max_retry': self._dev_tools.hot_reload_max_retry
}
return config
def serve_reload_hash(self):
hard = self._hard_reload
changed = self._changed_assets
self._lock.acquire()
self._hard_reload = False
self._changed_assets = []
self._lock.release()
return flask.jsonify({
'reloadHash': self._reload_hash,
'hard': hard,
'packages': list(self.registered_paths.keys()),
'files': list(changed)
})
def serve_routes(self):
return flask.Response(
json.dumps(self.routes,
cls=plotly.utils.PlotlyJSONEncoder),
mimetype='application/json'
)
def _collect_and_register_resources(self, resources):
# now needs the app context.
# template in the necessary component suite JS bundles
# add the version number of the package as a query parameter
# for cache busting
def _relative_url_path(relative_package_path='', namespace=''):
# track the registered packages
self.registered_paths[namespace].add(relative_package_path)
module_path = os.path.join(
os.path.dirname(sys.modules[namespace].__file__),
relative_package_path)
modified = int(os.stat(module_path).st_mtime)
return '{}_dash-component-suites/{}/{}?v={}&m={}'.format(
self.config['requests_pathname_prefix'],
namespace,
relative_package_path,
importlib.import_module(namespace).__version__,
modified
)
srcs = []
for resource in resources:
if 'relative_package_path' in resource:
if isinstance(resource['relative_package_path'], str):
srcs.append(_relative_url_path(**resource))
else:
for rel_path in resource['relative_package_path']:
srcs.append(_relative_url_path(
relative_package_path=rel_path,
namespace=resource['namespace']
))
elif 'external_url' in resource:
if isinstance(resource['external_url'], str):
srcs.append(resource['external_url'])
else:
for url in resource['external_url']:
srcs.append(url)
elif 'absolute_path' in resource:
raise Exception(
'Serving files from absolute_path isn\'t supported yet'
)
elif 'asset_path' in resource:
static_url = self.get_asset_url(resource['asset_path'])
# Add a bust query param
static_url += '?m={}'.format(resource['ts'])
srcs.append(static_url)
return srcs
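# Illustrative URL emitted by _relative_url_path above (hypothetical package and version):
#   /_dash-component-suites/dash_renderer/dash_renderer.min.js?v=0.16.0&m=1546300800
# The `v` (package version) and `m` (file mtime) query parameters act as cache busters.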
def _generate_css_dist_html(self):
links = self._external_stylesheets + \
self._collect_and_register_resources(self.css.get_all_css())
return '\n'.join([
_format_tag('link', link, opened=True)
if isinstance(link, dict)
else '<link rel="stylesheet" href="{}">'.format(link)
for link in links
])
def _generate_scripts_html(self):
# Dash renderer has dependencies like React which need to be rendered
# before every other script. However, the dash renderer bundle
# itself needs to be rendered after all of the component's
# scripts have rendered.
# The rest of the scripts can just be loaded after React but before
# dash renderer.
# pylint: disable=protected-access
srcs = self._collect_and_register_resources(
self.scripts._resources._filter_resources(
dash_renderer._js_dist_dependencies,
dev_bundles=self._dev_tools.serve_dev_bundles
)) + self._external_scripts + self._collect_and_register_resources(
self.scripts.get_all_scripts(
dev_bundles=self._dev_tools.serve_dev_bundles) +
self.scripts._resources._filter_resources(
dash_renderer._js_dist,
dev_bundles=self._dev_tools.serve_dev_bundles
))
return '\n'.join([
_format_tag('script', src)
if isinstance(src, dict)
else '<script src="{}"></script>'.format(src)
for src in srcs
])
def _generate_config_html(self):
return (
'<script id="_dash-config" type="application/json">'
'{}'
'</script>'
).format(json.dumps(self._config()))
def _generate_meta_html(self):
has_ie_compat = any(
x.get('http-equiv', '') == 'X-UA-Compatible'
for x in self._meta_tags)
has_charset = any('charset' in x for x in self._meta_tags)
tags = []
if not has_ie_compat:
tags.append(
'<meta http-equiv="X-UA-Compatible" content="IE=edge">'
)
if not has_charset:
tags.append('<meta charset="UTF-8">')
tags = tags + [
_format_tag('meta', x, opened=True) for x in self._meta_tags
]
return '\n '.join(tags)
# Serve the JS bundles for each package
def serve_component_suites(self, package_name, path_in_package_dist):
if package_name not in self.registered_paths:
raise exceptions.InvalidResourceError(
'Error loading dependency.\n'
'"{}" is not a registered library.\n'
'Registered libraries are: {}'
.format(package_name, list(self.registered_paths.keys())))
elif path_in_package_dist not in self.registered_paths[package_name]:
raise exceptions.InvalidResourceError(
'"{}" is registered but the path requested is not valid.\n'
'The path requested: "{}"\n'
'List of registered paths: {}'
.format(
package_name,
path_in_package_dist,
self.registered_paths
)
)
mimetype = ({
'js': 'application/JavaScript',
'css': 'text/css'
})[path_in_package_dist.split('.')[-1]]
headers = {
'Cache-Control': 'public, max-age={}'.format(
self.config.components_cache_max_age)
}
return Response(
pkgutil.get_data(package_name, path_in_package_dist),
mimetype=mimetype,
headers=headers
)
def index(self, *args, **kwargs): # pylint: disable=unused-argument
scripts = self._generate_scripts_html()
css = self._generate_css_dist_html()
config = self._generate_config_html()
metas = self._generate_meta_html()
title = getattr(self, 'title', 'Dash')
if self._favicon:
favicon_mod_time = os.path.getmtime(
os.path.join(self._assets_folder, self._favicon))
favicon_url = self.get_asset_url(self._favicon) + '?m={}'.format(
favicon_mod_time
)
else:
favicon_url = '{}_favicon.ico'.format(
self.config.requests_pathname_prefix)
favicon = _format_tag('link', {
'rel': 'icon',
'type': 'image/x-icon',
'href': favicon_url
}, opened=True)
index = self.interpolate_index(
metas=metas, title=title, css=css, config=config,
scripts=scripts, app_entry=_app_entry, favicon=favicon)
checks = (
(_re_index_entry_id.search(index), '#react-entry-point'),
            (_re_index_config_id.search(index), '#_dash-config'),
(_re_index_scripts_id.search(index), 'dash-renderer'),
)
missing = [missing for check, missing in checks if not check]
if missing:
plural = 's' if len(missing) > 1 else ''
raise Exception(
'Missing element{pl} {ids} in index.'.format(
ids=', '.join(missing),
pl=plural
)
)
return index
def interpolate_index(self,
metas='', title='', css='', config='',
scripts='', app_entry='', favicon=''):
"""
        Called to create the initial HTML string that is loaded on the page.
        Override this method to provide your own custom HTML.
:Example:
class MyDash(dash.Dash):
def interpolate_index(self, **kwargs):
return '''
<!DOCTYPE html>
<html>
<head>
<title>My App</title>
</head>
<body>
<div id="custom-header">My custom header</div>
{app_entry}
{config}
{scripts}
<div id="custom-footer">My custom footer</div>
</body>
</html>
'''.format(
app_entry=kwargs.get('app_entry'),
config=kwargs.get('config'),
scripts=kwargs.get('scripts'))
:param metas: Collected & formatted meta tags.
:param title: The title of the app.
:param css: Collected & formatted css dependencies as <link> tags.
:param config: Configs needed by dash-renderer.
:param scripts: Collected & formatted scripts tags.
:param app_entry: Where the app will render.
:param favicon: A favicon <link> tag if found in assets folder.
:return: The interpolated HTML string for the index.
"""
return _interpolate(self.index_string,
metas=metas,
title=title,
css=css,
config=config,
scripts=scripts,
favicon=favicon,
app_entry=app_entry)
def dependencies(self):
return flask.jsonify([
{
'output': {
'id': k.split('.')[0],
'property': k.split('.')[1]
},
'inputs': v['inputs'],
'state': v['state'],
'events': v['events']
} for k, v in self.callback_map.items()
])
# pylint: disable=unused-argument, no-self-use
def react(self, *args, **kwargs):
raise exceptions.DashException(
'Yo! `react` is no longer used. \n'
'Use `callback` instead. `callback` has a new syntax too, '
'so make sure to call `help(app.callback)` to learn more.')
def _validate_callback(self, output, inputs, state, events):
# pylint: disable=too-many-branches
layout = self._cached_layout or self._layout_value()
if (layout is None and
not self.config.first('suppress_callback_exceptions',
'supress_callback_exceptions')):
# Without a layout, we can't do validation on the IDs and
# properties of the elements in the callback.
raise exceptions.LayoutIsNotDefined('''
Attempting to assign a callback to the application but
the `layout` property has not been assigned.
Assign the `layout` property before assigning callbacks.
Alternatively, suppress this warning by setting
`app.config['suppress_callback_exceptions']=True`
'''.replace(' ', ''))
for args, obj, name in [([output], Output, 'Output'),
(inputs, Input, 'Input'),
(state, State, 'State'),
(events, Event, 'Event')]:
if not isinstance(args, list):
raise exceptions.IncorrectTypeException(
'The {} argument `{}` is '
'not a list of `dash.dependencies.{}`s.'.format(
name.lower(), str(args), name
))
for arg in args:
if not isinstance(arg, obj):
raise exceptions.IncorrectTypeException(
'The {} argument `{}` is '
'not of type `dash.{}`.'.format(
name.lower(), str(arg), name
))
if (not self.config.first('suppress_callback_exceptions',
'supress_callback_exceptions') and
arg.component_id not in layout and
arg.component_id != getattr(layout, 'id', None)):
raise exceptions.NonExistantIdException('''
Attempting to assign a callback to the
component with the id "{}" but no
components with id "{}" exist in the
app\'s layout.\n\n
Here is a list of IDs in layout:\n{}\n\n
If you are assigning callbacks to components
that are generated by other callbacks
(and therefore not in the initial layout), then
you can suppress this exception by setting
`app.config['suppress_callback_exceptions']=True`.
'''.format(
arg.component_id,
arg.component_id,
list(layout.keys()) + (
[] if not hasattr(layout, 'id') else
[layout.id]
)
).replace(' ', ''))
if not self.config.first('suppress_callback_exceptions',
'supress_callback_exceptions'):
if getattr(layout, 'id', None) == arg.component_id:
component = layout
else:
component = layout[arg.component_id]
if (hasattr(arg, 'component_property') and
arg.component_property not in
component.available_properties and not
any(arg.component_property.startswith(w) for w in
component.available_wildcard_properties)):
raise exceptions.NonExistantPropException('''
Attempting to assign a callback with
the property "{}" but the component
"{}" doesn't have "{}" as a property.\n
Here is a list of the available properties in "{}":
{}
'''.format(
arg.component_property,
arg.component_id,
arg.component_property,
arg.component_id,
component.available_properties).replace(
' ', ''))
if (hasattr(arg, 'component_event') and
arg.component_event not in
component.available_events):
raise exceptions.NonExistantEventException('''
Attempting to assign a callback with
the event "{}" but the component
"{}" doesn't have "{}" as an event.\n
Here is a list of the available events in "{}":
{}
'''.format(
arg.component_event,
arg.component_id,
arg.component_event,
arg.component_id,
component.available_events).replace(' ', ''))
if state and not events and not inputs:
raise exceptions.MissingEventsException('''
This callback has {} `State` {}
but no `Input` elements or `Event` elements.\n
Without `Input` or `Event` elements, this callback
will never get called.\n
(Subscribing to input components will cause the
                callback to be called whenever their values
change and subscribing to an event will cause the
callback to be called whenever the event is fired.)
'''.format(
len(state),
'elements' if len(state) > 1 else 'element'
).replace(' ', ''))
if '.' in output.component_id:
raise exceptions.IDsCantContainPeriods('''The Output element
`{}` contains a period in its ID.
Periods are not allowed in IDs right now.'''.format(
output.component_id
))
callback_id = '{}.{}'.format(
output.component_id, output.component_property)
if callback_id in self.callback_map:
raise exceptions.CantHaveMultipleOutputs('''
You have already assigned a callback to the output
with ID "{}" and property "{}". An output can only have
a single callback function. Try combining your inputs and
callback functions together into one function.
'''.format(
output.component_id,
output.component_property).replace(' ', ''))
def _validate_callback_output(self, output_value, output):
valid = [str, dict, int, float, type(None), Component]
def _raise_invalid(bad_val, outer_val, bad_type, path, index=None,
toplevel=False):
outer_id = "(id={:s})".format(outer_val.id) \
if getattr(outer_val, 'id', False) else ''
outer_type = type(outer_val).__name__
raise exceptions.InvalidCallbackReturnValue('''
The callback for property `{property:s}` of component `{id:s}`
returned a {object:s} having type `{type:s}`
which is not JSON serializable.
{location_header:s}{location:s}
and has string representation
`{bad_val}`
In general, Dash properties can only be
dash components, strings, dictionaries, numbers, None,
or lists of those.
'''.format(
property=output.component_property,
id=output.component_id,
object='tree with one value' if not toplevel else 'value',
type=bad_type,
location_header=(
'The value in question is located at'
if not toplevel else
'''The value in question is either the only value returned,
or is in the top level of the returned list,'''
),
location=(
"\n" +
("[{:d}] {:s} {:s}".format(index, outer_type, outer_id)
if index is not None
else ('[*] ' + outer_type + ' ' + outer_id))
+ "\n" + path + "\n"
) if not toplevel else '',
bad_val=bad_val).replace(' ', ''))
def _value_is_valid(val):
return (
# pylint: disable=unused-variable
any([isinstance(val, x) for x in valid]) or
type(val).__name__ == 'unicode'
)
def _validate_value(val, index=None):
# val is a Component
if isinstance(val, Component):
for p, j in val.traverse_with_paths():
# check each component value in the tree
if not _value_is_valid(j):
_raise_invalid(
bad_val=j,
outer_val=val,
bad_type=type(j).__name__,
path=p,
index=index
)
# Children that are not of type Component or
# list/tuple not returned by traverse
child = getattr(j, 'children', None)
if not isinstance(child, (tuple,
collections.MutableSequence)):
if child and not _value_is_valid(child):
_raise_invalid(
bad_val=child,
outer_val=val,
bad_type=type(child).__name__,
path=p + "\n" + "[*] " + type(child).__name__,
index=index
)
# Also check the child of val, as it will not be returned
child = getattr(val, 'children', None)
if not isinstance(child, (tuple, collections.MutableSequence)):
if child and not _value_is_valid(child):
_raise_invalid(
bad_val=child,
outer_val=val,
bad_type=type(child).__name__,
path=type(child).__name__,
index=index
)
# val is not a Component, but is at the top level of tree
else:
if not _value_is_valid(val):
_raise_invalid(
bad_val=val,
outer_val=type(val).__name__,
bad_type=type(val).__name__,
path='',
index=index,
toplevel=True
)
if isinstance(output_value, list):
for i, val in enumerate(output_value):
_validate_value(val, index=i)
else:
_validate_value(output_value)
# TODO - Update nomenclature.
# "Parents" and "Children" should refer to the DOM tree
# and not the dependency tree.
# The dependency tree should use the nomenclature
# "observer" and "controller".
# "observers" listen for changes from their "controllers". For example,
# if a graph depends on a dropdown, the graph is the "observer" and the
# dropdown is a "controller". In this case the graph's "dependency" is
# the dropdown.
# TODO - Check this map for recursive or other ill-defined non-tree
# relationships
# pylint: disable=dangerous-default-value
def callback(self, output, inputs=[], state=[], events=[]):
self._validate_callback(output, inputs, state, events)
callback_id = '{}.{}'.format(
output.component_id, output.component_property
)
self.callback_map[callback_id] = {
'inputs': [
{'id': c.component_id, 'property': c.component_property}
for c in inputs
],
'state': [
{'id': c.component_id, 'property': c.component_property}
for c in state
],
'events': [
{'id': c.component_id, 'event': c.component_event}
for c in events
]
}
def wrap_func(func):
@wraps(func)
def add_context(*args, **kwargs):
output_value = func(*args, **kwargs)
response = {
'response': {
'props': {
output.component_property: output_value
}
}
}
try:
jsonResponse = json.dumps(
response,
cls=plotly.utils.PlotlyJSONEncoder
)
except TypeError:
self._validate_callback_output(output_value, output)
raise exceptions.InvalidCallbackReturnValue('''
The callback for property `{property:s}`
of component `{id:s}` returned a value
which is not JSON serializable.
In general, Dash properties can only be
dash components, strings, dictionaries, numbers, None,
or lists of those.
'''.format(property=output.component_property,
id=output.component_id))
return flask.Response(
jsonResponse,
mimetype='application/json'
)
self.callback_map[callback_id]['callback'] = add_context
return add_context
return wrap_func
def dispatch(self):
body = flask.request.get_json()
inputs = body.get('inputs', [])
state = body.get('state', [])
output = body['output']
target_id = '{}.{}'.format(output['id'], output['property'])
args = []
for component_registration in self.callback_map[target_id]['inputs']:
args.append([
c.get('value', None) for c in inputs if
c['property'] == component_registration['property'] and
c['id'] == component_registration['id']
][0])
for component_registration in self.callback_map[target_id]['state']:
args.append([
c.get('value', None) for c in state if
c['property'] == component_registration['property'] and
c['id'] == component_registration['id']
][0])
return self.callback_map[target_id]['callback'](*args)
def _validate_layout(self):
if self.layout is None:
raise exceptions.NoLayoutException(
''
'The layout was `None` '
'at the time that `run_server` was called. '
'Make sure to set the `layout` attribute of your application '
'before running the server.')
to_validate = self._layout_value()
layout_id = getattr(self.layout, 'id', None)
component_ids = {layout_id} if layout_id else set()
for component in to_validate.traverse():
component_id = getattr(component, 'id', None)
if component_id and component_id in component_ids:
raise exceptions.DuplicateIdError(
'Duplicate component id found'
' in the initial layout: `{}`'.format(component_id))
component_ids.add(component_id)
def _setup_server(self):
if self.config.include_assets_files:
self._walk_assets_directory()
self._validate_layout()
self._generate_scripts_html()
self._generate_css_dist_html()
def _add_assets_resource(self, url_path, file_path):
res = {'asset_path': url_path, 'filepath': file_path}
if self.config.assets_external_path:
res['external_url'] = '{}{}'.format(
self.config.assets_external_path, url_path)
self._assets_files.append(file_path)
return res
def _walk_assets_directory(self):
walk_dir = self._assets_folder
slash_splitter = re.compile(r'[\\/]+')
ignore_filter = re.compile(self.assets_ignore) \
if self.assets_ignore else None
for current, _, files in os.walk(walk_dir):
if current == walk_dir:
base = ''
else:
s = current.replace(walk_dir, '').lstrip('\\').lstrip('/')
splitted = slash_splitter.split(s)
if len(splitted) > 1:
base = '/'.join(slash_splitter.split(s))
else:
base = splitted[0]
files_gen = (x for x in files if not ignore_filter.search(x)) \
if ignore_filter else files
for f in sorted(files_gen):
if base:
path = '/'.join([base, f])
else:
path = f
full = os.path.join(current, f)
if f.endswith('js'):
self.scripts.append_script(
self._add_assets_resource(path, full))
elif f.endswith('css'):
self.css.append_css(self._add_assets_resource(path, full))
elif f == 'favicon.ico':
self._favicon = path
def _invalid_resources_handler(self, err):
return err.args[0], 404
def _serve_default_favicon(self):
headers = {
'Cache-Control': 'public, max-age={}'.format(
self.config.components_cache_max_age)
}
return flask.Response(pkgutil.get_data('dash', 'favicon.ico'),
headers=headers,
content_type='image/x-icon')
def get_asset_url(self, path):
asset = _get_asset_path(
self.config.requests_pathname_prefix,
self.config.routes_pathname_prefix,
path,
self._assets_url_path.lstrip('/')
)
return asset
def enable_dev_tools(self,
debug=False,
dev_tools_serve_dev_bundles=None,
dev_tools_hot_reload=None,
dev_tools_hot_reload_interval=None,
dev_tools_hot_reload_watch_interval=None,
dev_tools_hot_reload_max_retry=None,
dev_tools_silence_routes_logging=None):
"""
Activate the dev tools, called by `run_server`. If your application is
served by wsgi and you want to activate the dev tools, you can call
        this method outside of `__main__`.
If an argument is not provided, it can be set with environment
variables.
Available dev_tools environment variables:
- DASH_DEBUG
- DASH_SERVE_DEV_BUNDLES
- DASH_HOT_RELOAD
- DASH_HOT_RELOAD_INTERVAL
- DASH_HOT_RELOAD_WATCH_INTERVAL
- DASH_HOT_RELOAD_MAX_RETRY
- DASH_SILENCE_ROUTES_LOGGING
:param debug: If True, then activate all the tools unless specifically
disabled by the arguments or by environ variables. Available as
`DASH_DEBUG` environment variable.
:type debug: bool
:param dev_tools_serve_dev_bundles: Serve the dev bundles. Available
as `DASH_SERVE_DEV_BUNDLES` environment variable.
:type dev_tools_serve_dev_bundles: bool
:param dev_tools_hot_reload: Activate the hot reloading. Available as
`DASH_HOT_RELOAD` environment variable.
:type dev_tools_hot_reload: bool
:param dev_tools_hot_reload_interval: Interval at which the client will
request the reload hash. Available as `DASH_HOT_RELOAD_INTERVAL`
environment variable.
:type dev_tools_hot_reload_interval: int
:param dev_tools_hot_reload_watch_interval: Interval at which the
            assets folder is walked for changes. Available as
`DASH_HOT_RELOAD_WATCH_INTERVAL` environment variable.
:type dev_tools_hot_reload_watch_interval: float
        :param dev_tools_hot_reload_max_retry: Maximum number of retries before
            failing and displaying a pop up. Default 8. Available as
`DASH_HOT_RELOAD_MAX_RETRY` environment variable.
:type dev_tools_hot_reload_max_retry: int
        :param dev_tools_silence_routes_logging: Silence the `werkzeug` logger;
            this removes all routes logging. Available as
`DASH_SILENCE_ROUTES_LOGGING` environment variable.
:type dev_tools_silence_routes_logging: bool
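        :Example: (illustrative; assumes an existing `dash.Dash` instance `app`)
            app.enable_dev_tools(debug=True, dev_tools_hot_reload=False)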
:return: debug
"""
env = _configs.env_configs()
debug = debug or _configs.get_config('debug', None, env, debug,
is_bool=True)
self._dev_tools['serve_dev_bundles'] = _configs.get_config(
'serve_dev_bundles', dev_tools_serve_dev_bundles, env,
default=debug,
is_bool=True
)
self._dev_tools['hot_reload'] = _configs.get_config(
'hot_reload', dev_tools_hot_reload, env,
default=debug,
is_bool=True
)
self._dev_tools['hot_reload_interval'] = int(_configs.get_config(
'hot_reload_interval', dev_tools_hot_reload_interval, env,
default=3000
))
self._dev_tools['hot_reload_watch_interval'] = float(
_configs.get_config(
'hot_reload_watch_interval',
dev_tools_hot_reload_watch_interval,
env,
default=0.5
)
)
self._dev_tools['hot_reload_max_retry'] = int(
_configs.get_config(
'hot_reload_max_retry',
dev_tools_hot_reload_max_retry,
env,
default=8
)
)
self._dev_tools['silence_routes_logging'] = _configs.get_config(
'silence_routes_logging', dev_tools_silence_routes_logging, env,
default=debug,
is_bool=True,
)
if self._dev_tools.silence_routes_logging:
logging.getLogger('werkzeug').setLevel(logging.ERROR)
self.logger.setLevel(logging.INFO)
if self._dev_tools.hot_reload:
self._reload_hash = _generate_hash()
self._watch_thread = threading.Thread(
target=lambda: _watch.watch(
[self._assets_folder],
self._on_assets_change,
sleep_time=self._dev_tools.hot_reload_watch_interval)
)
self._watch_thread.daemon = True
self._watch_thread.start()
if debug and self._dev_tools.serve_dev_bundles:
            # Dev bundles only work locally.
self.scripts.config.serve_locally = True
return debug
# noinspection PyProtectedMember
def _on_assets_change(self, filename, modified, deleted):
self._lock.acquire()
self._hard_reload = True
self._reload_hash = _generate_hash()
asset_path = os.path.relpath(
filename, os.path.commonprefix([self._assets_folder, filename]))\
.replace('\\', '/').lstrip('/')
self._changed_assets.append({
'url': self.get_asset_url(asset_path),
'modified': int(modified),
'is_css': filename.endswith('css')
})
if filename not in self._assets_files and not deleted:
res = self._add_assets_resource(asset_path, filename)
if filename.endswith('js'):
self.scripts.append_script(res)
elif filename.endswith('css'):
self.css.append_css(res)
if deleted:
if filename in self._assets_files:
self._assets_files.remove(filename)
def delete_resource(resources):
to_delete = None
for r in resources:
if r.get('asset_path') == asset_path:
to_delete = r
break
if to_delete:
resources.remove(to_delete)
if filename.endswith('js'):
# pylint: disable=protected-access
delete_resource(self.scripts._resources._resources)
elif filename.endswith('css'):
# pylint: disable=protected-access
delete_resource(self.css._resources._resources)
self._lock.release()
def run_server(self,
port=8050,
debug=False,
dev_tools_serve_dev_bundles=None,
dev_tools_hot_reload=None,
dev_tools_hot_reload_interval=None,
dev_tools_hot_reload_watch_interval=None,
dev_tools_hot_reload_max_retry=None,
dev_tools_silence_routes_logging=None,
**flask_run_options):
"""
        Start the flask server in local mode. You should not run this on a
        production server; use gunicorn/waitress instead.
        :param port: Port used to serve the application.
:type port: int
:param debug: Set the debug mode of flask and enable the dev tools.
:type debug: bool
:param dev_tools_serve_dev_bundles: Serve the dev bundles of components
:type dev_tools_serve_dev_bundles: bool
:param dev_tools_hot_reload: Enable the hot reload.
:type dev_tools_hot_reload: bool
:param dev_tools_hot_reload_interval: Reload request interval.
:type dev_tools_hot_reload_interval: int
:param dev_tools_hot_reload_watch_interval:
:type dev_tools_hot_reload_watch_interval: float
:param dev_tools_hot_reload_max_retry: The number of times the reloader
requests can fail before displaying an alert.
:type dev_tools_hot_reload_max_retry: int
:param dev_tools_silence_routes_logging: Silence the routes logs.
:type dev_tools_silence_routes_logging: bool
:param flask_run_options: Given to `Flask.run`
:return:
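        :Example: (illustrative; assumes `app = dash.Dash()`)
            app.run_server(debug=True, port=8050)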
"""
debug = self.enable_dev_tools(
debug,
dev_tools_serve_dev_bundles,
dev_tools_hot_reload,
dev_tools_hot_reload_interval,
dev_tools_hot_reload_watch_interval,
dev_tools_hot_reload_max_retry,
dev_tools_silence_routes_logging,
)
if self._dev_tools.silence_routes_logging:
            # Since it's silenced, the address doesn't show anymore.
host = flask_run_options.get('host', '127.0.0.1')
ssl_context = flask_run_options.get('ssl_context')
self.logger.info(
'Running on %s://%s:%s%s',
'https' if ssl_context else 'http',
host, port, self.config.requests_pathname_prefix
)
# Generate a debugger pin and log it to the screen.
debugger_pin = os.environ['WERKZEUG_DEBUG_PIN'] = '-'.join(
itertools.chain(
''.join([str(random.randint(0, 9)) for _ in range(3)])
for _ in range(3))
)
self.logger.info(
'Debugger PIN: %s',
debugger_pin
)
self.server.run(port=port, debug=debug,
**flask_run_options)
| []
| []
| [
"WERKZEUG_DEBUG_PIN"
]
| [] | ["WERKZEUG_DEBUG_PIN"] | python | 1 | 0 | |
server.go | package main
import (
"database/sql"
"fmt"
"log"
"net/http"
"os"
"time"
_ "github.com/lib/pq"
)
var (
db *sql.DB
)
func main() {
connInfo := fmt.Sprintf(
"user=%s dbname=%s password=%s host=%s port=%s sslmode=disable",
"postgres",
"postgres",
os.Getenv("DB_ENV_POSTGRES_PASSWORD"),
os.Getenv("HELLODOCKER_POSTGRES_1_PORT_5432_TCP_ADDR"),
os.Getenv("HELLODOCKER_POSTGRES_1_PORT_5432_TCP_PORT"),
)
var err error
db, err = sql.Open("postgres", connInfo)
if err != nil {
log.Fatal(err)
}
for i := 0; i < 5; i++ {
time.Sleep(time.Duration(i) * time.Second)
if err = db.Ping(); err == nil {
break
}
log.Println(err)
}
if err != nil {
log.Fatal(err)
}
_, err = db.Exec(
`create table if not exists mydata (
id serial primary key,
val integer not null
)`)
if err != nil {
log.Fatal(err)
}
http.HandleFunc("/", serveIndex)
if err := http.ListenAndServe(":8080", nil); err != nil {
log.Fatal(err)
}
}
func serveIndex(resp http.ResponseWriter, req *http.Request) {
resp.Header().Set("Content-Type", "text/plain; charset=utf-8")
fmt.Fprintln(resp, "Hello, World!\n")
fmt.Fprintln(resp, "DB_ADDR:", os.Getenv("DB_PORT_5432_TCP_ADDR"))
fmt.Fprintln(resp, "DB_PORT:", os.Getenv("DB_PORT_5432_TCP_PORT"))
_, err := db.Exec("insert into mydata(val) values(0)")
if err != nil {
log.Fatal(err)
}
rows, err := db.Query("select id from mydata")
if err != nil {
log.Fatal(err)
}
for rows.Next() {
var id int
err = rows.Scan(&id)
if err != nil {
log.Fatal(err)
}
fmt.Fprintf(resp, "ID: %d\n", id)
}
}
| [
"\"DB_ENV_POSTGRES_PASSWORD\"",
"\"HELLODOCKER_POSTGRES_1_PORT_5432_TCP_ADDR\"",
"\"HELLODOCKER_POSTGRES_1_PORT_5432_TCP_PORT\"",
"\"DB_PORT_5432_TCP_ADDR\"",
"\"DB_PORT_5432_TCP_PORT\""
]
| []
| [
"DB_PORT_5432_TCP_PORT",
"DB_PORT_5432_TCP_ADDR",
"HELLODOCKER_POSTGRES_1_PORT_5432_TCP_PORT",
"DB_ENV_POSTGRES_PASSWORD",
"HELLODOCKER_POSTGRES_1_PORT_5432_TCP_ADDR"
]
| [] | ["DB_PORT_5432_TCP_PORT", "DB_PORT_5432_TCP_ADDR", "HELLODOCKER_POSTGRES_1_PORT_5432_TCP_PORT", "DB_ENV_POSTGRES_PASSWORD", "HELLODOCKER_POSTGRES_1_PORT_5432_TCP_ADDR"] | go | 5 | 0 | |
pkg/utilities/notifier.go | package utilities
import (
"bytes"
"crypto/tls"
"encoding/json"
"errors"
"fmt"
"net/http"
"net/url"
"os"
"pipeline-operator/pkg/apis/algorun/v1beta1"
)
var notifDisabled = false
func NotifyAll(notifMessages []*v1beta1.NotifMessage) {
for _, notifMessage := range notifMessages {
Notify(notifMessage)
}
}
func Notify(notifMessage *v1beta1.NotifMessage) {
if !notifDisabled {
notifEnvURL := os.Getenv("NOTIF_URL")
if notifEnvURL == "" {
notifDisabled = true
log.Error(errors.New("NOTIF_URL environment variable must be set in order to send notifications"), "Notifications disabled")
return
}
		logData := map[string]interface{}{
			"NotifType": &notifMessage.Type,
		}
notifLogger := log.WithValues("data", logData)
u, _ := url.Parse(fmt.Sprintf("%s/%schanged", notifEnvURL, *notifMessage.Type))
notifURL := u.String()
jsonValue, _ := json.Marshal(notifMessage)
req, err := http.NewRequest("POST", notifURL, bytes.NewReader(jsonValue))
req.Header.Set("Content-Type", "application/json")
tr := &http.Transport{
TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
}
client := &http.Client{Transport: tr}
resp, err := client.Do(req)
if err != nil {
notifLogger.Error(err, "Error sending notification.")
return
}
defer resp.Body.Close()
}
}
| [
"\"NOTIF_URL\""
]
| []
| [
"NOTIF_URL"
]
| [] | ["NOTIF_URL"] | go | 1 | 0 | |
src/test/java/hudson/security/docker/PlanetExpressTest.java | package hudson.security.docker;
import hudson.Functions;
import hudson.security.LDAPSecurityRealm;
import hudson.tasks.MailAddressResolver;
import hudson.util.Secret;
import org.acegisecurity.userdetails.ldap.LdapUserDetails;
import org.jenkinsci.test.acceptance.docker.DockerContainer;
import org.jenkinsci.test.acceptance.docker.DockerFixture;
import org.jenkinsci.test.acceptance.docker.DockerRule;
import org.junit.Rule;
import org.junit.Test;
import org.jvnet.hudson.test.JenkinsRule;
import static org.hamcrest.core.StringContains.containsString;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertThat;
import static org.junit.Assume.assumeFalse;
import org.junit.BeforeClass;
/**
* Tests the plugin when logging in to rroemhild/test-openldap
*/
public class PlanetExpressTest {
@BeforeClass public static void linuxOnly() {
assumeFalse("Windows CI builders now have Docker installed…but it does not support Linux images", Functions.isWindows() && System.getenv("JENKINS_URL") != null);
}
@Rule
public DockerRule<PlanetExpress> docker = new DockerRule<>(PlanetExpress.class);
@Rule
public JenkinsRule j = new JenkinsRule();
@Test
public void login() throws Exception {
PlanetExpress d = docker.get();
LDAPSecurityRealm realm = new LDAPSecurityRealm(d.getIpAddress(), PlanetExpress.DN, null, null, null, null, null, PlanetExpress.MANAGER_DN, Secret.fromString(PlanetExpress.MANAGER_SECRET), false, false, null, null, "cn", "mail", null,null);
j.jenkins.setSecurityRealm(realm);
j.configRoundtrip();
String content = j.createWebClient().login("fry", "fry").goTo("whoAmI").getBody().getTextContent();
assertThat(content, containsString("Philip J. Fry"));
LdapUserDetails zoidberg = (LdapUserDetails) j.jenkins.getSecurityRealm().loadUserByUsername("zoidberg");
assertEquals("cn=John A. Zoidberg,ou=people,dc=planetexpress,dc=com", zoidberg.getDn());
String leelaEmail = MailAddressResolver.resolve(j.jenkins.getUser("leela"));
assertEquals("[email protected]", leelaEmail);
}
@DockerFixture(id = "openldap-express", ports = {389, 636})
public static class PlanetExpress extends DockerContainer {
static final String DN = "dc=planetexpress,dc=com";
static final String MANAGER_DN = "cn=admin,dc=planetexpress,dc=com";
static final String MANAGER_SECRET = "GoodNewsEveryone";
}
}
| [
"\"JENKINS_URL\""
]
| []
| [
"JENKINS_URL"
]
| [] | ["JENKINS_URL"] | java | 1 | 0 | |
backend/src/services/asr/iflytek_asr.py | """
iflytek stream ASR service class (using WebSocket)
"""
import gevent
import os
from .interface import (
SpeechRecognitionConfig,
SpeechRecognitionRequest,
SpeechRecognitionResponse,
)
from .stream_asr import StreamAsr
from ..tokenizer import Tokenizer
import sys
import hashlib
from hashlib import sha1
import hmac
import base64
import json
import time
from websocket import create_connection
import websocket
from urllib.parse import quote
import logging
import queue
import re
"""
If you want to use the iFlytek ASR service, copy your credentials into the .env file under the repo dir.
IFLYTEK_URL="XXX"
IFLYTEK_API_ID="XXX"
IFLYTEK_API_KEY="XXX"
"""
# iFlytek ASR uses different language codes; map the languages in our system to iFlytek's.
LANGUAGE_CODE_MAPPING = {"zh": "cn", "en-US": "en"}
class IFlyTekAsr(StreamAsr):
SUPPORTED_LANGUAGES = ("en-US", "zh")
POLLING_INTERVAL = 0.1 # seconds
def __init__(self, config: SpeechRecognitionConfig, logger, callback_fn):
super(IFlyTekAsr, self).__init__(config, logger, callback_fn)
self.start_time = time.time()
self.base_url: str = os.getenv("IFLYTEK_URL", "")
self.api_id = os.getenv("IFLYTEK_API_ID", "")
self.api_key = os.getenv("IFLYTEK_API_KEY", "")
self.init_timestamp = str(int(time.time()))
self.pd = "edu" # ASR domain
self.end_tag = '{"end": true}'
self.got_final = False
self.signa = self._get_signature()
self.lang_code = LANGUAGE_CODE_MAPPING[self.user_language]
# TODO: self.tokenizer does not support on-the-fly language switch.
self.tokenizer = Tokenizer(lang=self.user_language)
self.semaphore = gevent.lock.Semaphore()
self.connect()
def connect(self):
try:
self.ws = create_connection(
self.base_url
+ "?appid="
+ self.api_id
+ "&ts="
+ self.init_timestamp
+ "&signa="
+ quote(self.signa)
+ "&lang="
+ quote(self.lang_code)
)
except ConnectionRefusedError:
raise ConnectionRefusedError(
f"Could not connect to iflytek ASR server at {self.base_url} - is it running?"
)
with self.semaphore:
self.ws.send("")
def _get_signature(self):
tt = (self.api_id + self.init_timestamp).encode("utf-8")
md5 = hashlib.md5()
md5.update(tt)
baseString = md5.hexdigest()
baseString = bytes(baseString, encoding="utf-8")
apiKey = self.api_key.encode("utf-8")
signa = hmac.new(apiKey, baseString, hashlib.sha1).digest()
signa = base64.b64encode(signa)
signa = str(signa, "utf-8")
return signa
def run(self):
if not self.ws.connected:
self.connect()
while self.ws.connected:
try:
api_response = str(self.ws.recv())
except websocket.WebSocketConnectionClosedException:
print("receive result end")
break
if len(api_response) == 0:
self.got_final = True
break
api_response = json.loads(api_response)
response_code = int(api_response["code"])
if response_code != 0:
self.logger.error(f"ASR Response Error code: {response_code}")
continue
data = api_response["data"]
if api_response["action"] == "result":
data = json.loads(data)
pure_words_list = [
i["cw"][0]["w"] for i in data["cn"]["st"]["rt"][0]["ws"]
]
# 0-final result; 1-intermediate result
utterance_is_final = int(data["cn"]["st"]["type"]) == 0
if utterance_is_final:
self.got_final = True
response = SpeechRecognitionResponse(
transcript=self._build_transcript(tokens=pure_words_list),
relative_time_offset=time.time() - self.start_time,
is_final=utterance_is_final,
language=LANGUAGE_CODE_MAPPING[self.detected_language],
)
self.callback_fn(self.last_request, response)
gevent.sleep(IFlyTekAsr.POLLING_INTERVAL)
def __call__(self, request: SpeechRecognitionRequest) -> None:
self.last_request = request
data = request.chunk
self._send_chunk(data)
def end_utterance(self):
# Send special end of stream message
self._send_chunk(bytes(self.end_tag.encode("utf-8")))
def terminate(self, wait_for_final=True):
self.end_utterance()
if wait_for_final:
self.wait_for_final()
self.ws.close()
def wait_for_final(self, timeout_seconds=2.0):
"""
After closing, wait until the final response is sent, up to a timeout
"""
q = queue.Queue()
original_callback = self.callback_fn
def wrapped_callback(request, response):
if response.is_final:
q.put(response)
original_callback(request, response)
self.callback_fn = wrapped_callback
try:
final_response = q.get(timeout=timeout_seconds)
except queue.Empty:
final_response = SpeechRecognitionResponse(
transcript="",
relative_time_offset=0,
is_final=True,
language=self.detected_language,
)
self.callback_fn = original_callback
while not self.got_final:
gevent.sleep(0.01)
return final_response
def _build_transcript(self, tokens: list):
raw_transcript = self.tokenizer.detokenize(tokens)
transcript = self.postprocess(raw_transcript)
return transcript
def postprocess(self, text):
# Remove filler words
word_delimiter = "" if self.config.language == "zh" else " "
filler_words = ("mhm", "uh", "um")
text = word_delimiter.join(
w for w in text.strip().split() if w not in filler_words
)
# Remove content in parenthesis: {}, <>, [], and ()
text = re.sub(r"[<{\(\[].*?[\)\]>}]", "", text.strip())
# Fix acronyms
text = text.replace("._", ".")
# Remove leading and trailing whitespace
text = text.strip()
if self.config.language == "zh":
# Remove spaces, speaker ID in chinese
text = text.replace("[SPK]", "")
text = text.replace(" ", "")
else:
if text:
text = text[0].capitalize() + text[1:]
text = re.sub(r"\bi\b", "I", text)
return text
def _send_chunk(self, data):
try:
self.ws.send(data)
except websocket.WebSocketConnectionClosedException:
self.logger.warning(
"WebSocketConnectionClosedException: socket is already closed."
)
| []
| []
| [
"IFLYTEK_API_ID",
"IFLYTEK_URL",
"IFLYTEK_API_KEY"
]
| [] | ["IFLYTEK_API_ID", "IFLYTEK_URL", "IFLYTEK_API_KEY"] | python | 3 | 0 | |
tests/test_client.py | # coding: utf-8
import os
import json
import pytest
import subprocess
import sys
import time
from textwrap import dedent
from sentry_sdk import Hub, Client, configure_scope, capture_message, capture_exception
from sentry_sdk.transport import Transport
from sentry_sdk._compat import reraise, text_type, PY2
from sentry_sdk.utils import HAS_CHAINED_EXCEPTIONS
if PY2:
# Importing ABCs from collections is deprecated, and will stop working in 3.8
# https://github.com/python/cpython/blob/master/Lib/collections/__init__.py#L49
from collections import Mapping
else:
# New in 3.3
# https://docs.python.org/3/library/collections.abc.html
from collections.abc import Mapping
class EventCaptured(Exception):
pass
class _TestTransport(Transport):
def capture_event(self, event):
raise EventCaptured(event)
def test_transport_option(monkeypatch):
if "SENTRY_DSN" in os.environ:
monkeypatch.delenv("SENTRY_DSN")
dsn = "https://[email protected]/123"
dsn2 = "https://[email protected]/124"
assert str(Client(dsn=dsn).dsn) == dsn
assert Client().dsn is None
monkeypatch.setenv("SENTRY_DSN", dsn)
transport = Transport({"dsn": dsn2})
assert text_type(transport.parsed_dsn) == dsn2
assert str(Client(transport=transport).dsn) == dsn
def test_proxy_http_use(monkeypatch):
client = Client("http://[email protected]/123", http_proxy="http://localhost/123")
assert client.transport._pool.proxy.scheme == "http"
def test_proxy_https_use(monkeypatch):
client = Client("https://[email protected]/123", http_proxy="https://localhost/123")
assert client.transport._pool.proxy.scheme == "https"
def test_proxy_both_select_http(monkeypatch):
client = Client(
"http://[email protected]/123",
https_proxy="https://localhost/123",
http_proxy="http://localhost/123",
)
assert client.transport._pool.proxy.scheme == "http"
def test_proxy_both_select_https(monkeypatch):
client = Client(
"https://[email protected]/123",
https_proxy="https://localhost/123",
http_proxy="http://localhost/123",
)
assert client.transport._pool.proxy.scheme == "https"
def test_proxy_http_fallback_http(monkeypatch):
client = Client("https://[email protected]/123", http_proxy="http://localhost/123")
assert client.transport._pool.proxy.scheme == "http"
def test_proxy_none_noenv(monkeypatch):
client = Client("http://[email protected]/123")
assert client.transport._pool.proxy is None
def test_proxy_none_httpenv_select(monkeypatch):
monkeypatch.setenv("HTTP_PROXY", "http://localhost/123")
client = Client("http://[email protected]/123")
assert client.transport._pool.proxy.scheme == "http"
def test_proxy_none_httpsenv_select(monkeypatch):
monkeypatch.setenv("HTTPS_PROXY", "https://localhost/123")
client = Client("https://[email protected]/123")
assert client.transport._pool.proxy.scheme == "https"
def test_proxy_none_httpenv_fallback(monkeypatch):
monkeypatch.setenv("HTTP_PROXY", "http://localhost/123")
client = Client("https://[email protected]/123")
assert client.transport._pool.proxy.scheme == "http"
def test_proxy_bothselect_bothen(monkeypatch):
monkeypatch.setenv("HTTP_PROXY", "http://localhost/123")
monkeypatch.setenv("HTTPS_PROXY", "https://localhost/123")
client = Client("https://[email protected]/123", http_proxy="", https_proxy="")
assert client.transport._pool.proxy is None
def test_proxy_bothavoid_bothenv(monkeypatch):
monkeypatch.setenv("HTTP_PROXY", "http://localhost/123")
monkeypatch.setenv("HTTPS_PROXY", "https://localhost/123")
client = Client("https://[email protected]/123", http_proxy=None, https_proxy=None)
assert client.transport._pool.proxy.scheme == "https"
def test_proxy_bothselect_httpenv(monkeypatch):
monkeypatch.setenv("HTTP_PROXY", "http://localhost/123")
client = Client("https://[email protected]/123", http_proxy=None, https_proxy=None)
assert client.transport._pool.proxy.scheme == "http"
def test_proxy_httpselect_bothenv(monkeypatch):
monkeypatch.setenv("HTTP_PROXY", "http://localhost/123")
monkeypatch.setenv("HTTPS_PROXY", "https://localhost/123")
client = Client("https://[email protected]/123", http_proxy=None, https_proxy="")
assert client.transport._pool.proxy.scheme == "http"
def test_proxy_httpsselect_bothenv(monkeypatch):
monkeypatch.setenv("HTTP_PROXY", "http://localhost/123")
monkeypatch.setenv("HTTPS_PROXY", "https://localhost/123")
client = Client("https://[email protected]/123", http_proxy="", https_proxy=None)
assert client.transport._pool.proxy.scheme == "https"
def test_proxy_httpselect_httpsenv(monkeypatch):
monkeypatch.setenv("HTTPS_PROXY", "https://localhost/123")
client = Client("https://[email protected]/123", http_proxy=None, https_proxy="")
assert client.transport._pool.proxy is None
def test_proxy_httpsselect_bothenv_http(monkeypatch):
monkeypatch.setenv("HTTP_PROXY", "http://localhost/123")
monkeypatch.setenv("HTTPS_PROXY", "https://localhost/123")
client = Client("http://[email protected]/123", http_proxy=None, https_proxy=None)
assert client.transport._pool.proxy.scheme == "http"
def test_simple_transport():
events = []
with Hub(Client(transport=events.append)):
capture_message("Hello World!")
assert events[0]["message"] == "Hello World!"
def test_ignore_errors():
class MyDivisionError(ZeroDivisionError):
pass
def raise_it(exc_info):
reraise(*exc_info)
hub = Hub(Client(ignore_errors=[ZeroDivisionError], transport=_TestTransport()))
hub._capture_internal_exception = raise_it
def e(exc):
try:
raise exc
except Exception:
hub.capture_exception()
e(ZeroDivisionError())
e(MyDivisionError())
pytest.raises(EventCaptured, lambda: e(ValueError()))
def test_with_locals_enabled():
events = []
hub = Hub(Client(with_locals=True, transport=events.append))
try:
1 / 0
except Exception:
hub.capture_exception()
(event,) = events
assert all(
frame["vars"]
for frame in event["exception"]["values"][0]["stacktrace"]["frames"]
)
def test_with_locals_disabled():
events = []
hub = Hub(Client(with_locals=False, transport=events.append))
try:
1 / 0
except Exception:
hub.capture_exception()
(event,) = events
assert all(
"vars" not in frame
for frame in event["exception"]["values"][0]["stacktrace"]["frames"]
)
def test_attach_stacktrace_enabled():
events = []
hub = Hub(Client(attach_stacktrace=True, transport=events.append))
def foo():
bar()
def bar():
hub.capture_message("HI")
foo()
(event,) = events
(thread,) = event["threads"]["values"]
functions = [x["function"] for x in thread["stacktrace"]["frames"]]
assert functions[-2:] == ["foo", "bar"]
def test_attach_stacktrace_enabled_no_locals():
events = []
hub = Hub(
Client(attach_stacktrace=True, with_locals=False, transport=events.append)
)
def foo():
bar()
def bar():
hub.capture_message("HI")
foo()
(event,) = events
(thread,) = event["threads"]["values"]
local_vars = [x.get("vars") for x in thread["stacktrace"]["frames"]]
assert local_vars[-2:] == [None, None]
def test_attach_stacktrace_in_app(sentry_init, capture_events):
sentry_init(attach_stacktrace=True, in_app_exclude=["_pytest"])
events = capture_events()
capture_message("hi")
(event,) = events
(thread,) = event["threads"]["values"]
frames = thread["stacktrace"]["frames"]
pytest_frames = [f for f in frames if f["module"].startswith("_pytest")]
assert pytest_frames
assert all(f["in_app"] is False for f in pytest_frames)
assert any(f["in_app"] for f in frames)
def test_attach_stacktrace_disabled():
events = []
hub = Hub(Client(attach_stacktrace=False, transport=events.append))
hub.capture_message("HI")
(event,) = events
assert "threads" not in event
def test_capture_event_works():
c = Client(transport=_TestTransport())
pytest.raises(EventCaptured, lambda: c.capture_event({}))
pytest.raises(EventCaptured, lambda: c.capture_event({}))
@pytest.mark.parametrize("num_messages", [10, 20])
def test_atexit(tmpdir, monkeypatch, num_messages):
app = tmpdir.join("app.py")
app.write(
dedent(
"""
import time
from sentry_sdk import init, transport, capture_message
def send_event(self, event):
time.sleep(0.1)
print(event["message"])
transport.HttpTransport._send_event = send_event
init("http://foobar@localhost/123", shutdown_timeout={num_messages})
for _ in range({num_messages}):
capture_message("HI")
""".format(
num_messages=num_messages
)
)
)
start = time.time()
output = subprocess.check_output([sys.executable, str(app)])
end = time.time()
# Each message takes at least 0.1 seconds to process
assert int(end - start) >= num_messages / 10
assert output.count(b"HI") == num_messages
def test_configure_scope_available(sentry_init, request, monkeypatch):
# Test that scope is configured if client is configured
sentry_init()
with configure_scope() as scope:
assert scope is Hub.current._stack[-1][1]
scope.set_tag("foo", "bar")
calls = []
def callback(scope):
calls.append(scope)
scope.set_tag("foo", "bar")
assert configure_scope(callback) is None
assert len(calls) == 1
assert calls[0] is Hub.current._stack[-1][1]
@pytest.mark.tests_internal_exceptions
def test_client_debug_option_enabled(sentry_init, caplog):
sentry_init(debug=True)
Hub.current._capture_internal_exception((ValueError, ValueError("OK"), None))
assert "OK" in caplog.text
@pytest.mark.tests_internal_exceptions
@pytest.mark.parametrize("with_client", (True, False))
def test_client_debug_option_disabled(with_client, sentry_init, caplog):
if with_client:
sentry_init()
Hub.current._capture_internal_exception((ValueError, ValueError("OK"), None))
assert "OK" not in caplog.text
def test_scope_initialized_before_client(sentry_init, capture_events):
"""
This is a consequence of how configure_scope() works. We must
    make `configure_scope()` a noop if no client is configured. Even
    if the user later configures a client, we can't know that here.
"""
with configure_scope() as scope:
scope.set_tag("foo", 42)
sentry_init()
events = capture_events()
capture_message("hi")
(event,) = events
assert "tags" not in event
def test_weird_chars(sentry_init, capture_events):
sentry_init()
events = capture_events()
capture_message(u"föö".encode("latin1"))
(event,) = events
assert json.loads(json.dumps(event)) == event
def test_nan(sentry_init, capture_events):
sentry_init()
events = capture_events()
try:
nan = float("nan") # noqa
1 / 0
except Exception:
capture_exception()
(event,) = events
frames = event["exception"]["values"][0]["stacktrace"]["frames"]
(frame,) = frames
assert frame["vars"]["nan"] == "nan"
def test_cyclic_frame_vars(sentry_init, capture_events):
sentry_init()
events = capture_events()
try:
a = {}
a["a"] = a
1 / 0
except Exception:
capture_exception()
(event,) = events
assert event["exception"]["values"][0]["stacktrace"]["frames"][0]["vars"]["a"] == {
"a": "<cyclic>"
}
def test_cyclic_data(sentry_init, capture_events):
sentry_init()
events = capture_events()
with configure_scope() as scope:
data = {}
data["is_cyclic"] = data
other_data = ""
data["not_cyclic"] = other_data
data["not_cyclic2"] = other_data
scope.set_extra("foo", data)
capture_message("hi")
(event,) = events
data = event["extra"]["foo"]
assert data == {"not_cyclic2": "", "not_cyclic": "", "is_cyclic": "<cyclic>"}
def test_databag_depth_stripping(sentry_init, capture_events, benchmark):
sentry_init()
events = capture_events()
value = ["a"]
for _ in range(100000):
value = [value]
@benchmark
def inner():
del events[:]
try:
a = value # noqa
1 / 0
except Exception:
capture_exception()
(event,) = events
assert len(json.dumps(event)) < 10000
def test_databag_string_stripping(sentry_init, capture_events, benchmark):
sentry_init()
events = capture_events()
@benchmark
def inner():
del events[:]
try:
a = "A" * 1000000 # noqa
1 / 0
except Exception:
capture_exception()
(event,) = events
assert len(json.dumps(event)) < 10000
def test_databag_breadth_stripping(sentry_init, capture_events, benchmark):
sentry_init()
events = capture_events()
@benchmark
def inner():
del events[:]
try:
a = ["a"] * 1000000 # noqa
1 / 0
except Exception:
capture_exception()
(event,) = events
assert len(json.dumps(event)) < 10000
@pytest.mark.skipif(not HAS_CHAINED_EXCEPTIONS, reason="Only works on 3.3+")
def test_chained_exceptions(sentry_init, capture_events):
sentry_init()
events = capture_events()
try:
try:
raise ValueError()
except Exception:
1 / 0
except Exception:
capture_exception()
(event,) = events
e1, e2 = event["exception"]["values"]
# This is the order all other SDKs send chained exceptions in. Including
# Raven-Python.
assert e1["type"] == "ValueError"
assert e2["type"] == "ZeroDivisionError"
@pytest.mark.tests_internal_exceptions
def test_broken_mapping(sentry_init, capture_events):
sentry_init()
events = capture_events()
class C(Mapping):
def broken(self, *args, **kwargs):
raise Exception("broken")
__getitem__ = broken
__setitem__ = broken
__delitem__ = broken
__iter__ = broken
__len__ = broken
def __repr__(self):
return "broken"
try:
a = C() # noqa
1 / 0
except Exception:
capture_exception()
(event,) = events
assert (
event["exception"]["values"][0]["stacktrace"]["frames"][0]["vars"]["a"]
== "<failed to serialize, use init(debug=True) to see error logs>"
)
def test_mapping_sends_exception(sentry_init, capture_events):
sentry_init()
events = capture_events()
class C(Mapping):
def __iter__(self):
try:
1 / 0
except ZeroDivisionError:
capture_exception()
yield "hi"
def __len__(self):
"""List length"""
return 1
def __getitem__(self, ii):
"""Get a list item"""
if ii == "hi":
return "hi"
raise KeyError()
try:
a = C() # noqa
1 / 0
except Exception:
capture_exception()
(event,) = events
assert event["exception"]["values"][0]["stacktrace"]["frames"][0]["vars"]["a"] == {
"hi": "'hi'"
}
def test_object_sends_exception(sentry_init, capture_events):
sentry_init()
events = capture_events()
class C(object):
def __repr__(self):
try:
1 / 0
except ZeroDivisionError:
capture_exception()
return "hi, i am a repr"
try:
a = C() # noqa
1 / 0
except Exception:
capture_exception()
(event,) = events
assert (
event["exception"]["values"][0]["stacktrace"]["frames"][0]["vars"]["a"]
== "hi, i am a repr"
)
def test_errno_errors(sentry_init, capture_events):
sentry_init()
events = capture_events()
class Foo(Exception):
errno = 69
capture_exception(Foo())
(event,) = events
(exception,) = event["exception"]["values"]
assert exception["mechanism"]["meta"]["errno"]["number"] == 69
def test_non_string_variables(sentry_init, capture_events):
"""There is some extremely terrible code in the wild that
inserts non-strings as variable names into `locals()`."""
sentry_init()
events = capture_events()
try:
locals()[42] = True
1 / 0
except ZeroDivisionError:
capture_exception()
(event,) = events
(exception,) = event["exception"]["values"]
assert exception["type"] == "ZeroDivisionError"
(frame,) = exception["stacktrace"]["frames"]
assert frame["vars"]["42"] == "True"
def test_dict_changed_during_iteration(sentry_init, capture_events):
"""
Some versions of Bottle modify the WSGI environment inside of this __repr__
impl: https://github.com/bottlepy/bottle/blob/0.12.16/bottle.py#L1386
See https://github.com/getsentry/sentry-python/pull/298 for discussion
"""
sentry_init(send_default_pii=True)
events = capture_events()
class TooSmartClass(object):
def __init__(self, environ):
self.environ = environ
def __repr__(self):
if "my_representation" in self.environ:
return self.environ["my_representation"]
self.environ["my_representation"] = "<This is me>"
return self.environ["my_representation"]
try:
environ = {}
environ["a"] = TooSmartClass(environ)
1 / 0
except ZeroDivisionError:
capture_exception()
(event,) = events
(exception,) = event["exception"]["values"]
(frame,) = exception["stacktrace"]["frames"]
assert frame["vars"]["environ"] == {"a": "<This is me>"}
@pytest.mark.parametrize(
"dsn",
[
"http://894b7d594095440f8dfea9b300e6f572@localhost:8000/2",
u"http://894b7d594095440f8dfea9b300e6f572@localhost:8000/2",
],
)
def test_init_string_types(dsn, sentry_init):
# Allow unicode strings on Python 3 and both on Python 2 (due to
# unicode_literals)
#
# Supporting bytes on Python 3 is not really wrong but probably would be
# extra code
sentry_init(dsn)
assert (
Hub.current.client.dsn
== "http://894b7d594095440f8dfea9b300e6f572@localhost:8000/2"
)
| []
| []
| []
| [] | [] | python | 0 | 0 | |
altuntas/altuntas/wsgi.py | """
WSGI config for altuntas project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'altuntas.settings')
application = get_wsgi_application()
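# Illustrative deployment (assumed command; any WSGI server works):
#   gunicorn altuntas.wsgi:application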
| []
| []
| []
| [] | [] | python | 0 | 0 | |
main.go | package main
import (
"log"
"os"
)
func main() {
app := NewSlackbot(os.Getenv("SLACK_TOKEN"), os.Getenv("SLACK_COOKIE"))
/* optional shutdown command */
app.ShutdownCMD = "__shutdown"
go func() {
<-app.Shutdown
app.Session.Disconnect()
log.Println("finished")
}()
app.HandleIncomingEvents()
}
| [
"\"SLACK_TOKEN\"",
"\"SLACK_COOKIE\""
]
| []
| [
"SLACK_COOKIE",
"SLACK_TOKEN"
]
| [] | ["SLACK_COOKIE", "SLACK_TOKEN"] | go | 2 | 0 | |
insta-unfollower.py | #! /usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
import time
import random
import requests
import json
import re
instagram_url = 'https://www.instagram.com'
login_route = '%s/accounts/login/ajax/' % (instagram_url)
logout_route = '%s/accounts/logout/' % (instagram_url)
profile_route = '%s/%s/'
query_route = '%s/graphql/query/' % (instagram_url)
unfollow_route = '%s/web/friendships/%s/unfollow/'
session = requests.Session()
def login():
session.headers.update({
'Accept-Encoding': 'gzip, deflate',
'Accept-Language': 'en-US,en;q=0.8',
'Connection': 'keep-alive',
'Content-Length': '0',
'Host': 'www.instagram.com',
'Origin': 'https://www.instagram.com',
'Referer': 'https://www.instagram.com/',
'User-Agent': ('Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 \
(KHTML, like Gecko) Chrome/48.0.2564.103 Safari/537.36'),
'X-Instagram-AJAX': '1',
'X-Requested-With': 'XMLHttpRequest'
})
session.cookies.update({
'ig_pr': '1',
'ig_vw': '1920',
})
reponse = session.get(instagram_url)
session.headers.update({
'X-CSRFToken': reponse.cookies['csrftoken']
})
time.sleep(random.randint(2, 6))
post_data = {
'username': os.environ.get('USERNAME'),
'password': os.environ.get('PASSWORD')
}
response = session.post(login_route, data=post_data, allow_redirects=True)
response_data = json.loads(response.text)
if response_data['authenticated']:
session.headers.update({
'X-CSRFToken': response.cookies['csrftoken']
})
return response_data['authenticated']
# Not so useful, it's just to simulate human actions better
def get_user_profile(username):
response = session.get(profile_route % (instagram_url, username))
extract = re.search(r'window._sharedData = (.+);</script>', str(response.text))
response = json.loads(extract.group(1))
return response['entry_data']['ProfilePage'][0]['graphql']['user']
def get_followers_list():
followers_list = []
query_hash = '56066f031e6239f35a904ac20c9f37d9'
variables = {
"id":session.cookies['ds_user_id'],
"include_reel":False,
"fetch_mutual":False,
"first":50
}
response = session.get(query_route, params={'query_hash': query_hash, 'variables': json.dumps(variables)})
while response.status_code != 200:
time.sleep(600) # querying too much, sleeping a bit before querying again
response = session.get(query_route, params={'query_hash': query_hash, 'variables': json.dumps(variables)})
print('.', end='', flush=True)
response = json.loads(response.text)
for edge in response['data']['user']['edge_followed_by']['edges']:
followers_list.append(edge['node'])
while response['data']['user']['edge_followed_by']['page_info']['has_next_page']:
variables['after'] = response['data']['user']['edge_followed_by']['page_info']['end_cursor']
time.sleep(2)
response = session.get(query_route, params={'query_hash': query_hash, 'variables': json.dumps(variables)})
while response.status_code != 200:
time.sleep(600) # querying too much, sleeping a bit before querying again
response = session.get(query_route, params={'query_hash': query_hash, 'variables': json.dumps(variables)})
print('.', end='', flush=True)
response = json.loads(response.text)
for edge in response['data']['user']['edge_followed_by']['edges']:
followers_list.append(edge['node'])
return followers_list
def get_following_list():
follows_list = []
query_hash = 'c56ee0ae1f89cdbd1c89e2bc6b8f3d18'
variables = {
"id":session.cookies['ds_user_id'],
"include_reel":False,
"fetch_mutual":False,
"first":50
}
response = session.get(query_route, params={'query_hash': query_hash, 'variables': json.dumps(variables)})
while response.status_code != 200:
time.sleep(600) # querying too much, sleeping a bit before querying again
response = session.get(query_route, params={'query_hash': query_hash, 'variables': json.dumps(variables)})
print('.', end='', flush=True)
response = json.loads(response.text)
for edge in response['data']['user']['edge_follow']['edges']:
follows_list.append(edge['node'])
while response['data']['user']['edge_follow']['page_info']['has_next_page']:
variables['after'] = response['data']['user']['edge_follow']['page_info']['end_cursor']
time.sleep(2)
response = session.get(query_route, params={'query_hash': query_hash, 'variables': json.dumps(variables)})
while response.status_code != 200:
time.sleep(600) # querying too much, sleeping a bit before querying again
response = session.get(query_route, params={'query_hash': query_hash, 'variables': json.dumps(variables)})
print('.', end='', flush=True)
response = json.loads(response.text)
for edge in response['data']['user']['edge_follow']['edges']:
follows_list.append(edge['node'])
return follows_list
def unfollow(user):
response = session.get(profile_route % (instagram_url, user['username']))
time.sleep(random.randint(2, 4))
    # update the X-CSRFToken header again, since the csrftoken cookie may have changed
session.headers.update({
'X-CSRFToken': response.cookies['csrftoken']
})
response = session.post(unfollow_route % (instagram_url, user['id']))
response = json.loads(response.text)
if response['status'] != 'ok':
        print('ERROR: {}'.format(response))
        sys.exit('might be unfollowing too fast, quitting to prevent ban...')
def logout():
post_data = {
'csrfmiddlewaretoken': session.cookies['csrftoken']
}
logout = session.post(logout_route, data=post_data)
if logout.status_code == 200:
return True
return False
def main():
if not os.environ.get('USERNAME') or not os.environ.get('PASSWORD'):
        sys.exit('please provide USERNAME and PASSWORD environment variables. Aborting...')
is_logged = login()
if is_logged == False:
sys.exit('login failed, verify user/password combination')
time.sleep(random.randint(2, 4))
connected_user = get_user_profile(os.environ.get('USERNAME'))
print('You\'re now logged as {} ({} followers, {} following)'.format(connected_user['username'], connected_user['edge_followed_by']['count'], connected_user['edge_follow']['count']))
time.sleep(random.randint(2, 4))
print('building followers list...', end='', flush=True)
followers_list = get_followers_list()
print(' done')
print('building following list...', end='', flush=True)
following_list = get_following_list()
print(' done')
unfollow_users_list = [user for user in following_list if user not in followers_list]
print('you are following {} user(s) who aren\'t following you.'.format(len(unfollow_users_list)))
if len(unfollow_users_list) > 0:
print('Begin to unfollow users...')
for user in unfollow_users_list:
if not os.environ.get('UNFOLLOW_VERIFIED') and user['is_verified'] == True:
continue
time.sleep(random.randint(2, 4))
print('unfollowing {}'.format(user['username']))
unfollow(user)
is_logged_out = logout()
if is_logged_out:
sys.exit(0)
if __name__ == "__main__":
main()
| []
| []
| [
"USERNAME",
"UNFOLLOW_VERIFIED",
"PASSWORD"
]
| [] | ["USERNAME", "UNFOLLOW_VERIFIED", "PASSWORD"] | python | 3 | 0 | |
service/src/java/org/apache/hive/service/server/HiveServer2.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hive.service.server;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hive.common.LogUtils;
import org.apache.hadoop.hive.common.LogUtils.LogInitializationException;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
import org.apache.hadoop.hive.ql.exec.tez.TezSessionPoolManager;
import org.apache.hive.common.util.HiveStringUtils;
import org.apache.hive.service.CompositeService;
import org.apache.hive.service.cli.CLIService;
import org.apache.hive.service.cli.thrift.ThriftBinaryCLIService;
import org.apache.hive.service.cli.thrift.ThriftCLIService;
import org.apache.hive.service.cli.thrift.ThriftHttpCLIService;
/**
* HiveServer2.
*
*/
public class HiveServer2 extends CompositeService {
private static final Log LOG = LogFactory.getLog(HiveServer2.class);
private CLIService cliService;
private ThriftCLIService thriftCLIService;
public HiveServer2() {
super("HiveServer2");
HiveConf.setLoadHiveServer2Config(true);
}
@Override
public synchronized void init(HiveConf hiveConf) {
cliService = new CLIService();
addService(cliService);
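    // Transport mode: the HIVE_SERVER2_TRANSPORT_MODE environment variable, when set, overrides
    // hive.server2.transport.mode from the HiveConf; "http" selects the Thrift-over-HTTP service,
    // anything else falls back to the Thrift binary service.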
String transportMode = System.getenv("HIVE_SERVER2_TRANSPORT_MODE");
if(transportMode == null) {
transportMode = hiveConf.getVar(HiveConf.ConfVars.HIVE_SERVER2_TRANSPORT_MODE);
}
if(transportMode != null && (transportMode.equalsIgnoreCase("http"))) {
thriftCLIService = new ThriftHttpCLIService(cliService);
}
else {
thriftCLIService = new ThriftBinaryCLIService(cliService);
}
addService(thriftCLIService);
super.init(hiveConf);
}
@Override
public synchronized void start() {
super.start();
}
@Override
public synchronized void stop() {
super.stop();
// there should already be an instance of the session pool manager.
// if not, ignoring is fine while stopping the hive server.
HiveConf hiveConf = this.getHiveConf();
if (hiveConf.getBoolVar(ConfVars.HIVE_SERVER2_TEZ_INITIALIZE_DEFAULT_SESSIONS)) {
try {
TezSessionPoolManager.getInstance().stop();
} catch (Exception e) {
        LOG.error("Tez session pool manager stop had an error during stop of hive server", e);
}
}
}
private static void startHiveServer2() throws Throwable {
long attempts = 0, maxAttempts = 1;
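    // Startup is retried up to hive.server2.max.start.attempts times; after a failed attempt the
    // partially started server is stopped and the loop waits 60 seconds before trying again.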
while(true) {
HiveConf hiveConf = new HiveConf();
maxAttempts = hiveConf.getLongVar(HiveConf.ConfVars.HIVE_SERVER2_MAX_START_ATTEMPTS);
HiveServer2 server = null;
try {
server = new HiveServer2();
server.init(hiveConf);
server.start();
if (hiveConf.getBoolVar(ConfVars.HIVE_SERVER2_TEZ_INITIALIZE_DEFAULT_SESSIONS)) {
TezSessionPoolManager sessionPool = TezSessionPoolManager.getInstance();
sessionPool.setupPool(hiveConf);
sessionPool.startPool();
}
break;
} catch (Throwable throwable) {
if(++attempts >= maxAttempts) {
throw new Error("Max start attempts " + maxAttempts + " exhausted", throwable);
} else {
LOG.warn("Error starting HiveServer2 on attempt " + attempts +
", will retry in 60 seconds", throwable);
try {
if (server != null) {
server.stop();
server = null;
}
} catch (Exception e) {
LOG.info("Exception caught when calling stop of HiveServer2 before" +
" retrying start", e);
}
try {
Thread.sleep(60L * 1000L);
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
}
}
}
}
}
public static void main(String[] args) {
try {
ServerOptionsProcessor oproc = new ServerOptionsProcessor("hiveserver2");
if (!oproc.process(args)) {
System.err.println("Error starting HiveServer2 with given arguments");
System.exit(-1);
}
//NOTE: It is critical to do this here so that log4j is reinitialized
// before any of the other core hive classes are loaded
String initLog4jMessage = LogUtils.initHiveLog4j();
LOG.debug(initLog4jMessage);
HiveStringUtils.startupShutdownMessage(HiveServer2.class, args, LOG);
//log debug message from "oproc" after log4j initialize properly
LOG.debug(oproc.getDebugMessage().toString());
startHiveServer2();
} catch (LogInitializationException e) {
LOG.error("Error initializing log: " + e.getMessage(), e);
System.exit(-1);
} catch (Throwable t) {
LOG.fatal("Error starting HiveServer2", t);
System.exit(-1);
}
}
}
| [
"\"HIVE_SERVER2_TRANSPORT_MODE\""
]
| []
| [
"HIVE_SERVER2_TRANSPORT_MODE"
]
| [] | ["HIVE_SERVER2_TRANSPORT_MODE"] | java | 1 | 0 | |
run_ucd.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
""" Run the UCD model.
Usage: python run_ucd.py
Input data files: ./data/instagram.pickle, ./data/user_friend_follower.csv, ./data/source_target.csv
Time: 30M
"""
import sys, os, pickle
import numpy as np
import pandas as pd
import networkx as nx
from sklearn.metrics import precision_recall_fscore_support, roc_auc_score, accuracy_score
from sklearn.preprocessing import normalize
from scipy import sparse
import tensorflow as tf
from keras.utils.np_utils import to_categorical
from keras.layers import Dense, Concatenate, Activation, Input
from keras.layers import Lambda, Embedding, GRU, Bidirectional, TimeDistributed, concatenate
from keras.models import Model
from keras import backend as K
# external packages
from modules.gae.cost import CostAE, CostVAE
from modules.gae.model import GCNModelAE, GCNModelVAE
from modules.gae.preprocessing_t import preprocess_graph, construct_feed_dict, sparse_to_tuple
from modules.gmm import GMM
from modules.attention_layer import AttLayer
from modules.estimation_net import EstimationNet
from util import Timer
def crop(dimension, start, end):
# Crops (or slices) a Tensor on a given dimension from start to end
    # example: to crop tensor x[:, :, 5:10]
    # call crop(2, 5, 10), i.e. slice along dimension 2
def func(x):
if dimension == 0:
return x[start: end]
if dimension == 1:
return x[:, start: end]
if dimension == 2:
return x[:, :, start: end]
if dimension == 3:
return x[:, :, :, start: end]
if dimension == 4:
return x[:, :, :, :, start: end]
return Lambda(func)
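# Example: crop(1, 0, MAX_SENT_LENGTH)(x) keeps x[:, :MAX_SENT_LENGTH]; main() uses this to split
# each padded sentence row into its word indices and the single appended time feature.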
def main():
timer = Timer()
timer.start()
os.environ['KMP_DUPLICATE_LIB_OK'] = 'True'
tf.set_random_seed(0)
MAX_SENT_LENGTH = 20
MAX_SENTS = 100
EMBEDDING_DIM = 50
POST_DIM = 10
TEXT_DIM = 50
VALIDATION_SPLIT = 0.2
MIXTURES = 5
Graph_DIM = 10
TRAINING_EPOCHS = 50
flags = tf.app.flags
FLAGS = flags.FLAGS
flags.DEFINE_float('learning_rate', 0.0001, 'Initial learning rate.')
flags.DEFINE_integer('hidden1', 32, 'Number of units in hidden layer 1.')
flags.DEFINE_integer('hidden2', Graph_DIM, 'Number of units in hidden layer 2.')
flags.DEFINE_integer('batch_size', 32, 'Size of a mini-batch')
flags.DEFINE_float('dropout', 0., 'Dropout rate (1 - keep probability).')
flags.DEFINE_float('lambda1', 1e-4, 'Parameter of energy.')
flags.DEFINE_float('lambda2', 1e-9, 'lossSigma.')
flags.DEFINE_float('lambda3', 0.01, 'GAE.')
flags.DEFINE_string('model', 'gcn_ae', 'Model string.')
model_str = FLAGS.model
# variable to store evaluation results
precision_list = []
recall_list = []
f1_list = []
auc_list = []
for t in range(10):
with open('./data/instagram.pickle', 'rb') as handle:
store_data = pickle.load(handle)
labels = store_data['labels']
df = store_data['df']
data = store_data['data']
postInfo = store_data['postInfo']
timeInfo = store_data['timeInfo']
embedding_matrix = store_data['embedding_matrix']
word_index = store_data['word_index']
num_session = data.shape[0]
nb_validation_samples = int(VALIDATION_SPLIT * num_session)
'''For Evaluation'''
single_label = np.asarray(labels)
labels = to_categorical(np.asarray(labels))
print('Shape of data tensor:', data.shape)
print('Shape of label tensor:', labels.shape)
zeros = np.zeros(num_session)
zeros = zeros.reshape((num_session, 1, 1))
# FLAGS.learning_rate = lr
'''Hierarchical Attention Network for text and other info'''
placeholders = {
'zero_input': tf.placeholder(tf.float32, shape=[None, 1, 1]),
'review_input': tf.placeholder(tf.float32, shape=[None, MAX_SENTS, MAX_SENT_LENGTH + 1]),
'post_input': tf.placeholder(tf.float32, shape=[None, 4, ]),
'time_label': tf.placeholder(tf.float32, shape=[None, MAX_SENTS])
}
g = nx.Graph()
edgelist = pd.read_csv('./data/source_target.csv')
for i, elrow in edgelist.iterrows():
g.add_edge(elrow[0].strip('\n'), elrow[1].strip('\n'))
adj = nx.adjacency_matrix(g)
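        # adj is the adjacency matrix of the user-user graph read from source_target.csv;
        # it is normalized below and fed to the graph autoencoder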
user_attributes = pd.read_csv('./data/user_friend_follower.csv')
user_attributes = user_attributes.set_index('user').T.to_dict('list')
nodelist = list(g.nodes())
features = []
User_post = np.zeros((len(nodelist), num_session)) # 2218 number of posts
for id, node in enumerate(nodelist):
posts_ID = df.loc[df['owner_id'] == node].index.values.tolist()
for p_id in posts_ID:
User_post[id][p_id] = 1
features.append(user_attributes[node])
# only keep the posts that are in the training data
User_post_train = User_post[:, :-nb_validation_samples]
User_post_test = User_post[:, -nb_validation_samples:]
features = sparse.csr_matrix(features)
features = normalize(features, norm='max', axis=0)
adj_orig = adj
adj_orig = adj_orig - sparse.dia_matrix((adj_orig.diagonal()[np.newaxis, :], [0]), shape=adj_orig.shape)
adj_orig.eliminate_zeros()
adj_norm = preprocess_graph(adj)
adj_label = adj + sparse.eye(adj.shape[0])
adj_label = sparse_to_tuple(adj_label)
# Define placeholders
placeholders.setdefault('features', tf.sparse_placeholder(tf.float32))
placeholders.setdefault('adj', tf.sparse_placeholder(tf.float32))
placeholders.setdefault('adj_orig', tf.sparse_placeholder(tf.float32))
placeholders.setdefault('dropout', tf.placeholder_with_default(0., shape=()))
placeholders.setdefault('user_post', tf.placeholder(tf.int32, [len(nodelist), None]))
d = {placeholders['dropout']: FLAGS.dropout}
placeholders.update(d)
num_nodes = adj.shape[0]
features = sparse_to_tuple(features.tocoo())
num_features = features[2][1]
features_nonzero = features[1].shape[0]
'''Graph AutoEncoder'''
if model_str == 'gcn_ae':
Graph_model = GCNModelAE(placeholders, num_features, features_nonzero)
elif model_str == 'gcn_vae':
Graph_model = GCNModelVAE(placeholders, num_features, num_nodes, features_nonzero)
embedding_layer = Embedding(len(word_index) + 1,
EMBEDDING_DIM,
weights=[embedding_matrix],
input_length=MAX_SENT_LENGTH,
trainable=True,
mask_zero=True)
all_input = Input(shape=(MAX_SENT_LENGTH + 1,))
sentence_input = crop(1, 0, MAX_SENT_LENGTH)(all_input) # slice
time_input = crop(1, MAX_SENT_LENGTH, MAX_SENT_LENGTH + 1)(all_input) # slice
embedded_sequences = embedding_layer(sentence_input)
# embedded_sequences=BatchNormalization()(embedded_sequences)
l_lstm = Bidirectional(GRU(TEXT_DIM, return_sequences=True))(embedded_sequences)
l_att = AttLayer(TEXT_DIM)(l_lstm) # (?,200)
# time_embedding=Dense(TIME_DIM,activation='sigmoid')(time_input)
merged_output = Concatenate()([l_att, time_input]) # text+time information
sentEncoder = Model(all_input, merged_output)
review_input = placeholders['review_input']
review_encoder = TimeDistributed(sentEncoder)(review_input)
l_lstm_sent = Bidirectional(GRU(TEXT_DIM, return_sequences=True))(review_encoder)
fully_sent = Dense(1, use_bias=False)(l_lstm_sent)
pred_time = Activation(activation='linear')(fully_sent)
zero_input = placeholders['zero_input']
shift_predtime = Concatenate(axis=1)([zero_input, pred_time])
shift_predtime = crop(1, 0, MAX_SENTS)(shift_predtime)
l_att_sent = AttLayer(TEXT_DIM)(l_lstm_sent)
# embed the #likes, shares
post_input = placeholders['post_input']
fully_post = Dense(POST_DIM, use_bias=False)(post_input)
# norm_fullypost=BatchNormalization()(fully_post)
post_embedding = Activation(activation='relu')(fully_post)
        fully_review = concatenate([l_att_sent, post_embedding])  # merge the document-level vector with the additional embedded features such as #likes
pos_weight = float(adj.shape[0] * adj.shape[0] - adj.sum()) / adj.sum()
norm = adj.shape[0] * adj.shape[0] / float((adj.shape[0] * adj.shape[0] - adj.sum()) * 2)
with tf.name_scope('graph_cost'):
preds_sub = Graph_model.reconstructions
labels_sub = tf.reshape(tf.sparse_tensor_to_dense(placeholders['adj_orig'],
validate_indices=False), [-1])
if model_str == 'gcn_ae':
opt = CostAE(preds=preds_sub, labels=labels_sub, pos_weight=pos_weight, norm=norm)
elif model_str == 'gcn_vae':
opt = CostVAE(preds=preds_sub,
labels=labels_sub,
model=Graph_model, num_nodes=num_nodes,
pos_weight=pos_weight,
norm=norm)
User_latent = Graph_model.z_mean # (n_user, G_embeddim)
Post_latent = fully_review # (batch size, text_embed_dim+post_dim)
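        # pair each post with its author: the argmax over the user_post incidence matrix gives the
        # authoring user's row, whose graph embedding is gathered and concatenated with the post vector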
max_indices = tf.argmax(placeholders['user_post'], axis=0)
add_latent = tf.gather(User_latent, max_indices)
session_latent = tf.concat([Post_latent, add_latent], axis=1) # the representation of text + graph
'''DAGMM'''
h1_size = 2 * TEXT_DIM + Graph_DIM + POST_DIM
gmm = GMM(MIXTURES)
est_net = EstimationNet([h1_size, MIXTURES], tf.nn.tanh)
gamma = est_net.inference(session_latent, FLAGS.dropout)
gmm.fit(session_latent, gamma)
individual_energy = gmm.energy(session_latent)
Time_label = placeholders['time_label']
Time_label = tf.reshape(Time_label, [tf.shape(Time_label)[0], MAX_SENTS, 1])
with tf.name_scope('loss'):
GAE_error = opt.cost
energy = tf.reduce_mean(individual_energy)
lossSigma = gmm.cov_diag_loss()
prediction_error = tf.losses.mean_squared_error(shift_predtime, Time_label)
loss = prediction_error + FLAGS.lambda1 * energy + FLAGS.lambda2 * lossSigma + FLAGS.lambda3 * GAE_error
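            # joint objective: time-prediction MSE plus weighted GMM sample energy,
            # covariance-diagonal regularizer, and graph reconstruction cost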
x_train = data[:-nb_validation_samples]
time_train = timeInfo[:-nb_validation_samples]
zeros_train = zeros[:-nb_validation_samples]
y_train = labels[:-nb_validation_samples]
post_train = postInfo[:-nb_validation_samples]
x_val = data[-nb_validation_samples:]
zeros_test = zeros[-nb_validation_samples:]
time_test = timeInfo[-nb_validation_samples:]
y_val = labels[-nb_validation_samples:]
post_test = postInfo[-nb_validation_samples:]
y_single = single_label[-nb_validation_samples:]
print('Number of positive and negative posts in training and validation set')
print(y_train.sum(axis=0))
print(y_val.sum(axis=0))
print("model fitting - Unsupervised cyberbullying detection")
optimizer = tf.train.AdamOptimizer(learning_rate=FLAGS.learning_rate)
train_step = optimizer.minimize(loss)
GAEcorrect_prediction = tf.equal(tf.cast(tf.greater_equal(tf.sigmoid(preds_sub), 0.5), tf.int32),
tf.cast(labels_sub, tf.int32))
feed_dict_train = construct_feed_dict(zeros_train, x_train, post_train, time_train, FLAGS.dropout, adj_norm,
adj_label, features,
User_post_train, placeholders)
feed_dict_train.update({placeholders['dropout']: FLAGS.dropout})
sess = tf.Session()
sess.run(tf.global_variables_initializer())
total_batch = int(num_session / FLAGS.batch_size)
zero_batches = np.array_split(zeros_train, total_batch)
x_batches = np.array_split(x_train, total_batch)
p_batches = np.array_split(post_train, total_batch)
t_batches = np.array_split(time_train, total_batch)
UP_batches = np.array_split(User_post_train, total_batch, axis=1)
for epoch in range(TRAINING_EPOCHS):
ave_cost = 0
ave_energy = 0
ave_recon = 0
ave_sigma = 0
ave_GAE = 0
for i in range(total_batch):
batch_x = x_batches[i]
batch_p = p_batches[i]
batch_t = t_batches[i]
batch_z = zero_batches[i]
user_post = UP_batches[i]
feed_dict = construct_feed_dict(batch_z, batch_x, batch_p, batch_t, FLAGS.dropout, adj_norm, adj_label,
features, user_post,
placeholders)
feed_dict.update({placeholders['dropout']: FLAGS.dropout})
_, total_loss, loss_sigma, GAE_loss, Energy_error, recon_error = sess.run(
[train_step, loss, lossSigma, GAE_error, energy, prediction_error], feed_dict)
ave_cost += total_loss / total_batch
ave_energy += Energy_error / total_batch
ave_GAE += GAE_loss / total_batch
ave_sigma += loss_sigma / total_batch
ave_recon += recon_error / total_batch
# if epoch % 10 == 0 or epoch == TRAINING_EPOCHS - 1:
# print("This is epoch %d, the total loss is %f, energy error is %f, GAE error is %f, sigma error is %f,prediction error is %f") \
# % (epoch + 1, ave_cost, ave_energy, ave_GAE, ave_sigma, ave_recon)
fix = gmm.fix_op()
sess.run(fix, feed_dict=feed_dict_train)
feed_dict_test = construct_feed_dict(zeros_test, x_val, post_test, time_test, FLAGS.dropout, adj_norm, adj_label,
features, User_post_test, placeholders)
pred_energy,representations = sess.run([individual_energy,session_latent], feed_dict=feed_dict_test)
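        # sessions whose estimated GMM energy falls above the 65th percentile (the top 35%)
        # are flagged as bullying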
bully_energy_threshold = np.percentile(pred_energy, 65)
print('the bully energy threshold is : %f' % bully_energy_threshold)
label_pred = np.where(pred_energy >= bully_energy_threshold, 1, 0)
print(precision_recall_fscore_support(y_single, label_pred))
print(accuracy_score(y_single, label_pred))
print(roc_auc_score(y_single, label_pred))
tf.reset_default_graph()
K.clear_session()
precision_list.append(precision_recall_fscore_support(y_single, label_pred)[0][1])
recall_list.append(precision_recall_fscore_support(y_single, label_pred)[1][1])
f1_list.append(precision_recall_fscore_support(y_single, label_pred)[2][1])
auc_list.append(roc_auc_score(y_single, label_pred))
print('>>> Evaluation metrics')
    print('>>> precision mean: {0:.4f}; precision std: {1:.4f}'.format(np.mean(precision_list), np.std(precision_list)))
    print('>>> recall mean: {0:.4f}; recall std: {1:.4f}'.format(np.mean(recall_list), np.std(recall_list)))
    print('>>> f1 mean: {0:.4f}; f1 std: {1:.4f}'.format(np.mean(f1_list), np.std(f1_list)))
    print('>>> auc mean: {0:.4f}; auc std: {1:.4f}'.format(np.mean(auc_list), np.std(auc_list)))
timer.stop()
if __name__ == '__main__':
main()
| []
| []
| [
"KMP_DUPLICATE_LIB_OK"
]
| [] | ["KMP_DUPLICATE_LIB_OK"] | python | 1 | 0 | |
consumer.go | package main
import (
"crypto/tls"
"encoding/json"
"flag"
"fmt"
"log"
"net"
"os"
"github.com/assembla/cony"
model "github.com/psavelis/goa-pos-poc/app"
"github.com/streadway/amqp"
mgo "gopkg.in/mgo.v2"
"gopkg.in/mgo.v2/bson"
)
var (
url *string
)
type posConnection struct {
db *mgo.Database
}
func getCloudAmqpURL() (url *string) {
amqpUser := os.Getenv("CLOUD_AMQP_USER")
missingEnv := false
if amqpUser == "" {
missingEnv = true
log.Printf("$CLOUD_AMQP_USER must be set")
}
amqpPassword := os.Getenv("CLOUD_AMQP_PASSWORD")
if amqpPassword == "" {
missingEnv = true
log.Printf("$CLOUD_AMQP_PASSWORD must be set")
}
if missingEnv {
panic("CloudAmqp environment variables not configured")
}
url = flag.String("url", fmt.Sprintf("amqp://%s:%[email protected]/%s", amqpUser, amqpPassword, amqpUser), "amqp url")
return url
}
func showUsageAndStatus() {
fmt.Printf("Consumer is running\n\n")
fmt.Println("Flags:")
flag.PrintDefaults()
fmt.Printf("\n\n")
}
func main() {
url = getCloudAmqpURL()
flag.Parse()
showUsageAndStatus()
// Creates a mongodb database instance
db := getDatabase()
// Construct new client with the flag url
// and default backoff policy
cli := cony.NewClient(
cony.URL(*url),
cony.Backoff(cony.DefaultBackoff),
)
// Declarations
// The queue name will be supplied by the AMQP server
que := &cony.Queue{
AutoDelete: true,
Name: "pos-purchase-created-queue",
}
exc := cony.Exchange{
Name: "purchase.created",
Kind: "fanout",
AutoDelete: true,
}
bnd := cony.Binding{
Queue: que,
Exchange: exc,
Key: "pubSub",
}
cli.Declare([]cony.Declaration{
cony.DeclareQueue(que),
cony.DeclareExchange(exc),
cony.DeclareBinding(bnd),
})
// Declare and register a consumer
cns := cony.NewConsumer(
que,
//cony.AutoAck(), // Auto sign the deliveries
)
cli.Consume(cns)
for cli.Loop() {
select {
case msg := <-cns.Deliveries():
log.Printf("Received body: %q\n", msg.Body)
// starts a new `goroutine` to process the msg.
go handleMessage(&msg, db)
case err := <-cns.Errors():
fmt.Printf("Consumer error: %v\n", err)
case err := <-cli.Errors():
fmt.Printf("Client error: %v\n", err)
}
}
}
// handleMessage process a new purchase message
func handleMessage(msg *amqp.Delivery, conn *posConnection) error {
// reuse from connection pool
session := conn.db.Session.Copy()
defer session.Close()
// defines an object with Purchase model defined in POS API
payload := model.Purchase{}
payload.Status = "FINISHED"
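	// the status defaults to FINISHED; json.Unmarshal below only overwrites it if the
	// message body carries its own status field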
// deserializes the payload
if err := json.Unmarshal(msg.Body, &payload); err != nil {
fmt.Printf("Consumer failed to deserialize payload: %v\n", err)
msg.Nack(false, true) //failed to deserialize, requeues the message
return err
}
collection := session.DB("services-pos").C("Purchase")
err := collection.Update(bson.M{"_id": payload.TransactionID}, bson.M{"$set": bson.M{"status": payload.Status}})
if err != nil {
fmt.Printf("Consumer failed to update Purchase (ID=%q) status: %v\n", payload.TransactionID, err)
msg.Nack(false, true)
return err
}
// If when we built the consumer we didn't use
// the "cony.AutoAck()" option this is where we'd
// have to call the "amqp.Deliveries" methods "Ack",
// "Nack", "Reject"
//
// msg.Ack(false)
// msg.Nack(false)
// msg.Reject(false)
return msg.Ack(false)
}
func getDatabase() (db *posConnection) {
// MongoDB (Atlas) setup
tlsConfig := &tls.Config{}
tlsConfig.InsecureSkipVerify = true
mgoUser := os.Getenv("MONGO_USR")
if mgoUser == "" {
log.Printf("$MONGO_USR must be set")
}
mgoPassword := os.Getenv("MONGO_PWD")
if mgoPassword == "" {
log.Printf("$MONGO_PWD must be set")
}
	dialInfo, err := mgo.ParseURL(fmt.Sprintf("mongodb://%s:%[email protected]:27017,development-shard-00-01-ozch3.mongodb.net:27017,development-shard-00-02-ozch3.mongodb.net:27017/test?replicaSet=development-shard-0&authSource=admin", mgoUser, mgoPassword))
	if err != nil {
		panic(err)
	}
dialInfo.DialServer = func(addr *mgo.ServerAddr) (net.Conn, error) {
conn, err := tls.Dial("tcp", addr.String(), tlsConfig)
return conn, err
}
session, err := mgo.DialWithInfo(dialInfo)
if err != nil {
panic(err)
}
	// note: the session must stay open after this function returns; handleMessage copies it via
	// conn.db.Session.Copy(), so a deferred session.Close() here would invalidate the returned connection
session.SetMode(mgo.Monotonic, true)
// services-pos database
database := *session.DB("services-pos")
return &posConnection{db: &database}
}
| [
"\"CLOUD_AMQP_USER\"",
"\"CLOUD_AMQP_PASSWORD\"",
"\"MONGO_USR\"",
"\"MONGO_PWD\""
]
| []
| [
"CLOUD_AMQP_USER",
"MONGO_USR",
"MONGO_PWD",
"CLOUD_AMQP_PASSWORD"
]
| [] | ["CLOUD_AMQP_USER", "MONGO_USR", "MONGO_PWD", "CLOUD_AMQP_PASSWORD"] | go | 4 | 0 | |
manage.py | #!/usr/bin/env python
import os
import sys
if __name__ == '__main__':
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'seleniumProject.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
| []
| []
| []
| [] | [] | python | 0 | 0 | |
service/transfers.go | package service
import (
"bytes"
"encoding/json"
"io/ioutil"
"net/http"
"os"
"strconv"
"github.com/zcoriarty/Pareto-Backend/apperr"
"github.com/zcoriarty/Pareto-Backend/repository/account"
"github.com/zcoriarty/Pareto-Backend/repository/transfer"
"github.com/gin-gonic/gin"
)
type ErrorBody struct {
Code int `json:"code"`
Message string `json:"message"`
}
func TransferRouter(svc *transfer.Service, acc *account.Service, r *gin.RouterGroup) {
a := Transfer{svc, acc}
ar := r.Group("/transfer")
ar.GET("", a.transfer)
ar.GET("/history", a.transfer)
ar.POST("/bank/:bank_id/deposit", a.createNewTransfer)
ar.DELETE("/:transfer_id/delete", a.deleteTransfer)
}
// Auth represents auth http service
type Transfer struct {
svc *transfer.Service
acc *account.Service
}
func (a *Transfer) transfer(c *gin.Context) {
id, _ := c.Get("id")
user := a.acc.GetProfile(c, id.(int))
if user.AccountID == "" {
apperr.Response(c, apperr.New(http.StatusBadRequest, "Account not found."))
return
}
limit := c.DefaultQuery("limit", "10000")
offset := c.DefaultQuery("offset", "0")
direction := c.DefaultQuery("direction", "")
transferListURL := os.Getenv("BROKER_API_BASE") + "/v1/accounts/" + user.AccountID + "/transfers?limit=" + limit + "&offset=" + offset + "&direction=" + direction
client := &http.Client{}
transferListRequest, _ := http.NewRequest("GET", transferListURL, nil)
transferListRequest.Header.Add("Authorization", os.Getenv("BROKER_TOKEN"))
transferList, _ := client.Do(transferListRequest)
transferListBody, err := ioutil.ReadAll(transferList.Body)
if err != nil {
apperr.Response(c, apperr.New(transferList.StatusCode, "Something went wrong. Try again later."))
return
}
if transferList.StatusCode != 200 {
errorBody := ErrorBody{}
json.Unmarshal(transferListBody, &errorBody)
apperr.Response(c, apperr.New(transferList.StatusCode, errorBody.Message))
return
}
var transferListJSON interface{}
json.Unmarshal(transferListBody, &transferListJSON)
c.JSON(transferList.StatusCode, transferListJSON)
}
func (a *Transfer) createNewTransfer(c *gin.Context) {
id, _ := c.Get("id")
user := a.acc.GetProfile(c, id.(int))
bankID := c.Param("bank_id")
amount, err := strconv.ParseFloat(c.PostForm("amount"), 64)
if err != nil {
apperr.Response(c, apperr.New(http.StatusBadRequest, "Invalid amount."))
return
}
if amount <= 0 {
apperr.Response(c, apperr.New(http.StatusBadRequest, "Amount must be greater than 0."))
return
}
if user.AccountID == "" {
apperr.Response(c, apperr.New(http.StatusBadRequest, "Account not found."))
return
}
createNewTransactionURL := os.Getenv("BROKER_API_BASE") + "/v1/accounts/" + user.AccountID + "/transfers"
client := &http.Client{}
createNewTransfer, _ := json.Marshal(map[string]interface{}{
"transfer_type": "ach",
"relationship_id": bankID,
"amount": amount,
"direction": "INCOMING",
})
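	// the broker treats an ACH transfer with direction INCOMING as a deposit moving funds
	// from the linked bank relationship into the brokerage account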
tranferBody := bytes.NewBuffer(createNewTransfer)
req, err := http.NewRequest("POST", createNewTransactionURL, tranferBody)
req.Header.Add("Authorization", os.Getenv("BROKER_TOKEN"))
	createTransfer, err := client.Do(req)
	if err != nil {
		apperr.Response(c, apperr.New(http.StatusInternalServerError, "Something went wrong. Try again later."))
		return
	}
	responseData, err := ioutil.ReadAll(createTransfer.Body)
	if err != nil {
		apperr.Response(c, apperr.New(createTransfer.StatusCode, "Something went wrong. Try again later."))
		return
	}
if createTransfer.StatusCode != 200 {
errorBody := ErrorBody{}
json.Unmarshal(responseData, &errorBody)
apperr.Response(c, apperr.New(createTransfer.StatusCode, errorBody.Message))
return
}
var responseObject map[string]interface{}
json.Unmarshal(responseData, &responseObject)
c.JSON(createTransfer.StatusCode, responseObject)
}
func (a *Transfer) deleteTransfer(c *gin.Context) {
id, _ := c.Get("id")
user := a.acc.GetProfile(c, id.(int))
if user.AccountID == "" {
apperr.Response(c, apperr.New(http.StatusBadRequest, "Account not found."))
return
}
transferID := c.Param("transfer_id")
deleteTransfersListURL := os.Getenv("BROKER_API_BASE") + "/v1/accounts/" + user.AccountID + "/transfers/" + transferID
client := &http.Client{}
transferDeleteRequest, _ := http.NewRequest("DELETE", deleteTransfersListURL, nil)
transferDeleteRequest.Header.Add("Authorization", os.Getenv("BROKER_TOKEN"))
transferDeleteResponse, _ := client.Do(transferDeleteRequest)
transferDeleteBody, err := ioutil.ReadAll(transferDeleteResponse.Body)
if err != nil {
apperr.Response(c, apperr.New(transferDeleteResponse.StatusCode, "Something went wrong. Try again later."))
return
}
if transferDeleteResponse.StatusCode != 200 {
errorBody := ErrorBody{}
json.Unmarshal(transferDeleteBody, &errorBody)
apperr.Response(c, apperr.New(transferDeleteResponse.StatusCode, errorBody.Message))
return
}
var responseObject interface{}
json.Unmarshal(transferDeleteBody, &responseObject)
c.JSON(transferDeleteResponse.StatusCode, responseObject)
}
| [
"\"BROKER_API_BASE\"",
"\"BROKER_TOKEN\"",
"\"BROKER_API_BASE\"",
"\"BROKER_TOKEN\"",
"\"BROKER_API_BASE\"",
"\"BROKER_TOKEN\""
]
| []
| [
"BROKER_TOKEN",
"BROKER_API_BASE"
]
| [] | ["BROKER_TOKEN", "BROKER_API_BASE"] | go | 2 | 0 |