| filename | content | environment | variablearg | constarg | variableargjson | constargjson | lang | constargcount | variableargcount | sentence |
|---|---|---|---|---|---|---|---|---|---|---|
| stringlengths 4–198 | stringlengths 25–939k | list | list | list | stringclasses 1 value | stringlengths 2–3.9k | stringclasses 3 values | float64 0–129 ⌀ | float64 0–0 ⌀ | stringclasses 1 value |
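Each row below pairs a source file (`filename`, `content`, `lang`) with the environment-variable lookups found in it (`environment`, `constarg`, `variablearg`, and their JSON and count counterparts). As a rough illustration of how the count columns appear to relate to the list columns in the rows shown here, the sketch below recomputes `constargcount` and `variableargcount` for the first row; treating a row as a plain Python dict with these field names is an assumption made purely for illustration.

```python
# Minimal sketch: recompute the count columns from the list columns of one row.
# Representing a row as a plain dict is an assumption for illustration only;
# the relation count == len(list) simply matches the three rows shown below.
row = {
    "filename": "src/internal/execabs/execabs_test.go",
    "constarg": ["PATH"],   # constant env-var names found in the file
    "variablearg": [],      # env-var accesses whose name is not a literal
}

constargcount = float(len(row["constarg"]))        # -> 1.0
variableargcount = float(len(row["variablearg"]))  # -> 0.0
print(constargcount, variableargcount)
```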
src/internal/execabs/execabs_test.go
|
// Copyright 2020 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package execabs
import (
"context"
"fmt"
"internal/testenv"
"io/ioutil"
"os"
"os/exec"
"path/filepath"
"runtime"
"testing"
)
func TestFixCmd(t *testing.T) {
cmd := &exec.Cmd{Path: "hello"}
fixCmd("hello", cmd)
if cmd.Path != "" {
t.Errorf("fixCmd didn't clear cmd.Path")
}
expectedErr := fmt.Sprintf("hello resolves to executable in current directory (.%chello)", filepath.Separator)
if err := cmd.Run(); err == nil {
t.Fatal("Command.Run didn't fail")
} else if err.Error() != expectedErr {
t.Fatalf("Command.Run returned unexpected error: want %q, got %q", expectedErr, err.Error())
}
}
func TestCommand(t *testing.T) {
testenv.MustHaveExec(t)
for _, cmd := range []func(string) *Cmd{
func(s string) *Cmd { return Command(s) },
func(s string) *Cmd { return CommandContext(context.Background(), s) },
} {
tmpDir, err := ioutil.TempDir("", "execabs-test")
if err != nil {
t.Fatalf("ioutil.TempDir failed: %s", err)
}
defer os.RemoveAll(tmpDir)
executable := "execabs-test"
if runtime.GOOS == "windows" {
executable += ".exe"
}
if err = ioutil.WriteFile(filepath.Join(tmpDir, executable), []byte{1, 2, 3}, 0111); err != nil {
t.Fatalf("ioutil.WriteFile failed: %s", err)
}
cwd, err := os.Getwd()
if err != nil {
t.Fatalf("os.Getwd failed: %s", err)
}
defer os.Chdir(cwd)
if err = os.Chdir(tmpDir); err != nil {
t.Fatalf("os.Chdir failed: %s", err)
}
if runtime.GOOS != "windows" {
// add "." to PATH so that exec.LookPath looks in the current directory on
// non-windows platforms as well
origPath := os.Getenv("PATH")
defer os.Setenv("PATH", origPath)
os.Setenv("PATH", fmt.Sprintf(".:%s", origPath))
}
expectedErr := fmt.Sprintf("execabs-test resolves to executable in current directory (.%c%s)", filepath.Separator, executable)
if err = cmd("execabs-test").Run(); err == nil {
t.Fatalf("Command.Run didn't fail when exec.LookPath returned a relative path")
} else if err.Error() != expectedErr {
t.Errorf("Command.Run returned unexpected error: want %q, got %q", expectedErr, err.Error())
}
}
}
func TestLookPath(t *testing.T) {
testenv.MustHaveExec(t)
tmpDir, err := ioutil.TempDir("", "execabs-test")
if err != nil {
t.Fatalf("ioutil.TempDir failed: %s", err)
}
defer os.RemoveAll(tmpDir)
executable := "execabs-test"
if runtime.GOOS == "windows" {
executable += ".exe"
}
if err = ioutil.WriteFile(filepath.Join(tmpDir, executable), []byte{1, 2, 3}, 0111); err != nil {
t.Fatalf("ioutil.WriteFile failed: %s", err)
}
cwd, err := os.Getwd()
if err != nil {
t.Fatalf("os.Getwd failed: %s", err)
}
defer os.Chdir(cwd)
if err = os.Chdir(tmpDir); err != nil {
t.Fatalf("os.Chdir failed: %s", err)
}
if runtime.GOOS != "windows" {
// add "." to PATH so that exec.LookPath looks in the current directory on
// non-windows platforms as well
origPath := os.Getenv("PATH")
defer os.Setenv("PATH", origPath)
os.Setenv("PATH", fmt.Sprintf(".:%s", origPath))
}
expectedErr := fmt.Sprintf("execabs-test resolves to executable in current directory (.%c%s)", filepath.Separator, executable)
if _, err := LookPath("execabs-test"); err == nil {
t.Fatalf("LookPath didn't fail when finding a non-relative path")
} else if err.Error() != expectedErr {
t.Errorf("LookPath returned unexpected error: want %q, got %q", expectedErr, err.Error())
}
}
|
[
"\"PATH\"",
"\"PATH\""
] |
[] |
[
"PATH"
] |
[]
|
["PATH"]
|
go
| 1 | 0 | |
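The execabs row above annotates the two literal `os.Getenv("PATH")` calls in the Go test: the `environment` column keeps the quoted literals (one entry per occurrence), while `constarg`/`constargjson` hold the de-duplicated names. Below is a minimal sketch of pulling such constant arguments out of Go source with a regular expression; the regex-based approach is an assumption for illustration, not the tooling that produced this dataset.

```python
import re

# Collect string literals passed to os.Getenv in a Go snippet.
# A real extractor would use go/ast; a regex is enough for a sketch.
GETENV_RE = re.compile(r'os\.Getenv\(\s*"([^"]+)"')

go_src = '''
origPath := os.Getenv("PATH")
defer os.Setenv("PATH", origPath)
origPath = os.Getenv("PATH")
'''

# The dataset's environment column keeps the surrounding quotes, e.g. '"PATH"'.
literals = GETENV_RE.findall(go_src)   # ['PATH', 'PATH']  -> environment column
const_args = sorted(set(literals))     # ['PATH']          -> constarg column
print(literals, const_args)
```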
vendor/github.com/looplab/eventhorizon/eventstore/mongodb/eventstore_test.go
|
// Copyright (c) 2015 - The Event Horizon authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package mongodb
import (
"context"
"os"
"testing"
eh "github.com/looplab/eventhorizon"
"github.com/looplab/eventhorizon/eventstore/testutil"
)
func TestEventStore(t *testing.T) {
// Support Wercker testing with MongoDB.
host := os.Getenv("MONGO_PORT_27017_TCP_ADDR")
port := os.Getenv("MONGO_PORT_27017_TCP_PORT")
url := "localhost"
if host != "" && port != "" {
url = host + ":" + port
}
store, err := NewEventStore(url, "test")
if err != nil {
t.Fatal("there should be no error:", err)
}
if store == nil {
t.Fatal("there should be a store")
}
ctx := eh.NewContextWithNamespace(context.Background(), "ns")
defer store.Close()
defer func() {
t.Log("clearing db")
if err = store.Clear(context.Background()); err != nil {
t.Fatal("there should be no error:", err)
}
if err = store.Clear(ctx); err != nil {
t.Fatal("there should be no error:", err)
}
}()
// Run the actual test suite.
t.Log("event store with default namespace")
testutil.EventStoreCommonTests(t, context.Background(), store)
t.Log("event store with other namespace")
testutil.EventStoreCommonTests(t, ctx, store)
t.Log("event store maintainer")
testutil.EventStoreMaintainerCommonTests(t, context.Background(), store)
}
|
[
"\"MONGO_PORT_27017_TCP_ADDR\"",
"\"MONGO_PORT_27017_TCP_PORT\""
] |
[] |
[
"MONGO_PORT_27017_TCP_ADDR",
"MONGO_PORT_27017_TCP_PORT"
] |
[]
|
["MONGO_PORT_27017_TCP_ADDR", "MONGO_PORT_27017_TCP_PORT"]
|
go
| 2 | 0 | |
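The event-store test above only talks to a non-default MongoDB address when both Docker-link style variables are set; otherwise it falls back to `localhost`. The same selection logic, rewritten in Python purely as a worked example of the pattern (the `mongo_url` helper name is made up for this sketch):

```python
import os

def mongo_url(default: str = "localhost") -> str:
    # Mirror the Go test: use host:port only when both variables are present.
    host = os.getenv("MONGO_PORT_27017_TCP_ADDR", "")
    port = os.getenv("MONGO_PORT_27017_TCP_PORT", "")
    return f"{host}:{port}" if host and port else default

print(mongo_url())  # "localhost" unless both Wercker/Docker variables are exported
```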
test/functional/test_runner.py
|
#!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Run regression test suite.
This module calls down into individual test cases via subprocess. It will
forward all unrecognized arguments onto the individual test scripts.
Functional tests are disabled on Windows by default. Use --force to run them anyway.
For a description of arguments recognized by test scripts, see
`test/functional/test_framework/test_framework.py:BitcoinTestFramework.main`.
"""
import argparse
import configparser
import datetime
import os
import time
import shutil
import signal
import sys
import subprocess
import tempfile
import re
import logging
# Formatting. Default colors to empty strings.
BOLD, BLUE, RED, GREY = ("", ""), ("", ""), ("", ""), ("", "")
try:
# Make sure python thinks it can write unicode to its stdout
"\u2713".encode("utf_8").decode(sys.stdout.encoding)
TICK = "✓ "
CROSS = "✖ "
CIRCLE = "○ "
except UnicodeDecodeError:
TICK = "P "
CROSS = "x "
CIRCLE = "o "
if os.name == 'posix':
# primitive formatting on supported
# terminal via ANSI escape sequences:
BOLD = ('\033[0m', '\033[1m')
BLUE = ('\033[0m', '\033[0;34m')
RED = ('\033[0m', '\033[0;31m')
GREY = ('\033[0m', '\033[1;30m')
TEST_EXIT_PASSED = 0
TEST_EXIT_SKIPPED = 77
BASE_SCRIPTS= [
# Scripts that are run by the travis build process.
# Longest test should go first, to favor running tests in parallel
'wallet-hd.py',
'walletbackup.py',
# vv Tests less than 5m vv
'p2p-fullblocktest.py',
'fundrawtransaction.py',
'p2p-compactblocks.py',
'segwit.py',
# vv Tests less than 2m vv
'wallet.py',
'wallet-accounts.py',
'p2p-segwit.py',
'wallet-dump.py',
'listtransactions.py',
# vv Tests less than 60s vv
'sendheaders.py',
'zapwallettxes.py',
'importmulti.py',
'mempool_limit.py',
'merkle_blocks.py',
'receivedby.py',
'abandonconflict.py',
'bip68-112-113-p2p.py',
'rawtransactions.py',
'reindex.py',
# vv Tests less than 30s vv
'keypool-topup.py',
'zmq_test.py',
'bitcoin_cli.py',
'mempool_resurrect_test.py',
'txn_doublespend.py --mineblock',
'txn_clone.py',
'getchaintips.py',
'rest.py',
'mempool_spendcoinbase.py',
'mempool_reorg.py',
'mempool_persist.py',
'multiwallet.py',
'httpbasics.py',
'multi_rpc.py',
'proxy_test.py',
'signrawtransactions.py',
'disconnect_ban.py',
'decodescript.py',
'blockchain.py',
'disablewallet.py',
'net.py',
'keypool.py',
'p2p-mempool.py',
'prioritise_transaction.py',
'invalidblockrequest.py',
'invalidtxrequest.py',
'p2p-versionbits-warning.py',
'preciousblock.py',
'test_script_address2.py',
'importprunedfunds.py',
'signmessages.py',
'nulldummy.py',
'import-rescan.py',
'mining.py',
'bumpfee.py',
'rpcnamedargs.py',
'listsinceblock.py',
'p2p-leaktests.py',
'wallet-encryption.py',
'bipdersig-p2p.py',
'bip65-cltv-p2p.py',
'uptime.py',
'resendwallettransactions.py',
'minchainwork.py',
'p2p-acceptblock.py',
]
EXTENDED_SCRIPTS = [
# These tests are not run by the travis build process.
# Longest test should go first, to favor running tests in parallel
'pruning.py',
# vv Tests less than 20m vv
'smartfees.py',
# vv Tests less than 5m vv
'maxuploadtarget.py',
'mempool_packages.py',
'dbcrash.py',
# vv Tests less than 2m vv
'bip68-sequence.py',
'getblocktemplate_longpoll.py',
'p2p-timeouts.py',
# vv Tests less than 60s vv
'bip9-softforks.py',
'p2p-feefilter.py',
'rpcbind_test.py',
# vv Tests less than 30s vv
'assumevalid.py',
'example_test.py',
'txn_doublespend.py',
'txn_clone.py --mineblock',
'forknotify.py',
'invalidateblock.py',
'replace-by-fee.py',
]
# Place EXTENDED_SCRIPTS first since it has the 3 longest running tests
ALL_SCRIPTS = EXTENDED_SCRIPTS + BASE_SCRIPTS
NON_SCRIPTS = [
# These are python files that live in the functional tests directory, but are not test scripts.
"combine_logs.py",
"create_cache.py",
"test_runner.py",
]
def main():
# Parse arguments and pass through unrecognised args
parser = argparse.ArgumentParser(add_help=False,
usage='%(prog)s [test_runner.py options] [script options] [scripts]',
description=__doc__,
epilog='''
Help text and arguments for individual test script:''',
formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument('--coverage', action='store_true', help='generate a basic coverage report for the RPC interface')
parser.add_argument('--exclude', '-x', help='specify a comma-separated list of scripts to exclude.')
parser.add_argument('--extended', action='store_true', help='run the extended test suite in addition to the basic tests')
parser.add_argument('--force', '-f', action='store_true', help='run tests even on platforms where they are disabled by default (e.g. windows).')
parser.add_argument('--help', '-h', '-?', action='store_true', help='print help text and exit')
parser.add_argument('--jobs', '-j', type=int, default=4, help='how many test scripts to run in parallel. Default=4.')
parser.add_argument('--keepcache', '-k', action='store_true', help='the default behavior is to flush the cache directory on startup. --keepcache retains the cache from the previous testrun.')
parser.add_argument('--quiet', '-q', action='store_true', help='only print results summary and failure logs')
parser.add_argument('--tmpdirprefix', '-t', default=tempfile.gettempdir(), help="Root directory for datadirs")
args, unknown_args = parser.parse_known_args()
# args to be passed on always start with two dashes; tests are the remaining unknown args
tests = [arg for arg in unknown_args if arg[:2] != "--"]
passon_args = [arg for arg in unknown_args if arg[:2] == "--"]
# Read config generated by configure.
config = configparser.ConfigParser()
configfile = os.path.abspath(os.path.dirname(__file__)) + "/../config.ini"
config.read_file(open(configfile))
passon_args.append("--configfile=%s" % configfile)
# Set up logging
logging_level = logging.INFO if args.quiet else logging.DEBUG
logging.basicConfig(format='%(message)s', level=logging_level)
# Create base test directory
tmpdir = "%s/slavecoin_test_runner_%s" % (args.tmpdirprefix, datetime.datetime.now().strftime("%Y%m%d_%H%M%S"))
os.makedirs(tmpdir)
logging.debug("Temporary test directory at %s" % tmpdir)
enable_wallet = config["components"].getboolean("ENABLE_WALLET")
enable_utils = config["components"].getboolean("ENABLE_UTILS")
enable_bitcoind = config["components"].getboolean("ENABLE_BITCOIND")
if config["environment"]["EXEEXT"] == ".exe" and not args.force:
# https://github.com/bitcoin/bitcoin/commit/d52802551752140cf41f0d9a225a43e84404d3e9
# https://github.com/bitcoin/bitcoin/pull/5677#issuecomment-136646964
print("Tests currently disabled on Windows by default. Use --force option to enable")
sys.exit(0)
if not (enable_wallet and enable_utils and enable_bitcoind):
print("No functional tests to run. Wallet, utils, and slavecoind must all be enabled")
print("Rerun `configure` with -enable-wallet, -with-utils and -with-daemon and rerun make")
sys.exit(0)
# Build list of tests
if tests:
# Individual tests have been specified. Run specified tests that exist
# in the ALL_SCRIPTS list. Accept the name with or without .py extension.
tests = [re.sub(r"\.py$", "", t) + ".py" for t in tests]
test_list = []
for t in tests:
if t in ALL_SCRIPTS:
test_list.append(t)
else:
print("{}WARNING!{} Test '{}' not found in full test list.".format(BOLD[1], BOLD[0], t))
else:
# No individual tests have been specified.
# Run all base tests, and optionally run extended tests.
test_list = BASE_SCRIPTS
if args.extended:
# place the EXTENDED_SCRIPTS first since the three longest ones
# are there and the list is shorter
test_list = EXTENDED_SCRIPTS + test_list
# Remove the test cases that the user has explicitly asked to exclude.
if args.exclude:
tests_excl = [re.sub(r"\.py$", "", t) + ".py" for t in args.exclude.split(',')]
for exclude_test in tests_excl:
if exclude_test in test_list:
test_list.remove(exclude_test)
else:
print("{}WARNING!{} Test '{}' not found in current test list.".format(BOLD[1], BOLD[0], exclude_test))
if not test_list:
print("No valid test scripts specified. Check that your test is in one "
"of the test lists in test_runner.py, or run test_runner.py with no arguments to run all tests")
sys.exit(0)
if args.help:
# Print help for test_runner.py, then print help of the first script (with args removed) and exit.
parser.print_help()
subprocess.check_call([(config["environment"]["SRCDIR"] + '/test/functional/' + test_list[0].split()[0])] + ['-h'])
sys.exit(0)
check_script_list(config["environment"]["SRCDIR"])
if not args.keepcache:
shutil.rmtree("%s/test/cache" % config["environment"]["BUILDDIR"], ignore_errors=True)
run_tests(test_list, config["environment"]["SRCDIR"], config["environment"]["BUILDDIR"], config["environment"]["EXEEXT"], tmpdir, args.jobs, args.coverage, passon_args)
def run_tests(test_list, src_dir, build_dir, exeext, tmpdir, jobs=1, enable_coverage=False, args=[]):
# Warn if bitcoind is already running (unix only)
try:
if subprocess.check_output(["pidof", "slavecoind"]) is not None:
print("%sWARNING!%s There is already a slavecoind process running on this system. Tests may fail unexpectedly due to resource contention!" % (BOLD[1], BOLD[0]))
except (OSError, subprocess.SubprocessError):
pass
# Warn if there is a cache directory
cache_dir = "%s/test/cache" % build_dir
if os.path.isdir(cache_dir):
print("%sWARNING!%s There is a cache directory here: %s. If tests fail unexpectedly, try deleting the cache directory." % (BOLD[1], BOLD[0], cache_dir))
#Set env vars
if "LITECOIND" not in os.environ:
os.environ["LITECOIND"] = build_dir + '/src/slavecoind' + exeext
os.environ["LITECOINCLI"] = build_dir + '/src/slavecoin-cli' + exeext
tests_dir = src_dir + '/test/functional/'
flags = ["--srcdir={}/src".format(build_dir)] + args
flags.append("--cachedir=%s" % cache_dir)
if enable_coverage:
coverage = RPCCoverage()
flags.append(coverage.flag)
logging.debug("Initializing coverage directory at %s" % coverage.dir)
else:
coverage = None
if len(test_list) > 1 and jobs > 1:
# Populate cache
subprocess.check_output([tests_dir + 'create_cache.py'] + flags + ["--tmpdir=%s/cache" % tmpdir])
#Run Tests
job_queue = TestHandler(jobs, tests_dir, tmpdir, test_list, flags)
time0 = time.time()
test_results = []
max_len_name = len(max(test_list, key=len))
for _ in range(len(test_list)):
test_result, stdout, stderr = job_queue.get_next()
test_results.append(test_result)
if test_result.status == "Passed":
logging.debug("\n%s%s%s passed, Duration: %s s" % (BOLD[1], test_result.name, BOLD[0], test_result.time))
elif test_result.status == "Skipped":
logging.debug("\n%s%s%s skipped" % (BOLD[1], test_result.name, BOLD[0]))
else:
print("\n%s%s%s failed, Duration: %s s\n" % (BOLD[1], test_result.name, BOLD[0], test_result.time))
print(BOLD[1] + 'stdout:\n' + BOLD[0] + stdout + '\n')
print(BOLD[1] + 'stderr:\n' + BOLD[0] + stderr + '\n')
print_results(test_results, max_len_name, (int(time.time() - time0)))
if coverage:
coverage.report_rpc_coverage()
logging.debug("Cleaning up coverage data")
coverage.cleanup()
# Clear up the temp directory if all subdirectories are gone
if not os.listdir(tmpdir):
os.rmdir(tmpdir)
all_passed = all(map(lambda test_result: test_result.was_successful, test_results))
sys.exit(not all_passed)
def print_results(test_results, max_len_name, runtime):
results = "\n" + BOLD[1] + "%s | %s | %s\n\n" % ("TEST".ljust(max_len_name), "STATUS ", "DURATION") + BOLD[0]
test_results.sort(key=lambda result: result.name.lower())
all_passed = True
time_sum = 0
for test_result in test_results:
all_passed = all_passed and test_result.was_successful
time_sum += test_result.time
test_result.padding = max_len_name
results += str(test_result)
status = TICK + "Passed" if all_passed else CROSS + "Failed"
results += BOLD[1] + "\n%s | %s | %s s (accumulated) \n" % ("ALL".ljust(max_len_name), status.ljust(9), time_sum) + BOLD[0]
results += "Runtime: %s s\n" % (runtime)
print(results)
class TestHandler:
"""
Trigger the test scripts passed in via the list.
"""
def __init__(self, num_tests_parallel, tests_dir, tmpdir, test_list=None, flags=None):
assert(num_tests_parallel >= 1)
self.num_jobs = num_tests_parallel
self.tests_dir = tests_dir
self.tmpdir = tmpdir
self.test_list = test_list
self.flags = flags
self.num_running = 0
# In case there is a graveyard of zombie bitcoinds, we can apply a
# pseudorandom offset to hopefully jump over them.
# (625 is PORT_RANGE/MAX_NODES)
self.portseed_offset = int(time.time() * 1000) % 625
self.jobs = []
def get_next(self):
while self.num_running < self.num_jobs and self.test_list:
# Add tests
self.num_running += 1
t = self.test_list.pop(0)
portseed = len(self.test_list) + self.portseed_offset
portseed_arg = ["--portseed={}".format(portseed)]
log_stdout = tempfile.SpooledTemporaryFile(max_size=2**16)
log_stderr = tempfile.SpooledTemporaryFile(max_size=2**16)
test_argv = t.split()
tmpdir = ["--tmpdir=%s/%s_%s" % (self.tmpdir, re.sub(".py$", "", test_argv[0]), portseed)]
self.jobs.append((t,
time.time(),
subprocess.Popen([self.tests_dir + test_argv[0]] + test_argv[1:] + self.flags + portseed_arg + tmpdir,
universal_newlines=True,
stdout=log_stdout,
stderr=log_stderr),
log_stdout,
log_stderr))
if not self.jobs:
raise IndexError('pop from empty list')
while True:
# Return first proc that finishes
time.sleep(.5)
for j in self.jobs:
(name, time0, proc, log_out, log_err) = j
if os.getenv('TRAVIS') == 'true' and int(time.time() - time0) > 20 * 60:
# In travis, timeout individual tests after 20 minutes (to stop tests hanging and not
# providing useful output).
proc.send_signal(signal.SIGINT)
if proc.poll() is not None:
log_out.seek(0), log_err.seek(0)
[stdout, stderr] = [l.read().decode('utf-8') for l in (log_out, log_err)]
log_out.close(), log_err.close()
if proc.returncode == TEST_EXIT_PASSED and stderr == "":
status = "Passed"
elif proc.returncode == TEST_EXIT_SKIPPED:
status = "Skipped"
else:
status = "Failed"
self.num_running -= 1
self.jobs.remove(j)
return TestResult(name, status, int(time.time() - time0)), stdout, stderr
print('.', end='', flush=True)
class TestResult():
def __init__(self, name, status, time):
self.name = name
self.status = status
self.time = time
self.padding = 0
def __repr__(self):
if self.status == "Passed":
color = BLUE
glyph = TICK
elif self.status == "Failed":
color = RED
glyph = CROSS
elif self.status == "Skipped":
color = GREY
glyph = CIRCLE
return color[1] + "%s | %s%s | %s s\n" % (self.name.ljust(self.padding), glyph, self.status.ljust(7), self.time) + color[0]
@property
def was_successful(self):
return self.status != "Failed"
def check_script_list(src_dir):
"""Check scripts directory.
Check that there are no scripts in the functional tests directory which are
not being run by pull-tester.py."""
script_dir = src_dir + '/test/functional/'
python_files = set([t for t in os.listdir(script_dir) if t[-3:] == ".py"])
missed_tests = list(python_files - set(map(lambda x: x.split()[0], ALL_SCRIPTS + NON_SCRIPTS)))
if len(missed_tests) != 0:
print("%sWARNING!%s The following scripts are not being run: %s. Check the test lists in test_runner.py." % (BOLD[1], BOLD[0], str(missed_tests)))
if os.getenv('TRAVIS') == 'true':
# On travis this warning is an error to prevent merging incomplete commits into master
sys.exit(1)
class RPCCoverage(object):
"""
Coverage reporting utilities for test_runner.
Coverage calculation works by having each test script subprocess write
coverage files into a particular directory. These files contain the RPC
commands invoked during testing, as well as a complete listing of RPC
commands per `slavecoin-cli help` (`rpc_interface.txt`).
After all tests complete, the commands run are combined and diff'd against
the complete list to calculate uncovered RPC commands.
See also: test/functional/test_framework/coverage.py
"""
def __init__(self):
self.dir = tempfile.mkdtemp(prefix="coverage")
self.flag = '--coveragedir=%s' % self.dir
def report_rpc_coverage(self):
"""
Print out RPC commands that were unexercised by tests.
"""
uncovered = self._get_uncovered_rpc_commands()
if uncovered:
print("Uncovered RPC commands:")
print("".join((" - %s\n" % i) for i in sorted(uncovered)))
else:
print("All RPC commands covered.")
def cleanup(self):
return shutil.rmtree(self.dir)
def _get_uncovered_rpc_commands(self):
"""
Return a set of currently untested RPC commands.
"""
# This is shared from `test/functional/test_framework/coverage.py`
reference_filename = 'rpc_interface.txt'
coverage_file_prefix = 'coverage.'
coverage_ref_filename = os.path.join(self.dir, reference_filename)
coverage_filenames = set()
all_cmds = set()
covered_cmds = set()
if not os.path.isfile(coverage_ref_filename):
raise RuntimeError("No coverage reference found")
with open(coverage_ref_filename, 'r') as f:
all_cmds.update([i.strip() for i in f.readlines()])
for root, dirs, files in os.walk(self.dir):
for filename in files:
if filename.startswith(coverage_file_prefix):
coverage_filenames.add(os.path.join(root, filename))
for filename in coverage_filenames:
with open(filename, 'r') as f:
covered_cmds.update([i.strip() for i in f.readlines()])
return all_cmds - covered_cmds
if __name__ == '__main__':
main()
|
[] |
[] |
[
"LITECOIND",
"LITECOINCLI",
"TRAVIS"
] |
[]
|
["LITECOIND", "LITECOINCLI", "TRAVIS"]
|
python
| 3 | 0 | |
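For the test_runner.py row, `constarg` holds the constant keys used with `os.environ[...]` and `os.getenv(...)` (`LITECOIND`, `LITECOINCLI`, `TRAVIS`). Below is a small `ast`-based sketch of that kind of extraction; it is an illustrative assumption about how such annotations could be derived, not the dataset's actual tooling, and it targets Python 3.9+ where subscript slices are plain `ast.Constant` nodes.

```python
import ast

SRC = '''
if "LITECOIND" not in os.environ:
    os.environ["LITECOIND"] = build_dir + "/src/slavecoind"
os.environ["LITECOINCLI"] = build_dir + "/src/slavecoin-cli"
if os.getenv("TRAVIS") == "true":
    pass
'''

names = []
for node in ast.walk(ast.parse(SRC)):
    # os.getenv("NAME") with a literal first argument
    if (isinstance(node, ast.Call) and isinstance(node.func, ast.Attribute)
            and node.func.attr == "getenv" and node.args
            and isinstance(node.args[0], ast.Constant)):
        names.append(node.args[0].value)
    # os.environ["NAME"] with a literal key
    if (isinstance(node, ast.Subscript) and isinstance(node.value, ast.Attribute)
            and node.value.attr == "environ" and isinstance(node.slice, ast.Constant)):
        names.append(node.slice.value)

print(sorted(set(names)))  # ['LITECOINCLI', 'LITECOIND', 'TRAVIS']
```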
graalpython/com.oracle.graal.python/src/com/oracle/graal/python/builtins/modules/PosixModuleBuiltins.java
|
/*
* Copyright (c) 2018, 2019, Oracle and/or its affiliates.
* Copyright (c) 2014, Regents of the University of California
*
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification, are
* permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this list of
* conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice, this list of
* conditions and the following disclaimer in the documentation and/or other materials provided
* with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS
* OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
* AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
* OF THE POSSIBILITY OF SUCH DAMAGE.
*/
package com.oracle.graal.python.builtins.modules;
import static com.oracle.graal.python.runtime.exception.PythonErrorType.FileNotFoundError;
import static com.oracle.graal.python.runtime.exception.PythonErrorType.NotImplementedError;
import static com.oracle.graal.python.runtime.exception.PythonErrorType.OSError;
import static com.oracle.graal.python.runtime.exception.PythonErrorType.TypeError;
import static com.oracle.graal.python.runtime.exception.PythonErrorType.ValueError;
import static com.oracle.truffle.api.TruffleFile.CREATION_TIME;
import static com.oracle.truffle.api.TruffleFile.IS_DIRECTORY;
import static com.oracle.truffle.api.TruffleFile.IS_REGULAR_FILE;
import static com.oracle.truffle.api.TruffleFile.IS_SYMBOLIC_LINK;
import static com.oracle.truffle.api.TruffleFile.LAST_ACCESS_TIME;
import static com.oracle.truffle.api.TruffleFile.LAST_MODIFIED_TIME;
import static com.oracle.truffle.api.TruffleFile.SIZE;
import static com.oracle.truffle.api.TruffleFile.UNIX_CTIME;
import static com.oracle.truffle.api.TruffleFile.UNIX_DEV;
import static com.oracle.truffle.api.TruffleFile.UNIX_GID;
import static com.oracle.truffle.api.TruffleFile.UNIX_GROUP;
import static com.oracle.truffle.api.TruffleFile.UNIX_INODE;
import static com.oracle.truffle.api.TruffleFile.UNIX_MODE;
import static com.oracle.truffle.api.TruffleFile.UNIX_NLINK;
import static com.oracle.truffle.api.TruffleFile.UNIX_OWNER;
import static com.oracle.truffle.api.TruffleFile.UNIX_PERMISSIONS;
import static com.oracle.truffle.api.TruffleFile.UNIX_UID;
import java.io.BufferedReader;
import java.io.File;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.io.OutputStream;
import java.lang.ProcessBuilder.Redirect;
import java.math.BigInteger;
import java.net.InetAddress;
import java.net.UnknownHostException;
import java.nio.ByteBuffer;
import java.nio.channels.Channel;
import java.nio.channels.NonWritableChannelException;
import java.nio.channels.ReadableByteChannel;
import java.nio.channels.SeekableByteChannel;
import java.nio.channels.WritableByteChannel;
import java.nio.file.AccessDeniedException;
import java.nio.file.FileAlreadyExistsException;
import java.nio.file.FileSystemException;
import java.nio.file.InvalidPathException;
import java.nio.file.LinkOption;
import java.nio.file.NoSuchFileException;
import java.nio.file.StandardCopyOption;
import java.nio.file.StandardOpenOption;
import java.nio.file.attribute.FileAttribute;
import java.nio.file.attribute.FileTime;
import java.nio.file.attribute.GroupPrincipal;
import java.nio.file.attribute.PosixFilePermission;
import java.nio.file.attribute.PosixFilePermissions;
import java.nio.file.attribute.UserPrincipal;
import java.util.Arrays;
import java.util.Collection;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Random;
import java.util.Set;
import java.util.concurrent.TimeUnit;
import com.oracle.graal.python.PythonLanguage;
import com.oracle.graal.python.builtins.Builtin;
import com.oracle.graal.python.builtins.CoreFunctions;
import com.oracle.graal.python.builtins.PythonBuiltinClassType;
import com.oracle.graal.python.builtins.PythonBuiltins;
import com.oracle.graal.python.builtins.modules.PosixModuleBuiltinsFactory.StatNodeFactory;
import com.oracle.graal.python.builtins.objects.PNone;
import com.oracle.graal.python.builtins.objects.bytes.BytesNodes;
import com.oracle.graal.python.builtins.objects.bytes.PByteArray;
import com.oracle.graal.python.builtins.objects.bytes.PBytes;
import com.oracle.graal.python.builtins.objects.bytes.PIBytesLike;
import com.oracle.graal.python.builtins.objects.common.SequenceNodes;
import com.oracle.graal.python.builtins.objects.common.SequenceNodes.LenNode;
import com.oracle.graal.python.builtins.objects.common.SequenceStorageNodes;
import com.oracle.graal.python.builtins.objects.common.SequenceStorageNodes.GetItemDynamicNode;
import com.oracle.graal.python.builtins.objects.common.SequenceStorageNodes.GetItemNode;
import com.oracle.graal.python.builtins.objects.common.SequenceStorageNodesFactory.ToByteArrayNodeGen;
import com.oracle.graal.python.builtins.objects.dict.PDict;
import com.oracle.graal.python.builtins.objects.exception.OSErrorEnum;
import com.oracle.graal.python.builtins.objects.floats.PFloat;
import com.oracle.graal.python.builtins.objects.function.PArguments;
import com.oracle.graal.python.builtins.objects.function.PArguments.ThreadState;
import com.oracle.graal.python.builtins.objects.function.PKeyword;
import com.oracle.graal.python.builtins.objects.ints.PInt;
import com.oracle.graal.python.builtins.objects.list.PList;
import com.oracle.graal.python.builtins.objects.module.PythonModule;
import com.oracle.graal.python.builtins.objects.object.PythonObjectLibrary;
import com.oracle.graal.python.builtins.objects.socket.PSocket;
import com.oracle.graal.python.builtins.objects.str.PString;
import com.oracle.graal.python.builtins.objects.tuple.PTuple;
import com.oracle.graal.python.builtins.objects.type.LazyPythonClass;
import com.oracle.graal.python.nodes.PRaiseOSErrorNode;
import com.oracle.graal.python.nodes.SpecialMethodNames;
import com.oracle.graal.python.nodes.attributes.ReadAttributeFromObjectNode;
import com.oracle.graal.python.nodes.expression.IsExpressionNode.IsNode;
import com.oracle.graal.python.nodes.function.PythonBuiltinBaseNode;
import com.oracle.graal.python.nodes.function.PythonBuiltinNode;
import com.oracle.graal.python.nodes.function.builtins.PythonBinaryBuiltinNode;
import com.oracle.graal.python.nodes.function.builtins.PythonTernaryBuiltinNode;
import com.oracle.graal.python.nodes.function.builtins.PythonUnaryBuiltinNode;
import com.oracle.graal.python.nodes.truffle.PythonArithmeticTypes;
import com.oracle.graal.python.nodes.util.CastToIntegerFromIntNode;
import com.oracle.graal.python.nodes.util.CastToJavaIntNode;
import com.oracle.graal.python.nodes.util.CastToPathNode;
import com.oracle.graal.python.nodes.util.ChannelNodes.ReadFromChannelNode;
import com.oracle.graal.python.nodes.util.CoerceToJavaLongNode;
import com.oracle.graal.python.runtime.PosixResources;
import com.oracle.graal.python.runtime.PythonContext;
import com.oracle.graal.python.runtime.PythonCore;
import com.oracle.graal.python.runtime.PythonOptions;
import com.oracle.graal.python.runtime.exception.PException;
import com.oracle.graal.python.runtime.exception.PythonErrorType;
import com.oracle.graal.python.runtime.exception.PythonExitException;
import com.oracle.graal.python.runtime.sequence.PSequence;
import com.oracle.graal.python.runtime.sequence.storage.ByteSequenceStorage;
import com.oracle.graal.python.util.FileDeleteShutdownHook;
import com.oracle.truffle.api.CompilerDirectives;
import com.oracle.truffle.api.CompilerDirectives.CompilationFinal;
import com.oracle.truffle.api.CompilerDirectives.TruffleBoundary;
import com.oracle.truffle.api.TruffleFile;
import com.oracle.truffle.api.TruffleLanguage.Env;
import com.oracle.truffle.api.dsl.Cached;
import com.oracle.truffle.api.dsl.Cached.Shared;
import com.oracle.truffle.api.dsl.Fallback;
import com.oracle.truffle.api.dsl.GenerateNodeFactory;
import com.oracle.truffle.api.dsl.ImportStatic;
import com.oracle.truffle.api.dsl.NodeFactory;
import com.oracle.truffle.api.dsl.Specialization;
import com.oracle.truffle.api.dsl.TypeSystemReference;
import com.oracle.truffle.api.frame.VirtualFrame;
import com.oracle.truffle.api.library.CachedLibrary;
import com.oracle.truffle.api.profiles.BranchProfile;
import com.oracle.truffle.api.profiles.ConditionProfile;
import com.oracle.truffle.api.profiles.ValueProfile;
import com.sun.security.auth.UnixNumericGroupPrincipal;
import com.sun.security.auth.UnixNumericUserPrincipal;
@CoreFunctions(defineModule = "posix")
public class PosixModuleBuiltins extends PythonBuiltins {
private static final int TMPFILE = 4259840;
private static final int TEMPORARY = 4259840;
private static final int SYNC = 1052672;
private static final int RSYNC = 1052672;
private static final int DIRECT = 16384;
private static final int DSYNC = 4096;
private static final int NDELAY = 2048;
private static final int NONBLOCK = 2048;
private static final int APPEND = 1024;
private static final int TRUNC = 512;
private static final int EXCL = 128;
private static final int CREAT = 64;
private static final int RDWR = 2;
private static final int WRONLY = 1;
private static final int RDONLY = 0;
private static final int SEEK_SET = 0;
private static final int SEEK_CUR = 1;
private static final int SEEK_END = 2;
private static final int WNOHANG = 1;
private static final int WUNTRACED = 3;
private static final int F_OK = 0;
private static final int X_OK = 1;
private static final int W_OK = 2;
private static final int R_OK = 4;
private static PosixFilePermission[][] otherBitsToPermission = new PosixFilePermission[][]{
new PosixFilePermission[]{},
new PosixFilePermission[]{PosixFilePermission.OTHERS_EXECUTE},
new PosixFilePermission[]{PosixFilePermission.OTHERS_WRITE},
new PosixFilePermission[]{PosixFilePermission.OTHERS_WRITE, PosixFilePermission.OTHERS_EXECUTE},
new PosixFilePermission[]{PosixFilePermission.OTHERS_READ},
new PosixFilePermission[]{PosixFilePermission.OTHERS_READ, PosixFilePermission.OTHERS_EXECUTE},
new PosixFilePermission[]{PosixFilePermission.OTHERS_READ, PosixFilePermission.OTHERS_WRITE},
new PosixFilePermission[]{PosixFilePermission.OTHERS_READ, PosixFilePermission.OTHERS_WRITE, PosixFilePermission.OTHERS_EXECUTE},
};
private static PosixFilePermission[][] groupBitsToPermission = new PosixFilePermission[][]{
new PosixFilePermission[]{},
new PosixFilePermission[]{PosixFilePermission.GROUP_EXECUTE},
new PosixFilePermission[]{PosixFilePermission.GROUP_WRITE},
new PosixFilePermission[]{PosixFilePermission.GROUP_WRITE, PosixFilePermission.GROUP_EXECUTE},
new PosixFilePermission[]{PosixFilePermission.GROUP_READ},
new PosixFilePermission[]{PosixFilePermission.GROUP_READ, PosixFilePermission.GROUP_EXECUTE},
new PosixFilePermission[]{PosixFilePermission.GROUP_READ, PosixFilePermission.GROUP_WRITE},
new PosixFilePermission[]{PosixFilePermission.GROUP_READ, PosixFilePermission.GROUP_WRITE, PosixFilePermission.GROUP_EXECUTE},
};
private static PosixFilePermission[][] ownerBitsToPermission = new PosixFilePermission[][]{
new PosixFilePermission[]{},
new PosixFilePermission[]{PosixFilePermission.OWNER_EXECUTE},
new PosixFilePermission[]{PosixFilePermission.OWNER_WRITE},
new PosixFilePermission[]{PosixFilePermission.OWNER_WRITE, PosixFilePermission.OWNER_EXECUTE},
new PosixFilePermission[]{PosixFilePermission.OWNER_READ},
new PosixFilePermission[]{PosixFilePermission.OWNER_READ, PosixFilePermission.OWNER_EXECUTE},
new PosixFilePermission[]{PosixFilePermission.OWNER_READ, PosixFilePermission.OWNER_WRITE},
new PosixFilePermission[]{PosixFilePermission.OWNER_READ, PosixFilePermission.OWNER_WRITE, PosixFilePermission.OWNER_EXECUTE},
};
private static boolean terminalIsInteractive(PythonContext context) {
return PythonOptions.getFlag(context, PythonOptions.TerminalIsInteractive);
}
@Override
protected List<? extends NodeFactory<? extends PythonBuiltinBaseNode>> getNodeFactories() {
return PosixModuleBuiltinsFactory.getFactories();
}
public abstract static class PythonFileNode extends PythonBuiltinNode {
protected PosixResources getResources() {
return getContext().getResources();
}
}
public PosixModuleBuiltins() {
builtinConstants.put("O_RDONLY", RDONLY);
builtinConstants.put("O_WRONLY", WRONLY);
builtinConstants.put("O_RDWR", RDWR);
builtinConstants.put("O_CREAT", CREAT);
builtinConstants.put("O_EXCL", EXCL);
builtinConstants.put("O_TRUNC", TRUNC);
builtinConstants.put("O_APPEND", APPEND);
builtinConstants.put("O_NONBLOCK", NONBLOCK);
builtinConstants.put("O_NDELAY", NDELAY);
builtinConstants.put("O_DSYNC", DSYNC);
builtinConstants.put("O_DIRECT", DIRECT);
builtinConstants.put("O_RSYNC", RSYNC);
builtinConstants.put("O_SYNC", SYNC);
builtinConstants.put("O_TEMPORARY", TEMPORARY);
builtinConstants.put("O_TMPFILE", TMPFILE);
builtinConstants.put("SEEK_SET", SEEK_SET);
builtinConstants.put("SEEK_CUR", SEEK_CUR);
builtinConstants.put("SEEK_END", SEEK_END);
builtinConstants.put("WNOHANG", WNOHANG);
builtinConstants.put("WUNTRACED", WUNTRACED);
builtinConstants.put("F_OK", F_OK);
builtinConstants.put("X_OK", X_OK);
builtinConstants.put("W_OK", W_OK);
builtinConstants.put("R_OK", R_OK);
}
@Override
public void initialize(PythonCore core) {
super.initialize(core);
builtinConstants.put("_have_functions", core.factory().createList());
builtinConstants.put("environ", core.factory().createDict());
}
@Override
public void postInitialize(PythonCore core) {
super.postInitialize(core);
// fill the environ dictionary with the current environment
Map<String, String> getenv = System.getenv();
PDict environ = core.factory().createDict();
for (Entry<String, String> entry : getenv.entrySet()) {
environ.setItem(core.factory().createBytes(entry.getKey().getBytes()), core.factory().createBytes(entry.getValue().getBytes()));
}
PythonModule posix = core.lookupBuiltinModule("posix");
Object environAttr = posix.getAttribute("environ");
((PDict) environAttr).setDictStorage(environ.getDictStorage());
}
@Builtin(name = "execv", minNumOfPositionalArgs = 3, declaresExplicitSelf = true)
@GenerateNodeFactory
public abstract static class ExecvNode extends PythonBuiltinNode {
@Child private BytesNodes.ToBytesNode toBytes = BytesNodes.ToBytesNode.create();
@Specialization
Object execute(PythonModule thisModule, String path, PList args) {
return doExecute(thisModule, path, args);
}
@Specialization
Object execute(PythonModule thisModule, PString path, PTuple args) {
return execute(thisModule, path.getValue(), args);
}
@Specialization
Object execute(PythonModule thisModule, String path, PTuple args) {
// in case of execl the PList happens to be in the tuple's first entry
Object list = GetItemDynamicNode.getUncached().execute(args.getSequenceStorage(), 0);
return doExecute(thisModule, path, list instanceof PList ? (PList) list : args);
}
@Specialization
Object execute(PythonModule thisModule, PString path, PList args) {
return doExecute(thisModule, path.getValue(), args);
}
@TruffleBoundary
Object doExecute(PythonModule thisModule, String path, PSequence args) {
try {
if (!getContext().isExecutableAccessAllowed()) {
throw raise(OSError, "executable access denied");
}
int size = args.getSequenceStorage().length();
String[] cmd = new String[size];
// The path parameter is not strictly needed because it is already contained in the args array,
// but it is used here so the CI gate does not flag it as unused
cmd[0] = path;
for (int i = 0; i < size; i++) {
cmd[i] = GetItemDynamicNode.getUncached().execute(args.getSequenceStorage(), i).toString();
}
PDict environ = (PDict) thisModule.getAttribute("environ");
ProcessBuilder builder = new ProcessBuilder(cmd);
Map<String, String> environment = builder.environment();
environ.entries().forEach(entry -> {
environment.put(new String(toBytes.execute(null, entry.key)), new String(toBytes.execute(null, entry.value)));
});
Process pr = builder.start();
BufferedReader bfr = new BufferedReader(new InputStreamReader(pr.getInputStream()));
OutputStream stream = getContext().getEnv().out();
String line = "";
while ((line = bfr.readLine()) != null) {
stream.write(line.getBytes());
stream.write("\n".getBytes());
}
BufferedReader stderr = new BufferedReader(new InputStreamReader(pr.getErrorStream()));
OutputStream errStream = getContext().getEnv().err();
line = "";
while ((line = stderr.readLine()) != null) {
errStream.write(line.getBytes());
errStream.write("\n".getBytes());
}
try {
pr.waitFor();
} catch (InterruptedException e) {
throw new IOException(e);
}
throw new PythonExitException(this, pr.exitValue());
} catch (IOException e) {
throw raise(PythonErrorType.ValueError, "Could not execute script '%s'", e.getMessage());
}
}
}
@Builtin(name = "getcwd", minNumOfPositionalArgs = 0)
@GenerateNodeFactory
public abstract static class CwdNode extends PythonBuiltinNode {
@Specialization
String cwd() {
try {
return getContext().getEnv().getCurrentWorkingDirectory().getPath();
} catch (SecurityException e) {
return "";
}
}
}
@Builtin(name = "chdir", minNumOfPositionalArgs = 1)
@GenerateNodeFactory
public abstract static class ChdirNode extends PythonBuiltinNode {
@Specialization
PNone chdir(String spath) {
Env env = getContext().getEnv();
try {
TruffleFile dir = env.getPublicTruffleFile(spath).getAbsoluteFile();
env.setCurrentWorkingDirectory(dir);
return PNone.NONE;
} catch (UnsupportedOperationException | IllegalArgumentException | SecurityException e) {
throw raise(PythonErrorType.FileNotFoundError, "No such file or directory: '%s'", spath);
}
}
@Specialization
PNone chdirPString(PString spath) {
return chdir(spath.getValue());
}
}
@Builtin(name = "getpid", minNumOfPositionalArgs = 0)
@GenerateNodeFactory
public abstract static class GetPidNode extends PythonBuiltinNode {
@Specialization
int getPid() {
// TODO: this needs to be implemented properly at some point (consider managed execution
// as well)
return getContext().hashCode();
}
}
@Builtin(name = "getuid", minNumOfPositionalArgs = 0)
@GenerateNodeFactory
public abstract static class GetUidNode extends PythonBuiltinNode {
@Specialization
int getPid() {
return getSystemUid();
}
@TruffleBoundary
int getSystemUid() {
String osName = System.getProperty("os.name");
if (osName.contains("Linux")) {
return (int) new com.sun.security.auth.module.UnixSystem().getUid();
}
return 1000;
}
}
@Builtin(name = "fstat", minNumOfPositionalArgs = 1)
@GenerateNodeFactory
public abstract static class FstatNode extends PythonFileNode {
@Child private StatNode statNode;
protected abstract Object executeWith(VirtualFrame frame, Object fd);
@Specialization(guards = {"fd >= 0", "fd <= 2"})
Object fstatStd(@SuppressWarnings("unused") int fd) {
return factory().createTuple(new Object[]{
8592,
0, // ino
0, // dev
0, // nlink
0,
0,
0,
0,
0,
0
});
}
@Specialization(guards = "fd > 2")
Object fstat(VirtualFrame frame, int fd,
@Cached("create()") BranchProfile fstatForNonFile,
@Cached("createClassProfile()") ValueProfile channelClassProfile) {
PosixResources resources = getResources();
String filePath = resources.getFilePath(fd);
if (filePath != null) {
if (statNode == null) {
CompilerDirectives.transferToInterpreterAndInvalidate();
statNode = insert(StatNode.create());
}
return statNode.executeWith(frame, resources.getFilePath(fd), PNone.NO_VALUE);
} else {
fstatForNonFile.enter();
Channel fileChannel = resources.getFileChannel(fd, channelClassProfile);
int mode = 0;
if (fileChannel instanceof ReadableByteChannel) {
mode |= 0444;
}
if (fileChannel instanceof WritableByteChannel) {
mode |= 0222;
}
return factory().createTuple(new Object[]{
mode,
0, // ino
0, // dev
0, // nlink
0,
0,
0,
0,
0,
0,
});
}
}
@Specialization(limit = "getCallSiteInlineCacheMaxDepth()")
Object fstatPInt(VirtualFrame frame, Object fd,
@CachedLibrary("fd") PythonObjectLibrary lib,
@Cached("create()") FstatNode recursive) {
return recursive.executeWith(frame, lib.asSizeWithState(fd, PArguments.getThreadState(frame)));
}
protected static FstatNode create() {
return PosixModuleBuiltinsFactory.FstatNodeFactory.create(null);
}
}
@Builtin(name = "set_inheritable", minNumOfPositionalArgs = 2)
@GenerateNodeFactory
public abstract static class SetInheritableNode extends PythonFileNode {
@Specialization(guards = {"fd >= 0", "fd <= 2"})
Object setInheritableStd(@SuppressWarnings("unused") int fd, @SuppressWarnings("unused") Object inheritable) {
// TODO: investigate whether this flag can be set for stdout/stdin/stderr
return PNone.NONE;
}
@Specialization(guards = "fd > 2")
Object setInheritable(VirtualFrame frame, int fd, @SuppressWarnings("unused") Object inheritable) {
Channel ch = getResources().getFileChannel(fd);
if (ch == null || ch instanceof PSocket) {
throw raiseOSError(frame, OSErrorEnum.EBADF.getNumber());
}
// TODO: investigate how to map this to the truffle file api (if supported)
return PNone.NONE;
}
}
@Builtin(name = "stat", minNumOfPositionalArgs = 1, parameterNames = {"path", "follow_symlinks"})
@GenerateNodeFactory
@ImportStatic(SpecialMethodNames.class)
@TypeSystemReference(PythonArithmeticTypes.class)
public abstract static class StatNode extends PythonBinaryBuiltinNode {
private final BranchProfile fileNotFound = BranchProfile.create();
private static final int S_IFIFO = 0010000;
private static final int S_IFCHR = 0020000;
private static final int S_IFBLK = 0060000;
private static final int S_IFSOCK = 0140000;
private static final int S_IFLNK = 0120000;
private static final int S_IFDIR = 0040000;
private static final int S_IFREG = 0100000;
protected abstract Object executeWith(VirtualFrame frame, Object path, Object followSymlinks);
@Specialization
Object doStatPath(VirtualFrame frame, Object path, boolean followSymlinks,
@Cached CastToPathNode cast) {
return stat(cast.execute(frame, path), followSymlinks);
}
@Specialization(guards = "isNoValue(followSymlinks)")
Object doStatDefault(VirtualFrame frame, Object path, @SuppressWarnings("unused") PNone followSymlinks,
@Cached CastToPathNode cast) {
return stat(cast.execute(frame, path), true);
}
@TruffleBoundary
long fileTimeToSeconds(FileTime t) {
return t.to(TimeUnit.SECONDS);
}
@TruffleBoundary
Object stat(String path, boolean followSymlinks) {
TruffleFile f = getContext().getPublicTruffleFileRelaxed(path, PythonLanguage.DEFAULT_PYTHON_EXTENSIONS);
LinkOption[] linkOptions = followSymlinks ? new LinkOption[0] : new LinkOption[]{LinkOption.NOFOLLOW_LINKS};
try {
return unixStat(f, linkOptions);
} catch (UnsupportedOperationException unsupported) {
try {
return posixStat(f, linkOptions);
} catch (UnsupportedOperationException unsupported2) {
return basicStat(f, linkOptions);
}
}
}
private PTuple unixStat(TruffleFile file, LinkOption... linkOptions) {
try {
TruffleFile.Attributes attributes = file.getAttributes(Arrays.asList(
UNIX_MODE,
UNIX_INODE,
UNIX_DEV,
UNIX_NLINK,
UNIX_UID,
UNIX_GID,
SIZE,
LAST_ACCESS_TIME,
LAST_MODIFIED_TIME,
UNIX_CTIME), linkOptions);
return factory().createTuple(new Object[]{
attributes.get(UNIX_MODE),
attributes.get(UNIX_INODE),
attributes.get(UNIX_DEV),
attributes.get(UNIX_NLINK),
attributes.get(UNIX_UID),
attributes.get(UNIX_GID),
attributes.get(SIZE),
fileTimeToSeconds(attributes.get(LAST_ACCESS_TIME)),
fileTimeToSeconds(attributes.get(LAST_MODIFIED_TIME)),
fileTimeToSeconds(attributes.get(UNIX_CTIME)),
});
} catch (IOException | SecurityException e) {
throw fileNoFound(file.getPath());
}
}
private PTuple posixStat(TruffleFile file, LinkOption... linkOptions) {
try {
int mode = 0;
long size = 0;
long ctime = 0;
long atime = 0;
long mtime = 0;
long gid = 0;
long uid = 0;
TruffleFile.Attributes attributes = file.getAttributes(Arrays.asList(
IS_DIRECTORY,
IS_SYMBOLIC_LINK,
IS_REGULAR_FILE,
LAST_MODIFIED_TIME,
LAST_ACCESS_TIME,
CREATION_TIME,
SIZE,
UNIX_OWNER,
UNIX_GROUP,
UNIX_PERMISSIONS), linkOptions);
mode |= fileTypeBitsFromAttributes(attributes);
mtime = fileTimeToSeconds(attributes.get(LAST_MODIFIED_TIME));
ctime = fileTimeToSeconds(attributes.get(CREATION_TIME));
atime = fileTimeToSeconds(attributes.get(LAST_ACCESS_TIME));
size = attributes.get(SIZE);
UserPrincipal owner = attributes.get(UNIX_OWNER);
if (owner instanceof UnixNumericUserPrincipal) {
try {
uid = strToLong(((UnixNumericUserPrincipal) owner).getName());
} catch (NumberFormatException e2) {
}
}
GroupPrincipal group = attributes.get(UNIX_GROUP);
if (group instanceof UnixNumericGroupPrincipal) {
try {
gid = strToLong(((UnixNumericGroupPrincipal) group).getName());
} catch (NumberFormatException e2) {
}
}
final Set<PosixFilePermission> posixFilePermissions = attributes.get(UNIX_PERMISSIONS);
mode = posixPermissionsToMode(mode, posixFilePermissions);
int inode = getInode(file);
return factory().createTuple(new Object[]{
mode,
inode, // ino
0, // dev
0, // nlink
uid,
gid,
size,
atime,
mtime,
ctime,
});
} catch (IOException | SecurityException e) {
throw fileNoFound(file.getPath());
}
}
private PTuple basicStat(TruffleFile file, LinkOption... linkOptions) {
try {
int mode = 0;
long size = 0;
long ctime = 0;
long atime = 0;
long mtime = 0;
long gid = 0;
long uid = 0;
TruffleFile.Attributes attributes = file.getAttributes(Arrays.asList(
IS_DIRECTORY,
IS_SYMBOLIC_LINK,
IS_REGULAR_FILE,
LAST_MODIFIED_TIME,
LAST_ACCESS_TIME,
CREATION_TIME,
SIZE), linkOptions);
mode |= fileTypeBitsFromAttributes(attributes);
mtime = fileTimeToSeconds(attributes.get(LAST_MODIFIED_TIME));
ctime = fileTimeToSeconds(attributes.get(CREATION_TIME));
atime = fileTimeToSeconds(attributes.get(LAST_ACCESS_TIME));
size = attributes.get(SIZE);
if (file.isReadable()) {
mode |= 0004;
mode |= 0040;
mode |= 0400;
}
if (file.isWritable()) {
mode |= 0002;
mode |= 0020;
mode |= 0200;
}
if (file.isExecutable()) {
mode |= 0001;
mode |= 0010;
mode |= 0100;
}
int inode = getInode(file);
return factory().createTuple(new Object[]{
mode,
inode, // ino
0, // dev
0, // nlink
uid,
gid,
size,
atime,
mtime,
ctime,
});
} catch (IOException | SecurityException e) {
throw fileNoFound(file.getPath());
}
}
private static int fileTypeBitsFromAttributes(TruffleFile.Attributes attributes) {
int mode = 0;
if (attributes.get(IS_REGULAR_FILE)) {
mode |= S_IFREG;
} else if (attributes.get(IS_DIRECTORY)) {
mode |= S_IFDIR;
} else if (attributes.get(IS_SYMBOLIC_LINK)) {
mode |= S_IFLNK;
} else {
// TODO: differentiate these
mode |= S_IFSOCK | S_IFBLK | S_IFCHR | S_IFIFO;
}
return mode;
}
private int getInode(TruffleFile file) {
TruffleFile canonical;
try {
canonical = file.getCanonicalFile();
} catch (IOException | SecurityException e) {
// best effort
canonical = file.getAbsoluteFile();
}
return getContext().getResources().getInodeId(canonical.getPath());
}
private PException fileNoFound(String path) {
fileNotFound.enter();
throw raise(FileNotFoundError, "No such file or directory: '%s'", path);
}
@TruffleBoundary(allowInlining = true, transferToInterpreterOnException = false)
private static long strToLong(String name) throws NumberFormatException {
return Long.decode(name).longValue();
}
@TruffleBoundary(allowInlining = true)
private static int posixPermissionsToMode(int inputMode, final Set<PosixFilePermission> posixFilePermissions) {
int mode = inputMode;
if (posixFilePermissions.contains(PosixFilePermission.OTHERS_READ)) {
mode |= 0004;
}
if (posixFilePermissions.contains(PosixFilePermission.OTHERS_WRITE)) {
mode |= 0002;
}
if (posixFilePermissions.contains(PosixFilePermission.OTHERS_EXECUTE)) {
mode |= 0001;
}
if (posixFilePermissions.contains(PosixFilePermission.GROUP_READ)) {
mode |= 0040;
}
if (posixFilePermissions.contains(PosixFilePermission.GROUP_WRITE)) {
mode |= 0020;
}
if (posixFilePermissions.contains(PosixFilePermission.GROUP_EXECUTE)) {
mode |= 0010;
}
if (posixFilePermissions.contains(PosixFilePermission.OWNER_READ)) {
mode |= 0400;
}
if (posixFilePermissions.contains(PosixFilePermission.OWNER_WRITE)) {
mode |= 0200;
}
if (posixFilePermissions.contains(PosixFilePermission.OWNER_EXECUTE)) {
mode |= 0100;
}
return mode;
}
public static StatNode create() {
return StatNodeFactory.create();
}
}
@Builtin(name = "listdir", minNumOfPositionalArgs = 1)
@GenerateNodeFactory
@TypeSystemReference(PythonArithmeticTypes.class)
public abstract static class ListdirNode extends PythonBuiltinNode {
@Specialization
Object listdir(VirtualFrame frame, Object pathArg,
@Cached CastToPathNode cast,
@Cached PRaiseOSErrorNode raiseOS) {
String path = cast.execute(frame, pathArg);
try {
TruffleFile file = getContext().getPublicTruffleFileRelaxed(path, PythonLanguage.DEFAULT_PYTHON_EXTENSIONS);
Collection<TruffleFile> listFiles = file.list();
Object[] filenames = listToArray(listFiles);
return factory().createList(filenames);
} catch (NoSuchFileException e) {
throw raiseOS.raiseOSError(frame, OSErrorEnum.ENOENT, path);
} catch (SecurityException e) {
throw raiseOS.raiseOSError(frame, OSErrorEnum.EPERM, path);
} catch (IOException e) {
throw raiseOS.raiseOSError(frame, OSErrorEnum.ENOTDIR, path);
}
}
@TruffleBoundary(allowInlining = true, transferToInterpreterOnException = false)
private static Object[] listToArray(Collection<TruffleFile> listFiles) {
Object[] filenames = new Object[listFiles.size()];
int i = 0;
for (TruffleFile f : listFiles) {
filenames[i] = f.getName();
i += 1;
}
return filenames;
}
}
@Builtin(name = "ScandirIterator", minNumOfPositionalArgs = 2, constructsClass = PythonBuiltinClassType.PScandirIterator, isPublic = true)
@GenerateNodeFactory
@TypeSystemReference(PythonArithmeticTypes.class)
public abstract static class ScandirIterNode extends PythonBinaryBuiltinNode {
private final BranchProfile gotException = BranchProfile.create();
@Specialization
Object doit(VirtualFrame frame, LazyPythonClass cls, Object pathArg,
@Cached CastToPathNode cast) {
String path = cast.execute(frame, pathArg);
try {
TruffleFile file = getContext().getEnv().getPublicTruffleFile(path);
return factory().createScandirIterator(cls, path, file.newDirectoryStream());
} catch (SecurityException | IOException e) {
gotException.enter();
throw raise(OSError, path);
}
}
}
@Builtin(name = "DirEntry", minNumOfPositionalArgs = 3, constructsClass = PythonBuiltinClassType.PDirEntry, isPublic = true)
@GenerateNodeFactory
@TypeSystemReference(PythonArithmeticTypes.class)
public abstract static class DirEntryNode extends PythonTernaryBuiltinNode {
private final BranchProfile gotException = BranchProfile.create();
@Specialization
Object doit(VirtualFrame frame, LazyPythonClass cls, String name, Object pathArg,
@Cached CastToPathNode cast) {
String path = cast.execute(frame, pathArg);
try {
TruffleFile dir = getContext().getEnv().getPublicTruffleFile(path);
TruffleFile file = dir.resolve(name);
return factory().createDirEntry(cls, name, file);
} catch (SecurityException | InvalidPathException e) {
gotException.enter();
throw raise(OSError, path);
}
}
}
@Builtin(name = "dup", minNumOfPositionalArgs = 1)
@GenerateNodeFactory
abstract static class DupNode extends PythonFileNode {
@Specialization
int dupInt(int fd) {
return getResources().dup(fd);
}
@Specialization(replaces = "dupInt")
int dupGeneric(Object fd,
@Cached CastToJavaIntNode castToJavaIntNode) {
return getResources().dup(castToJavaIntNode.execute(fd));
}
}
@Builtin(name = "dup2", minNumOfPositionalArgs = 2)
@GenerateNodeFactory
@TypeSystemReference(PythonArithmeticTypes.class)
abstract static class Dup2Node extends PythonFileNode {
@Specialization
int dup(int fd, int fd2) {
try {
return getResources().dup2(fd, fd2);
} catch (IOException e) {
throw raise(OSError, "invalid fd %r", fd2);
}
}
@Specialization(rewriteOn = ArithmeticException.class)
int dupPInt(PInt fd, PInt fd2) {
try {
return getResources().dup2(fd.intValueExact(), fd2.intValueExact());
} catch (IOException e) {
throw raise(OSError, "invalid fd %r", fd2);
}
}
@Specialization(replaces = "dupPInt")
int dupOvf(PInt fd, PInt fd2) {
try {
return dupPInt(fd, fd2);
} catch (ArithmeticException e) {
throw raise(OSError, "invalid fd %r", fd);
}
}
}
@Builtin(name = "open", minNumOfPositionalArgs = 2, parameterNames = {"pathname", "flags", "mode", "dir_fd"})
@GenerateNodeFactory
@TypeSystemReference(PythonArithmeticTypes.class)
public abstract static class OpenNode extends PythonFileNode {
private final BranchProfile gotException = BranchProfile.create();
@Specialization(guards = {"isNoValue(mode)", "isNoValue(dir_fd)"})
Object open(VirtualFrame frame, Object pathname, int flags, @SuppressWarnings("unused") PNone mode, PNone dir_fd,
@Cached CastToPathNode cast) {
return openMode(frame, pathname, flags, 0777, dir_fd, cast);
}
@Specialization(guards = {"isNoValue(dir_fd)"})
Object openMode(VirtualFrame frame, Object pathArg, int flags, int fileMode, @SuppressWarnings("unused") PNone dir_fd,
@Cached CastToPathNode cast) {
String pathname = cast.execute(frame, pathArg);
Set<StandardOpenOption> options = flagsToOptions(flags);
FileAttribute<Set<PosixFilePermission>>[] attributes = modeToAttributes(fileMode);
try {
SeekableByteChannel fc;
TruffleFile truffleFile = getContext().getPublicTruffleFileRelaxed(pathname, PythonLanguage.DEFAULT_PYTHON_EXTENSIONS);
if (options.contains(StandardOpenOption.DELETE_ON_CLOSE)) {
truffleFile = getContext().getEnv().createTempFile(truffleFile, null, null);
options.remove(StandardOpenOption.CREATE_NEW);
options.remove(StandardOpenOption.DELETE_ON_CLOSE);
options.add(StandardOpenOption.CREATE);
getContext().registerShutdownHook(new FileDeleteShutdownHook(truffleFile));
}
fc = truffleFile.newByteChannel(options, attributes);
return getResources().open(truffleFile, fc);
} catch (NoSuchFileException e) {
gotException.enter();
throw raiseOSError(frame, OSErrorEnum.ENOENT, e.getFile());
} catch (AccessDeniedException e) {
gotException.enter();
throw raiseOSError(frame, OSErrorEnum.EACCES, e.getFile());
} catch (FileSystemException e) {
gotException.enter();
// TODO FileSystemException can have more reasons, not only "is a directory" -> should
// be handled more accurately
throw raiseOSError(frame, OSErrorEnum.EISDIR, e.getFile());
} catch (IOException e) {
gotException.enter();
// if this happens, we should raise OSError with the appropriate errno
throw raiseOSError(frame, -1);
}
}
@SuppressWarnings({"unchecked", "rawtypes"})
@TruffleBoundary(allowInlining = true)
private static FileAttribute<Set<PosixFilePermission>>[] modeToAttributes(int fileMode) {
FileAttribute<Set<PosixFilePermission>> fa1 = PosixFilePermissions.asFileAttribute(new HashSet<>(Arrays.asList(otherBitsToPermission[fileMode & 7])));
FileAttribute<Set<PosixFilePermission>> fa2 = PosixFilePermissions.asFileAttribute(new HashSet<>(Arrays.asList(groupBitsToPermission[fileMode >> 3 & 7])));
FileAttribute<Set<PosixFilePermission>> fa3 = PosixFilePermissions.asFileAttribute(new HashSet<>(Arrays.asList(ownerBitsToPermission[fileMode >> 6 & 7])));
return new FileAttribute[]{fa1, fa2, fa3};
}
@TruffleBoundary(allowInlining = true)
private static Set<StandardOpenOption> flagsToOptions(int flags) {
Set<StandardOpenOption> options = new HashSet<>();
if ((flags & WRONLY) != 0) {
options.add(StandardOpenOption.WRITE);
} else if ((flags & RDWR) != 0) {
options.add(StandardOpenOption.READ);
options.add(StandardOpenOption.WRITE);
} else {
options.add(StandardOpenOption.READ);
}
if ((flags & CREAT) != 0) {
options.add(StandardOpenOption.WRITE);
options.add(StandardOpenOption.CREATE);
}
if ((flags & EXCL) != 0) {
options.add(StandardOpenOption.WRITE);
options.add(StandardOpenOption.CREATE_NEW);
}
if ((flags & APPEND) != 0) {
options.add(StandardOpenOption.WRITE);
options.add(StandardOpenOption.APPEND);
}
if ((flags & NDELAY) != 0 || (flags & DIRECT) != 0) {
options.add(StandardOpenOption.DSYNC);
}
if ((flags & SYNC) != 0) {
options.add(StandardOpenOption.SYNC);
}
if ((flags & TRUNC) != 0) {
options.add(StandardOpenOption.WRITE);
options.add(StandardOpenOption.TRUNCATE_EXISTING);
}
if ((flags & TMPFILE) != 0) {
options.add(StandardOpenOption.DELETE_ON_CLOSE);
}
return options;
}
}
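// Worked example for flagsToOptions()/modeToAttributes() above (illustrative only, assuming the
// *BitsToPermission tables map the standard POSIX rwx bits): a call like os.open(path, CREAT | WRONLY | TRUNC, 0700)
// yields the options {WRITE, CREATE, TRUNCATE_EXISTING} and file attributes granting the owner
// read/write/execute while group and others get no permissions.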
@Builtin(name = "lseek", minNumOfPositionalArgs = 3)
@GenerateNodeFactory
@TypeSystemReference(PythonArithmeticTypes.class)
public abstract static class LseekNode extends PythonFileNode {
private final BranchProfile gotException = BranchProfile.create();
private final ConditionProfile noFile = ConditionProfile.createBinaryProfile();
@Specialization
Object lseek(VirtualFrame frame, long fd, long pos, int how,
@Shared("channelClassProfile") @Cached("createClassProfile()") ValueProfile channelClassProfile) {
Channel channel = getResources().getFileChannel((int) fd, channelClassProfile);
if (noFile.profile(!(channel instanceof SeekableByteChannel))) {
throw raiseOSError(frame, OSErrorEnum.ESPIPE);
}
SeekableByteChannel fc = (SeekableByteChannel) channel;
try {
return setPosition(pos, how, fc);
} catch (IOException e) {
gotException.enter();
// if this happens, we should raise OSError with the appropriate errno
throw raiseOSError(frame, -1);
}
}
@Specialization
Object lseekGeneric(VirtualFrame frame, Object fd, Object pos, Object how,
@Shared("channelClassProfile") @Cached("createClassProfile()") ValueProfile channelClassProfile,
@Cached CoerceToJavaLongNode castFdNode,
@Cached CoerceToJavaLongNode castPosNode,
@Cached CastToJavaIntNode castHowNode) {
return lseek(frame, castFdNode.execute(fd), castPosNode.execute(pos), castHowNode.execute(how), channelClassProfile);
}
@TruffleBoundary(allowInlining = true)
private static Object setPosition(long pos, int how, SeekableByteChannel fc) throws IOException {
switch (how) {
case SEEK_CUR:
fc.position(fc.position() + pos);
break;
case SEEK_END:
fc.position(fc.size() + pos);
break;
case SEEK_SET:
default:
fc.position(pos);
}
return fc.position();
}
}
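// Illustrative summary of the whence handling in setPosition() above: SEEK_SET (and any unknown value)
// positions the channel at pos, SEEK_CUR at the current position + pos, and SEEK_END at size + pos,
// so for example lseek(fd, -10, SEEK_END) leaves the position 10 bytes before the end of the file.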
@Builtin(name = "close", minNumOfPositionalArgs = 1)
@GenerateNodeFactory
public abstract static class CloseNode extends PythonFileNode {
private final ConditionProfile noFile = ConditionProfile.createBinaryProfile();
@Specialization(limit = "getCallSiteInlineCacheMaxDepth()")
Object close(VirtualFrame frame, Object fdObject,
@CachedLibrary("fdObject") PythonObjectLibrary lib,
@Cached("createClassProfile()") ValueProfile channelClassProfile) {
int fd = lib.asSizeWithState(fdObject, PArguments.getThreadState(frame));
PosixResources resources = getResources();
Channel channel = resources.getFileChannel(fd, channelClassProfile);
if (noFile.profile(channel == null)) {
throw raise(OSError, "invalid fd");
} else {
resources.close(fd);
}
return PNone.NONE;
}
@TruffleBoundary(allowInlining = true, transferToInterpreterOnException = false)
private static void closeChannel(Channel channel) throws IOException {
channel.close();
}
}
@Builtin(name = "unlink", minNumOfPositionalArgs = 1)
@GenerateNodeFactory
@TypeSystemReference(PythonArithmeticTypes.class)
public abstract static class UnlinkNode extends PythonFileNode {
private final BranchProfile gotException = BranchProfile.create();
@Specialization
Object unlink(VirtualFrame frame, Object pathArg,
@Cached CastToPathNode cast) {
String path = cast.execute(frame, pathArg);
try {
getContext().getEnv().getPublicTruffleFile(path).delete();
} catch (RuntimeException | IOException e) {
gotException.enter();
throw raise(OSError, e);
}
return PNone.NONE;
}
}
@Builtin(name = "remove", minNumOfPositionalArgs = 1)
@GenerateNodeFactory
public abstract static class RemoveNode extends UnlinkNode {
}
@Builtin(name = "rmdir", minNumOfPositionalArgs = 1)
@GenerateNodeFactory
public abstract static class RmdirNode extends UnlinkNode {
}
@Builtin(name = "mkdir", minNumOfPositionalArgs = 1, parameterNames = {"path", "mode", "dir_fd"})
@GenerateNodeFactory
@TypeSystemReference(PythonArithmeticTypes.class)
public abstract static class MkdirNode extends PythonFileNode {
private final BranchProfile gotException = BranchProfile.create();
@Specialization
Object mkdir(VirtualFrame frame, Object path, @SuppressWarnings("unused") PNone mode, PNone dirFd,
@Cached CastToPathNode cast) {
return mkdirMode(frame, path, 511, dirFd, cast);
}
@Specialization
Object mkdirMode(VirtualFrame frame, Object pathArg, @SuppressWarnings("unused") int mode, @SuppressWarnings("unused") PNone dirFd,
@Cached CastToPathNode cast) {
String path = cast.execute(frame, pathArg);
try {
getContext().getEnv().getPublicTruffleFile(path).createDirectory();
} catch (FileAlreadyExistsException e) {
throw raiseOSError(frame, OSErrorEnum.EEXIST, path);
} catch (RuntimeException | IOException e) {
gotException.enter();
// if this happens, we should raise OSError with the appropriate errno
throw raiseOSError(frame, -1);
}
return PNone.NONE;
}
}
@Builtin(name = "write", minNumOfPositionalArgs = 2)
@GenerateNodeFactory
@TypeSystemReference(PythonArithmeticTypes.class)
public abstract static class WriteNode extends PythonFileNode {
@Child private SequenceStorageNodes.ToByteArrayNode toByteArrayNode;
private final BranchProfile gotException = BranchProfile.create();
private final BranchProfile notWritable = BranchProfile.create();
public abstract Object executeWith(VirtualFrame frame, Object fd, Object data);
@Specialization
Object write(int fd, byte[] data,
@Cached("createClassProfile()") ValueProfile channelClassProfile) {
Channel channel = getResources().getFileChannel(fd, channelClassProfile);
if (channel instanceof WritableByteChannel) {
try {
return doWriteOp(data, (WritableByteChannel) channel);
} catch (NonWritableChannelException | IOException e) {
gotException.enter();
throw raise(OSError, e);
}
} else {
notWritable.enter();
throw raise(OSError, "file not opened for writing");
}
}
@TruffleBoundary(allowInlining = true, transferToInterpreterOnException = false)
private static int doWriteOp(byte[] data, WritableByteChannel channel) throws IOException {
return channel.write(ByteBuffer.wrap(data));
}
@Specialization
Object write(int fd, String data,
@Cached("createClassProfile()") ValueProfile channelClassProfile) {
return write(fd, stringToBytes(data), channelClassProfile);
}
@TruffleBoundary
private static byte[] stringToBytes(String data) {
return data.getBytes();
}
@Specialization
Object write(int fd, PBytes data,
@Cached("createClassProfile()") ValueProfile channelClassProfile) {
return write(fd, getByteArray(data), channelClassProfile);
}
@Specialization
Object write(int fd, PByteArray data,
@Cached("createClassProfile()") ValueProfile channelClassProfile) {
return write(fd, getByteArray(data), channelClassProfile);
}
@Specialization(limit = "getCallSiteInlineCacheMaxDepth()")
Object writePInt(VirtualFrame frame, Object fd, Object data,
@CachedLibrary("fd") PythonObjectLibrary lib,
@Cached("create()") WriteNode recursive) {
return recursive.executeWith(frame, lib.asSizeWithState(fd, PArguments.getThreadState(frame)), data);
}
private byte[] getByteArray(PIBytesLike pByteArray) {
if (toByteArrayNode == null) {
CompilerDirectives.transferToInterpreterAndInvalidate();
toByteArrayNode = insert(ToByteArrayNodeGen.create());
}
return toByteArrayNode.execute(pByteArray.getSequenceStorage());
}
public static WriteNode create() {
return PosixModuleBuiltinsFactory.WriteNodeFactory.create(null);
}
}
@Builtin(name = "read", minNumOfPositionalArgs = 2)
@GenerateNodeFactory
@TypeSystemReference(PythonArithmeticTypes.class)
public abstract static class ReadNode extends PythonFileNode {
@CompilationFinal private BranchProfile tooLargeProfile = BranchProfile.create();
@Specialization
Object readLong(@SuppressWarnings("unused") VirtualFrame frame, int fd, long requestedSize,
@Shared("profile") @Cached("createClassProfile()") ValueProfile channelClassProfile,
@Shared("readNode") @Cached ReadFromChannelNode readNode) {
int size;
try {
size = Math.toIntExact(requestedSize);
} catch (ArithmeticException e) {
tooLargeProfile.enter();
size = ReadFromChannelNode.MAX_READ;
}
Channel channel = getResources().getFileChannel(fd, channelClassProfile);
ByteSequenceStorage array = readNode.execute(channel, size);
return factory().createBytes(array);
}
@Specialization
Object read(@SuppressWarnings("unused") VirtualFrame frame, int fd, Object requestedSize,
@Shared("profile") @Cached("createClassProfile()") ValueProfile channelClassProfile,
@Shared("readNode") @Cached ReadFromChannelNode readNode,
@Cached CoerceToJavaLongNode castToLongNode) {
return readLong(frame, fd, castToLongNode.execute(requestedSize), channelClassProfile, readNode);
}
@Specialization
Object readFdGeneric(@SuppressWarnings("unused") VirtualFrame frame, Object fd, Object requestedSize,
@Shared("profile") @Cached("createClassProfile()") ValueProfile channelClassProfile,
@Shared("readNode") @Cached ReadFromChannelNode readNode,
@Cached CoerceToJavaLongNode castToLongNode,
@Cached CastToJavaIntNode castToIntNode) {
return readLong(frame, castToIntNode.execute(fd), castToLongNode.execute(requestedSize), channelClassProfile, readNode);
}
}
@Builtin(name = "isatty", minNumOfPositionalArgs = 1)
@GenerateNodeFactory
@TypeSystemReference(PythonArithmeticTypes.class)
public abstract static class IsATTYNode extends PythonBuiltinNode {
@Specialization
boolean isATTY(long fd) {
if (fd >= 0 && fd <= 2) {
return terminalIsInteractive(getContext());
} else {
return false;
}
}
@Fallback
boolean isATTY(@SuppressWarnings("unused") Object fd) {
return false;
}
}
@Builtin(name = "_exit", minNumOfPositionalArgs = 1)
@GenerateNodeFactory
@TypeSystemReference(PythonArithmeticTypes.class)
public abstract static class ExitNode extends PythonBuiltinNode {
@Specialization
Object exit(int status) {
throw new PythonExitException(this, status);
}
}
@Builtin(name = "chmod", minNumOfPositionalArgs = 2, parameterNames = {"path", "mode", "dir_fd", "follow_symlinks"})
@GenerateNodeFactory
@TypeSystemReference(PythonArithmeticTypes.class)
abstract static class ChmodNode extends PythonBuiltinNode {
private final BranchProfile gotException = BranchProfile.create();
@Specialization
Object chmod(VirtualFrame frame, Object path, long mode, @SuppressWarnings("unused") PNone dir_fd, @SuppressWarnings("unused") PNone follow_symlinks,
@Cached CastToPathNode cast) {
return chmodFollow(frame, path, mode, dir_fd, true, cast);
}
@Specialization
Object chmodFollow(VirtualFrame frame, Object pathArg, long mode, @SuppressWarnings("unused") PNone dir_fd, boolean follow_symlinks,
@Cached CastToPathNode cast) {
String path = cast.execute(frame, pathArg);
Set<PosixFilePermission> permissions = modeToPermissions(mode);
try {
TruffleFile truffleFile = getContext().getEnv().getPublicTruffleFile(path);
if (!follow_symlinks) {
truffleFile = truffleFile.getCanonicalFile(LinkOption.NOFOLLOW_LINKS);
} else {
truffleFile = truffleFile.getCanonicalFile();
}
truffleFile.setPosixPermissions(permissions);
} catch (IOException e) {
gotException.enter();
throw raise(OSError, e);
}
return PNone.NONE;
}
@TruffleBoundary(allowInlining = true)
private static Set<PosixFilePermission> modeToPermissions(long mode) {
Set<PosixFilePermission> permissions = new HashSet<>(Arrays.asList(otherBitsToPermission[(int) (mode & 7)]));
permissions.addAll(Arrays.asList(groupBitsToPermission[(int) (mode >> 3 & 7)]));
permissions.addAll(Arrays.asList(ownerBitsToPermission[(int) (mode >> 6 & 7)]));
return permissions;
}
}
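// Worked example for modeToPermissions() above (illustrative, assuming the *BitsToPermission tables
// encode the usual POSIX rwx bits): chmod(path, 0750) resolves to owner read/write/execute plus
// group read/execute, with no permissions for others.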
@Builtin(name = "utime", minNumOfPositionalArgs = 1, parameterNames = {"path", "times", "ns", "dir_fd", "follow_symlinks"})
@GenerateNodeFactory
@TypeSystemReference(PythonArithmeticTypes.class)
abstract static class UtimeNode extends PythonBuiltinNode {
@Child private GetItemNode getItemNode;
@Child private LenNode lenNode;
@Child private CastToPathNode castNode = CastToPathNode.create();
@SuppressWarnings("unused")
@Specialization
Object utime(VirtualFrame frame, Object path, PNone times, PNone ns, PNone dir_fd, PNone follow_symlinks) {
long time = ((Double) TimeModuleBuiltins.timeSeconds()).longValue();
setMtime(getFile(castNode.execute(frame, path), true), time);
setAtime(getFile(castNode.execute(frame, path), true), time);
return PNone.NONE;
}
@SuppressWarnings("unused")
@Specialization
Object utime(VirtualFrame frame, Object path, PTuple times, PNone ns, PNone dir_fd, PNone follow_symlinks) {
long atime = getTime(frame, times, 0, "times");
long mtime = getTime(frame, times, 1, "times");
setMtime(getFile(castNode.execute(frame, path), true), mtime);
setAtime(getFile(castNode.execute(frame, path), true), atime);
return PNone.NONE;
}
@SuppressWarnings("unused")
@Specialization
Object utime(VirtualFrame frame, Object path, PNone times, PTuple ns, PNone dir_fd, PNone follow_symlinks) {
long atime = getTime(frame, ns, 0, "ns") / 1000;
long mtime = getTime(frame, ns, 1, "ns") / 1000;
setMtime(getFile(castNode.execute(frame, path), true), mtime);
setAtime(getFile(castNode.execute(frame, path), true), atime);
return PNone.NONE;
}
@SuppressWarnings("unused")
@Specialization
Object utime(VirtualFrame frame, Object path, PNone times, PTuple ns, PNone dir_fd, boolean follow_symlinks) {
long atime = getTime(frame, ns, 0, "ns") / 1000;
long mtime = getTime(frame, ns, 1, "ns") / 1000;
setMtime(getFile(castNode.execute(frame, path), true), mtime);
setAtime(getFile(castNode.execute(frame, path), true), atime);
return PNone.NONE;
}
@SuppressWarnings("unused")
@Specialization(guards = {"!isPNone(times)", "!isPTuple(times)"})
Object utimeWrongTimes(VirtualFrame frame, Object path, Object times, Object ns, Object dir_fd, Object follow_symlinks) {
throw tupleError("times");
}
@SuppressWarnings("unused")
@Specialization(guards = {"!isPTuple(ns)", "!isPNone(ns)"})
Object utimeWrongNs(VirtualFrame frame, Object path, PNone times, Object ns, Object dir_fd, Object follow_symlinks) {
throw tupleError("ns");
}
@SuppressWarnings("unused")
@Specialization(guards = {"!isPNone(ns)"})
Object utimeWrongNs(VirtualFrame frame, Object path, PTuple times, Object ns, Object dir_fd, Object follow_symlinks) {
throw raise(ValueError, "utime: you may specify either 'times' or 'ns' but not both");
}
@SuppressWarnings("unused")
@Fallback
Object utimeError(VirtualFrame frame, Object path, Object times, Object ns, Object dir_fd, Object follow_symlinks) {
throw raise(NotImplementedError, "utime");
}
private long getTime(VirtualFrame frame, PTuple times, int index, String argname) {
if (getLength(times) <= index) {
throw tupleError(argname);
}
if (getItemNode == null) {
CompilerDirectives.transferToInterpreterAndInvalidate();
getItemNode = insert(GetItemNode.createNotNormalized());
}
Object mtimeObj = getItemNode.execute(frame, times.getSequenceStorage(), index);
long mtime;
if (mtimeObj instanceof Integer) {
mtime = ((Integer) mtimeObj).longValue();
} else if (mtimeObj instanceof Long) {
mtime = ((Long) mtimeObj).longValue();
} else if (mtimeObj instanceof PInt) {
mtime = ((PInt) mtimeObj).longValue();
} else if (mtimeObj instanceof Double) {
mtime = ((Double) mtimeObj).longValue();
} else if (mtimeObj instanceof PFloat) {
mtime = (long) ((PFloat) mtimeObj).getValue();
} else {
throw tupleError(argname);
}
if (mtime < 0) {
throw raise(ValueError, "time cannot be negative");
}
return mtime;
}
private PException tupleError(String argname) {
return raise(TypeError, "utime: '%s' must be either a tuple of two ints or None", argname);
}
private void setMtime(TruffleFile truffleFile, long mtime) {
try {
truffleFile.setLastModifiedTime(FileTime.from(mtime, TimeUnit.SECONDS));
} catch (IOException | SecurityException e) {
throw raise();
}
}
private void setAtime(TruffleFile truffleFile, long mtime) {
try {
truffleFile.setLastAccessTime(FileTime.from(mtime, TimeUnit.SECONDS));
} catch (IOException | SecurityException e) {
throw raise();
}
}
private TruffleFile getFile(String path, boolean followSymlinks) {
TruffleFile truffleFile = getContext().getEnv().getPublicTruffleFile(path);
if (!followSymlinks) {
try {
truffleFile = truffleFile.getCanonicalFile(LinkOption.NOFOLLOW_LINKS);
} catch (IOException | SecurityException e) {
throw raise();
}
}
return truffleFile;
}
private PException raise() {
throw raise(ValueError, "Operation not allowed");
}
private int getLength(PTuple times) {
if (lenNode == null) {
CompilerDirectives.transferToInterpreterAndInvalidate();
lenNode = insert(SequenceNodes.LenNode.create());
}
return lenNode.execute(times);
}
}
@Builtin(name = "waitpid", minNumOfPositionalArgs = 2)
@GenerateNodeFactory
abstract static class WaitpidNode extends PythonFileNode {
@SuppressWarnings("unused")
@Specialization
PTuple waitpid(VirtualFrame frame, int pid, int options) {
try {
if (options == 0) {
int exitStatus = getResources().waitpid(pid);
return factory().createTuple(new Object[]{pid, exitStatus});
} else if (options == WNOHANG) {
int exitStatus = getResources().exitStatus(pid);
if (exitStatus == Integer.MIN_VALUE) {
// not terminated yet, so we should return 0
return factory().createTuple(new Object[]{0, 0});
} else {
return factory().createTuple(new Object[]{pid, exitStatus});
}
} else {
throw raise(PythonBuiltinClassType.NotImplementedError, "Only 0 or WNOHANG are supported for waitpid");
}
} catch (IndexOutOfBoundsException e) {
throw raiseOSError(frame, OSErrorEnum.ESRCH.getNumber());
} catch (InterruptedException e) {
throw raiseOSError(frame, OSErrorEnum.EINTR.getNumber());
}
}
@SuppressWarnings("unused")
@Specialization
PTuple waitpidFallback(VirtualFrame frame, Object pid, Object options,
@CachedLibrary(limit = "2") PythonObjectLibrary lib) {
ThreadState threadState = PArguments.getThreadState(frame);
return waitpid(frame, lib.asSizeWithState(pid, threadState), lib.asSizeWithState(options, threadState));
}
}
@Builtin(name = "system", minNumOfPositionalArgs = 1)
@GenerateNodeFactory
@TypeSystemReference(PythonArithmeticTypes.class)
abstract static class SystemNode extends PythonBuiltinNode {
static final String[] shell;
static {
String osProperty = System.getProperty("os.name");
shell = osProperty != null && osProperty.toLowerCase(Locale.ENGLISH).startsWith("windows") ? new String[]{"cmd.exe", "/c"}
: new String[]{(System.getenv().getOrDefault("SHELL", "sh")), "-c"};
}
static class PipePump extends Thread {
private static final int MAX_READ = 8192;
private final InputStream in;
private final OutputStream out;
private final byte[] buffer;
private volatile boolean finish;
public PipePump(String name, InputStream in, OutputStream out) {
this.setName(name);
this.in = in;
this.out = out;
this.buffer = new byte[MAX_READ];
this.finish = false;
}
@Override
public void run() {
try {
while (!finish || in.available() > 0) {
if (Thread.interrupted()) {
finish = true;
}
int read = in.read(buffer, 0, Math.min(MAX_READ, in.available()));
if (read == -1) {
return;
}
out.write(buffer, 0, read);
}
} catch (IOException e) {
}
}
public void finish() {
finish = true;
// Make ourselves max priority to flush data out as quickly as possible
setPriority(Thread.MAX_PRIORITY);
Thread.yield();
}
}
@TruffleBoundary
@Specialization
int system(String cmd) {
PythonContext context = getContext();
if (!context.isExecutableAccessAllowed()) {
return -1;
}
PythonLanguage.getLogger().fine(() -> "os.system: " + cmd);
String[] command = new String[]{shell[0], shell[1], cmd};
Env env = context.getEnv();
try {
ProcessBuilder pb = new ProcessBuilder(command);
pb.directory(new File(env.getCurrentWorkingDirectory().getPath()));
PipePump stdout = null, stderr = null;
boolean stdsArePipes = !terminalIsInteractive(context);
if (stdsArePipes) {
pb.redirectInput(Redirect.PIPE);
pb.redirectOutput(Redirect.PIPE);
pb.redirectError(Redirect.PIPE);
} else {
pb.inheritIO();
}
Process proc = pb.start();
if (stdsArePipes) {
proc.getOutputStream().close(); // stdin will be closed
stdout = new PipePump(cmd + " [stdout]", proc.getInputStream(), env.out());
stderr = new PipePump(cmd + " [stderr]", proc.getErrorStream(), env.err());
stdout.start();
stderr.start();
}
int exitStatus = proc.waitFor();
if (stdsArePipes) {
stdout.finish();
stderr.finish();
}
return exitStatus;
} catch (IOException | InterruptedException e) {
return -1;
}
}
}
@Builtin(name = "pipe", minNumOfPositionalArgs = 0)
@GenerateNodeFactory
@TypeSystemReference(PythonArithmeticTypes.class)
abstract static class PipeNode extends PythonFileNode {
private final BranchProfile gotException = BranchProfile.create();
@Specialization
PTuple pipe() {
int[] pipe;
try {
pipe = getResources().pipe();
} catch (IOException e) {
gotException.enter();
throw raise(OSError, e);
}
return factory().createTuple(new Object[]{pipe[0], pipe[1]});
}
}
@Builtin(name = "rename", minNumOfPositionalArgs = 2, takesVarArgs = true, takesVarKeywordArgs = true)
@GenerateNodeFactory
public abstract static class RenameNode extends PythonFileNode {
@Specialization
Object rename(VirtualFrame frame, Object src, Object dst, @SuppressWarnings("unused") Object[] args, @SuppressWarnings("unused") PNone kwargs,
@Cached CastToPathNode convertSrcNode,
@Cached CastToPathNode convertDstNode) {
return rename(convertSrcNode.execute(frame, src), convertDstNode.execute(frame, dst));
}
@Specialization
Object rename(VirtualFrame frame, Object src, Object dst, @SuppressWarnings("unused") Object[] args, PKeyword[] kwargs,
@Cached CastToPathNode convertSrcNode,
@Cached CastToPathNode convertDstNode) {
Object effectiveSrc = src;
Object effectiveDst = dst;
PosixResources resources = getResources();
for (int i = 0; i < kwargs.length; i++) {
Object value = kwargs[i].getValue();
if ("src_dir_fd".equals(kwargs[i].getName())) {
if (!(value instanceof Integer)) {
throw raise(OSError, "invalid file descriptor provided");
}
effectiveSrc = resources.getFilePath((int) value);
} else if ("dst_dir_fd".equals(kwargs[i].getName())) {
if (!(value instanceof Integer)) {
throw raise(OSError, "invalid file descriptor provided");
}
effectiveDst = resources.getFilePath((int) value);
}
}
return rename(convertSrcNode.execute(frame, effectiveSrc), convertDstNode.execute(frame, effectiveDst));
}
private Object rename(String src, String dst) {
try {
TruffleFile dstFile = getContext().getEnv().getPublicTruffleFile(dst);
if (dstFile.isDirectory()) {
throw raise(OSError, "%s is a directory", dst);
}
TruffleFile file = getContext().getEnv().getPublicTruffleFile(src);
file.move(dstFile, StandardCopyOption.REPLACE_EXISTING, StandardCopyOption.ATOMIC_MOVE);
return PNone.NONE;
} catch (IOException e) {
throw raise(OSError, "cannot rename %s to %s", src, dst);
}
}
}
@Builtin(name = "replace", minNumOfPositionalArgs = 2, takesVarArgs = true, takesVarKeywordArgs = true)
@GenerateNodeFactory
public abstract static class ReplaceNode extends RenameNode {
}
@Builtin(name = "urandom", minNumOfPositionalArgs = 1)
@GenerateNodeFactory
@TypeSystemReference(PythonArithmeticTypes.class)
abstract static class URandomNode extends PythonBuiltinNode {
@Specialization
@TruffleBoundary(allowInlining = true)
PBytes urandom(int size) {
// size is in bytes
BigInteger bigInteger = new BigInteger(size * 8, new Random());
// sign may introduce an extra byte
byte[] range = Arrays.copyOfRange(bigInteger.toByteArray(), 0, size);
return factory().createBytes(range);
}
}
@Builtin(name = "uname", minNumOfPositionalArgs = 0)
@GenerateNodeFactory
@TypeSystemReference(PythonArithmeticTypes.class)
abstract static class UnameNode extends PythonBuiltinNode {
@Specialization
@TruffleBoundary(allowInlining = true)
PTuple uname() {
String sysname = SysModuleBuiltins.getPythonOSName();
String nodename = "";
try {
InetAddress addr;
addr = InetAddress.getLocalHost();
nodename = addr.getHostName();
} catch (UnknownHostException | SecurityException ex) {
}
String release = System.getProperty("os.version", "");
String version = "";
String machine = SysModuleBuiltins.getPythonArch();
return factory().createTuple(new Object[]{sysname, nodename, release, version, machine});
}
}
@Builtin(name = "access", minNumOfPositionalArgs = 2, varArgsMarker = true, keywordOnlyNames = {"dir_fd", "effective_ids", "follow_symlinks"})
@GenerateNodeFactory
public abstract static class AccessNode extends PythonBuiltinNode {
@Child private CastToPathNode castToPathNode;
private final BranchProfile notImplementedBranch = BranchProfile.create();
@Specialization(limit = "getCallSiteInlineCacheMaxDepth()")
boolean doGeneric(VirtualFrame frame, Object path, Object mode, @SuppressWarnings("unused") PNone dir_fd, @SuppressWarnings("unused") PNone effective_ids,
@SuppressWarnings("unused") PNone follow_symlinks,
@CachedLibrary("mode") PythonObjectLibrary lib) {
return access(castToPath(frame, path), lib.asSizeWithState(mode, PArguments.getThreadState(frame)), PNone.NONE, false, true);
}
private String castToPath(VirtualFrame frame, Object path) {
if (castToPathNode == null) {
CompilerDirectives.transferToInterpreterAndInvalidate();
castToPathNode = insert(CastToPathNode.create());
}
return castToPathNode.execute(frame, path);
}
@Specialization
boolean access(String path, int mode, Object dirFd, boolean effectiveIds, boolean followSymlinks) {
if (dirFd != PNone.NONE || effectiveIds) {
// TODO implement
notImplementedBranch.enter();
throw raise(NotImplementedError);
}
TruffleFile f = getContext().getEnv().getPublicTruffleFile(path);
LinkOption[] linkOptions = followSymlinks ? new LinkOption[0] : new LinkOption[]{LinkOption.NOFOLLOW_LINKS};
if (!f.exists(linkOptions)) {
return false;
}
boolean result = true;
if ((mode & X_OK) != 0) {
result = result && f.isExecutable();
}
if ((mode & R_OK) != 0) {
result = result && f.isReadable();
}
if ((mode & W_OK) != 0) {
result = result && f.isWritable();
}
return result;
}
}
@Builtin(name = "cpu_count", minNumOfPositionalArgs = 0)
@GenerateNodeFactory
abstract static class CpuCountNode extends PythonBuiltinNode {
@Specialization
int getCpuCount() {
return Runtime.getRuntime().availableProcessors();
}
}
@Builtin(name = "umask", minNumOfPositionalArgs = 1)
@GenerateNodeFactory
abstract static class UmaskNode extends PythonBuiltinNode {
@Specialization
int getAndSetUmask(int umask) {
if (umask == 0022) {
return 0022;
}
if (umask == 0) {
// TODO: change me, this does not really set the umask, workaround needed for pip
// it returns the previous mask (which in our case is always 0022)
return 0022;
} else {
throw raise(NotImplementedError, "setting the umask to anything other than the default");
}
}
}
@Builtin(name = "get_terminal_size", maxNumOfPositionalArgs = 1)
@GenerateNodeFactory
abstract static class GetTerminalSizeNode extends PythonUnaryBuiltinNode {
private static final String ERROR_MESSAGE = "[Errno 9] Bad file descriptor";
@Child private CastToIntegerFromIntNode castIntNode;
@Child private GetTerminalSizeNode recursiveNode;
@CompilationFinal private ConditionProfile errorProfile;
@CompilationFinal private ConditionProfile overflowProfile;
private CastToIntegerFromIntNode getCastIntNode() {
if (castIntNode == null) {
CompilerDirectives.transferToInterpreterAndInvalidate();
castIntNode = insert(CastToIntegerFromIntNode.create(val -> {
throw raise(PythonBuiltinClassType.TypeError, "an integer is required (got type %p)", val);
}));
}
return castIntNode;
}
private ConditionProfile getErrorProfile() {
if (errorProfile == null) {
CompilerDirectives.transferToInterpreterAndInvalidate();
errorProfile = ConditionProfile.createBinaryProfile();
}
return errorProfile;
}
private ConditionProfile getOverflowProfile() {
if (overflowProfile == null) {
CompilerDirectives.transferToInterpreterAndInvalidate();
overflowProfile = ConditionProfile.createBinaryProfile();
}
return overflowProfile;
}
@Specialization(guards = "isNone(fd)")
PTuple getTerminalSize(@SuppressWarnings("unused") PNone fd) {
if (getErrorProfile().profile(getContext().getResources().getFileChannel(0) == null)) {
throw raise(OSError, ERROR_MESSAGE);
}
return factory().createTuple(new Object[]{PythonOptions.getTerminalWidth(), PythonOptions.getTerminalHeight()});
}
@Specialization
PTuple getTerminalSize(int fd) {
if (getErrorProfile().profile(getContext().getResources().getFileChannel(fd) == null)) {
throw raise(OSError, ERROR_MESSAGE);
}
return factory().createTuple(new Object[]{PythonOptions.getTerminalWidth(), PythonOptions.getTerminalHeight()});
}
@Specialization
PTuple getTerminalSize(long fd) {
if (getOverflowProfile().profile(Integer.MIN_VALUE > fd || fd > Integer.MAX_VALUE)) {
throw raise(PythonErrorType.OverflowError, "Python int too large to convert to C long");
}
if (getErrorProfile().profile(getContext().getResources().getFileChannel((int) fd) == null)) {
throw raise(OSError, ERROR_MESSAGE);
}
return factory().createTuple(new Object[]{PythonOptions.getTerminalWidth(), PythonOptions.getTerminalHeight()});
}
@Specialization
@TruffleBoundary
PTuple getTerminalSize(PInt fd) {
int value;
try {
value = fd.intValueExact();
if (getContext().getResources().getFileChannel(value) == null) {
throw raise(OSError, ERROR_MESSAGE);
}
} catch (ArithmeticException e) {
throw raise(PythonErrorType.OverflowError, "Python int too large to convert to C long");
}
return factory().createTuple(new Object[]{PythonOptions.getTerminalWidth(), PythonOptions.getTerminalHeight()});
}
@Fallback
Object getTerminalSize(VirtualFrame frame, Object fd) {
Object value = getCastIntNode().execute(fd);
if (recursiveNode == null) {
CompilerDirectives.transferToInterpreterAndInvalidate();
recursiveNode = create();
}
return recursiveNode.execute(frame, value);
}
protected GetTerminalSizeNode create() {
return PosixModuleBuiltinsFactory.GetTerminalSizeNodeFactory.create();
}
}
@Builtin(name = "readlink", minNumOfPositionalArgs = 1, parameterNames = {"path"}, varArgsMarker = true, keywordOnlyNames = {"dirFd"}, doc = "readlink(path, *, dir_fd=None) -> path\n" +
"\nReturn a string representing the path to which the symbolic link points.\n")
@GenerateNodeFactory
abstract static class ReadlinkNode extends PythonBinaryBuiltinNode {
@Specialization
String readlink(VirtualFrame frame, Object str, @SuppressWarnings("unused") PNone none,
@Cached CastToPathNode cast) {
try {
return getContext().getEnv().getPublicTruffleFile(cast.execute(frame, str)).getCanonicalFile().getPath();
} catch (IOException e) {
throw raise(OSError, e);
}
}
}
@Builtin(name = "strerror", minNumOfPositionalArgs = 1)
@GenerateNodeFactory
public abstract static class StrErrorNode extends PythonBuiltinNode {
private static final HashMap<Integer, String> STR_ERROR_MAP = new HashMap<>();
@Specialization
String getStrError(int errno) {
if (STR_ERROR_MAP.isEmpty()) {
for (OSErrorEnum error : OSErrorEnum.values()) {
STR_ERROR_MAP.put(error.getNumber(), error.getMessage());
}
}
String result = STR_ERROR_MAP.get(errno);
if (result == null) {
result = "Unknown error " + errno;
}
return result;
}
}
@Builtin(name = "ctermid", minNumOfPositionalArgs = 0)
@GenerateNodeFactory
abstract static class CtermId extends PythonBuiltinNode {
@Specialization
String ctermid() {
return "/dev/tty";
}
}
@Builtin(name = "symlink", minNumOfPositionalArgs = 2, parameterNames = {"src", "dst", "target_is_directory", "dir_fd"})
@GenerateNodeFactory
public abstract static class SymlinkNode extends PythonBuiltinNode {
@Specialization(guards = {"isNoValue(targetIsDir)", "isNoValue(dirFd)"})
PNone doSimple(VirtualFrame frame, Object srcObj, Object dstObj, @SuppressWarnings("unused") PNone targetIsDir, @SuppressWarnings("unused") PNone dirFd,
@Cached CastToPathNode castSrcToPath,
@Cached CastToPathNode castDstToPath) {
String src = castSrcToPath.execute(frame, srcObj);
String dst = castDstToPath.execute(frame, dstObj);
Env env = getContext().getEnv();
TruffleFile dstFile = env.getPublicTruffleFile(dst);
try {
dstFile.createSymbolicLink(env.getPublicTruffleFile(src));
} catch (IOException e) {
throw raiseOSError(frame, OSErrorEnum.EIO, e);
}
return PNone.NONE;
}
}
@Builtin(name = "kill", minNumOfPositionalArgs = 2)
@GenerateNodeFactory
@TypeSystemReference(PythonArithmeticTypes.class)
abstract static class KillNode extends PythonBinaryBuiltinNode {
private static final String[] KILL_SIGNALS = new String[]{"SIGKILL", "SIGQUIT", "SIGTRAP", "SIGABRT"};
private static final String[] TERMINATION_SIGNALS = new String[]{"SIGTERM", "SIGINT"};
@Specialization
PNone kill(VirtualFrame frame, int pid, int signal,
@Cached ReadAttributeFromObjectNode readSignalNode,
@Cached IsNode isNode) {
PythonContext context = getContext();
PythonModule signalModule = context.getCore().lookupBuiltinModule("_signal");
for (String name : TERMINATION_SIGNALS) {
Object value = readSignalNode.execute(signalModule, name);
if (isNode.execute(signal, value)) {
try {
context.getResources().sigterm(pid);
} catch (IndexOutOfBoundsException e) {
throw raiseOSError(frame, OSErrorEnum.ESRCH.getNumber());
}
return PNone.NONE;
}
}
for (String name : KILL_SIGNALS) {
Object value = readSignalNode.execute(signalModule, name);
if (isNode.execute(signal, value)) {
try {
context.getResources().sigkill(pid);
} catch (IndexOutOfBoundsException e) {
throw raiseOSError(frame, OSErrorEnum.ESRCH.getNumber());
}
return PNone.NONE;
}
}
Object dfl = readSignalNode.execute(signalModule, "SIG_DFL");
if (isNode.execute(signal, dfl)) {
try {
context.getResources().sigdfl(pid);
} catch (IndexOutOfBoundsException e) {
throw raiseOSError(frame, OSErrorEnum.ESRCH.getNumber());
}
return PNone.NONE;
}
throw raise(PythonBuiltinClassType.NotImplementedError, "Sending arbitrary signals to child processes. Can only send some kill and term signals.");
}
@Specialization(replaces = "kill")
PNone killFallback(VirtualFrame frame, Object pid, Object signal,
@CachedLibrary(limit = "getCallSiteInlineCacheMaxDepth()") PythonObjectLibrary lib,
@Cached ReadAttributeFromObjectNode readSignalNode,
@Cached IsNode isNode) {
ThreadState state = PArguments.getThreadState(frame);
return kill(frame, lib.asSizeWithState(pid, state), lib.asSizeWithState(signal, state), readSignalNode, isNode);
}
}
@Builtin(name = "fsync", minNumOfPositionalArgs = 1)
@GenerateNodeFactory
abstract static class FSyncNode extends PythonUnaryBuiltinNode {
@Specialization
PNone fsync(VirtualFrame frame, int fd) {
if (!getContext().getResources().fsync(fd)) {
throw raiseOSError(frame, OSErrorEnum.ENOENT.getNumber());
}
return PNone.NONE;
}
}
@Builtin(name = "ftruncate", minNumOfPositionalArgs = 2)
@GenerateNodeFactory
abstract static class FTruncateNode extends PythonBinaryBuiltinNode {
@Specialization
PNone ftruncate(VirtualFrame frame, int fd, long length) {
try {
getContext().getResources().ftruncate(fd, length);
} catch (IOException e) {
throw raiseOSError(frame, OSErrorEnum.ENOENT.getNumber());
}
return PNone.NONE;
}
}
}
|
[] |
[] |
[] |
[]
|
[]
|
java
| 0 | 0 | |
deepfence_diagnosis/service/diagnosis.go
|
package main
import (
"archive/tar"
"compress/gzip"
"context"
"fmt"
"io"
"io/ioutil"
"net/http"
"os"
"path/filepath"
"strconv"
"strings"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/filters"
"github.com/google/uuid"
v1 "k8s.io/api/core/v1"
metaV1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
type diagnosisT struct{}
var (
supervisorContainers []string
supervisorLogsFolder = "/var/log/supervisor"
consoleNamespace string
nodeMetrics string
)
const (
CELERY_CONTAINER = "deepfence-celery"
VULNERABILITY_CONTAINER_LOG_PATH = "/var/log/vulnerability_scan_logs/"
)
func init() {
supervisorContainers = []string{"deepfence-analyzer-0", "deepfence-analyzer-1", "deepfence-analyzer-2", "deepfence-celery", "deepfence-backend", "deepfence-api"}
consoleNamespace = os.Getenv("CONSOLE_NAMESPACE")
if consoleNamespace == "" {
consoleNamespace = "default"
}
nodeMetrics = os.Getenv("NODE_METRICS")
}
func addSupervisorLogsKubernetes(pod v1.Pod, tarWriter *tar.Writer) error {
logsNeeded := false
for _, supervisorContainer := range supervisorContainers {
if strings.Contains(pod.Name, supervisorContainer) {
logsNeeded = true
break
}
}
if !logsNeeded {
return nil
}
randID := uuid.New().String()
tmpFolder := "/tmp/" + randID + "/supervisor-logs/" + pod.Name
_ = os.MkdirAll(tmpFolder, os.ModePerm)
command := fmt.Sprintf("kubectl cp %s/%s:%s %s", pod.Namespace, pod.Name, supervisorLogsFolder, tmpFolder)
_, err := ExecuteCommand(command)
if err != nil {
return err
}
filepath.Walk(tmpFolder, func(file string, fi os.FileInfo, err error) error {
// propagate walk errors up front; fi may be nil when err is non-nil
if err != nil {
return err
}
// generate tar header
header, err := tar.FileInfoHeader(fi, file)
if err != nil {
return err
}
// strip the first three path components ("", "tmp", randID) so only the nested structure remains,
// e.g. /tmp/some1/some2/some3 is stored in the tar as some2/some3
header.Name = strings.Join(strings.Split(filepath.ToSlash(file), "/")[3:], "/")
if err := tarWriter.WriteHeader(header); err != nil {
return err
}
// if not a dir, write file content
if !fi.IsDir() {
data, err := os.Open(file)
if err != nil {
return err
}
if _, err := io.Copy(tarWriter, data); err != nil {
return err
}
}
return nil
})
os.RemoveAll("/tmp/" + randID)
return nil
}
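// Illustrative result of the header renaming above (assuming kubectl cp recreates the directory under
// tmpFolder): a file at /tmp/<randID>/supervisor-logs/<podName>/supervisord.log ends up in the archive
// as supervisor-logs/<podName>/supervisord.log, since the leading "", "tmp" and <randID> components are dropped.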
func addVulnerabilityLogsKubernetes(pod v1.Pod, tarWriter *tar.Writer) error {
logsNeeded := false
for _, supervisorContainer := range supervisorContainers {
if strings.Contains(pod.Name, supervisorContainer) {
logsNeeded = true
break
}
}
if !logsNeeded {
return nil
}
randID := uuid.New().String()
tmpFolder := "/tmp/" + randID + "/" + pod.Name + "/vulnerability_scan_logs"
_ = os.MkdirAll(tmpFolder, os.ModePerm)
command := fmt.Sprintf("kubectl cp %s/%s:%s %s", pod.Namespace, pod.Name, VULNERABILITY_CONTAINER_LOG_PATH, tmpFolder)
_, err := ExecuteCommand(command)
if err != nil {
return err
}
filepath.Walk(tmpFolder, func(file string, fi os.FileInfo, err error) error {
// propagate walk errors up front; fi may be nil when err is non-nil
if err != nil {
return err
}
// generate tar header
header, err := tar.FileInfoHeader(fi, file)
if err != nil {
return err
}
// strip the first three path components ("", "tmp", randID) so only the nested structure remains,
// e.g. /tmp/some1/some2/some3 is stored in the tar as some2/some3
header.Name = strings.Join(strings.Split(filepath.ToSlash(file), "/")[3:], "/")
if err := tarWriter.WriteHeader(header); err != nil {
return err
}
// if not a dir, write file content
if !fi.IsDir() {
data, err := os.Open(file)
if err != nil {
return err
}
if _, err := io.Copy(tarWriter, data); err != nil {
return err
}
}
return nil
})
os.RemoveAll("/tmp/" + randID)
return nil
}
func addVulnerabilityLogsDocker(container types.Container, tarWriter *tar.Writer) error {
containerName := strings.Trim(container.Names[0], "/")
if !InArray(containerName, supervisorContainers) {
return nil
}
tarStream, err := copyFromContainer(container.ID, VULNERABILITY_CONTAINER_LOG_PATH)
if err != nil {
return nil
}
tr := tar.NewReader(tarStream)
for {
hdr, err := tr.Next()
if err == io.EOF {
break // end of tar archive
}
if err != nil {
break
}
logBytes, err := ioutil.ReadAll(tr)
if err != nil {
break
}
if hdr.FileInfo().IsDir() {
hdr.Name = containerName
} else {
hdr.Name = containerName + "/" + hdr.Name
}
if err := tarWriter.WriteHeader(hdr); err != nil {
break
}
if _, err := tarWriter.Write(logBytes); err != nil {
break
}
}
return nil
}
func addSupervisorLogsDocker(container types.Container, tarWriter *tar.Writer) error {
containerName := strings.Trim(container.Names[0], "/")
logsNeeded := false
for _, supervisorContainer := range supervisorContainers {
if strings.Contains(containerName, supervisorContainer) {
logsNeeded = true
break
}
}
if !logsNeeded {
return nil
}
tarStream, err := copyFromContainer(container.ID, supervisorLogsFolder)
if err != nil {
return nil
}
tr := tar.NewReader(tarStream)
for {
hdr, err := tr.Next()
if err == io.EOF {
break // end of tar archive
}
if err != nil {
break
}
logBytes, err := ioutil.ReadAll(tr)
if err != nil {
break
}
if hdr.FileInfo().IsDir() {
hdr.Name = containerName
} else {
hdr.Name = containerName + "/" + hdr.Name
}
if err := tarWriter.WriteHeader(hdr); err != nil {
break
}
if _, err := tarWriter.Write(logBytes); err != nil {
break
}
}
return nil
}
func (t *diagnosisT) ServeHTTP(w http.ResponseWriter, r *http.Request) {
filename := "deepfence-logs"
// w.Header().Set("Content-Encoding", "gzip")
w.Header().Set("Content-Disposition", fmt.Sprintf("attachment; filename=\"%s.tar.gz\"", filename))
// var buf bytes.Buffer
gzipWriter := gzip.NewWriter(w)
gzipWriter.Name = "deepfence-logs.tar"
tarWriter := tar.NewWriter(gzipWriter)
// deferred calls run LIFO, so close the tar writer first to flush its trailer into the gzip stream
defer gzipWriter.Close()
defer tarWriter.Close()
ctx := context.Background()
if orchestrator == kubernetesOrchestrator {
labelSelector := "app=deepfence-console"
values := r.URL.Query()
containerName := values.Get("container_name")
if len(containerName) > 0 {
labelSelector += fmt.Sprintf(",name=%s", containerName)
}
options := metaV1.ListOptions{
LabelSelector: labelSelector,
}
pods, err := getPods(options)
if err != nil {
errMsg := "Error while getting pods data - " + err.Error()
http.Error(w, errMsg, http.StatusInternalServerError)
return
}
celeryPod, err := getPodWithLabel(CELERY_CONTAINER, pods)
if err != nil {
errMsg := "Error while getting pods data - " + err.Error()
http.Error(w, errMsg, http.StatusInternalServerError)
return
}
podOptions := v1.PodLogOptions{}
tailLimitStr := values.Get("tail")
if len(tailLimitStr) > 0 {
tailLimit, err := strconv.ParseInt(tailLimitStr, 10, 64)
if err != nil {
errMsg := "Error while getting pods data - " + err.Error()
http.Error(w, errMsg, http.StatusInternalServerError)
return
}
podOptions.TailLines = &tailLimit
}
err = addVulnerabilityLogsKubernetes(celeryPod, tarWriter)
if err != nil {
fmt.Println(err)
}
for _, pod := range pods {
req := kubeCli.CoreV1().Pods(pod.Namespace).GetLogs(pod.Name, &podOptions)
podLogs, err := req.Stream(ctx)
if err != nil {
fmt.Println("error in opening stream", err)
continue
}
logBytes, err := ioutil.ReadAll(podLogs)
if err != nil {
continue
}
podLogs.Close()
hdr := &tar.Header{
Name: fmt.Sprintf("%s.log", pod.Name),
Mode: 0600,
Size: int64(len(logBytes)), // tar entry sizes are byte counts, not rune counts
}
if err := tarWriter.WriteHeader(hdr); err != nil {
continue
}
if _, err := tarWriter.Write(logBytes); err != nil {
continue
}
err = addSupervisorLogsKubernetes(pod, tarWriter)
if err != nil {
fmt.Println(err)
}
}
} else {
values := r.URL.Query()
containerName := values.Get("container_name")
tail := values.Get("tail")
containerFilters := filters.NewArgs()
if len(containerName) > 0 {
containerFilters.Add("name", containerName)
}
containers := getContainers(types.ContainerListOptions{
Filters: containerFilters,
All: true,
})
celeryContainer, _ := getContainer(CELERY_CONTAINER, containers)
logOptions := types.ContainerLogsOptions{
ShowStdout: true,
ShowStderr: true,
Tail: tail,
}
// get vulnerability mapper logs
if celeryContainer.Names != nil {
err := addVulnerabilityLogsDocker(celeryContainer, tarWriter)
if err != nil {
fmt.Println(err)
}
}
for _, container := range containers {
if len(container.Names) == 0 {
continue
}
containerName := strings.Trim(container.Names[0], "/")
logs, err := getContainerLogs(container.ID, logOptions)
if err != nil {
continue
}
logBytes, err := ioutil.ReadAll(logs)
if err != nil {
continue
}
// if len(logBytes) == 0 {
// continue
// }
hdr := &tar.Header{
Name: fmt.Sprintf("%s.log", containerName),
Mode: 0600,
Size: int64(len(logBytes)), // tar entry sizes are byte counts, not rune counts
}
if err := tarWriter.WriteHeader(hdr); err != nil {
continue
}
if _, err := tarWriter.Write(logBytes); err != nil {
continue
}
err = addSupervisorLogsDocker(container, tarWriter)
if err != nil {
fmt.Println(err)
}
}
}
tarWriter.Flush()
gzipWriter.Flush()
}
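// Illustrative usage (the route this handler is mounted on is registered elsewhere, so the path below is
// an assumption): a request such as GET /diagnosis?container_name=deepfence-celery&tail=200 streams a
// gzip-compressed tar named deepfence-logs.tar.gz with one <name>.log entry per pod or container plus
// the supervisor and vulnerability-scan logs collected by the helpers above.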
|
[
"\"CONSOLE_NAMESPACE\"",
"\"NODE_METRICS\""
] |
[] |
[
"NODE_METRICS",
"CONSOLE_NAMESPACE"
] |
[]
|
["NODE_METRICS", "CONSOLE_NAMESPACE"]
|
go
| 2 | 0 | |
setup/migrate_aide.py
|
'''
Run this file whenever you update AIDE to bring your existing project setup up-to-date
with respect to changes due to newer versions.
2019-21 Benjamin Kellenberger
'''
import os
import argparse
from psycopg2 import sql
from constants import version
MODIFICATIONS_sql = [
'ALTER TABLE "{schema}".annotation ADD COLUMN IF NOT EXISTS meta VARCHAR; ALTER TABLE "{schema}".image_user ADD COLUMN IF NOT EXISTS meta VARCHAR;',
'ALTER TABLE "{schema}".labelclass ADD COLUMN IF NOT EXISTS keystroke SMALLINT UNIQUE;',
'ALTER TABLE "{schema}".image ADD COLUMN IF NOT EXISTS last_requested TIMESTAMPTZ;',
# support for multiple projects
'CREATE SCHEMA IF NOT EXISTS aide_admin',
'''DO $$
BEGIN
IF NOT EXISTS (SELECT 1 FROM pg_type WHERE typname = 'labeltype') THEN
create type labelType AS ENUM ('labels', 'points', 'boundingBoxes', 'segmentationMasks');
END IF;
END
$$;''',
'''CREATE TABLE IF NOT EXISTS aide_admin.project (
shortname VARCHAR UNIQUE NOT NULL,
name VARCHAR UNIQUE NOT NULL,
description VARCHAR,
isPublic BOOLEAN DEFAULT FALSE,
secret_token VARCHAR,
interface_enabled BOOLEAN DEFAULT FALSE,
demoMode BOOLEAN DEFAULT FALSE,
annotationType labelType NOT NULL,
predictionType labelType,
ui_settings VARCHAR,
numImages_autoTrain BIGINT,
minNumAnnoPerImage INTEGER,
maxNumImages_train BIGINT,
maxNumImages_inference BIGINT,
ai_model_enabled BOOLEAN NOT NULL DEFAULT FALSE,
ai_model_library VARCHAR,
ai_model_settings VARCHAR,
ai_alCriterion_library VARCHAR,
ai_alCriterion_settings VARCHAR,
PRIMARY KEY(shortname)
);''',
'''CREATE TABLE IF NOT EXISTS aide_admin.user (
name VARCHAR UNIQUE NOT NULL,
email VARCHAR,
hash BYTEA,
isSuperuser BOOLEAN DEFAULT FALSE,
canCreateProjects BOOLEAN DEFAULT FALSE,
session_token VARCHAR,
last_login TIMESTAMPTZ,
secret_token VARCHAR DEFAULT md5(random()::text),
PRIMARY KEY (name)
);''',
'ALTER TABLE aide_admin.user ADD COLUMN IF NOT EXISTS secret_token VARCHAR DEFAULT md5(random()::text);',
'''CREATE TABLE IF NOT EXISTS aide_admin.authentication (
username VARCHAR NOT NULL,
project VARCHAR NOT NULL,
isAdmin BOOLEAN DEFAULT FALSE,
PRIMARY KEY (username, project),
FOREIGN KEY (username) REFERENCES aide_admin.user (name),
FOREIGN KEY (project) REFERENCES aide_admin.project (shortname)
);''',
'ALTER TABLE "{schema}".image_user DROP CONSTRAINT IF EXISTS image_user_image_fkey;',
'ALTER TABLE "{schema}".image_user DROP CONSTRAINT IF EXISTS image_user_username_fkey;',
'''DO
$do$
BEGIN
IF EXISTS (
SELECT 1
FROM information_schema.tables
WHERE table_schema = '{schema}'
AND table_name = 'user'
) THEN
INSERT INTO aide_admin.user (name, email, hash, isSuperUser, canCreateProjects, secret_token)
SELECT name, email, hash, false AS isSuperUser, false AS canCreateProjects, md5(random()::text) AS secret_token FROM "{schema}".user
ON CONFLICT(name) DO NOTHING;
END IF;
END $do$;''',
'ALTER TABLE "{schema}".image_user ADD CONSTRAINT image_user_image_fkey FOREIGN KEY (username) REFERENCES aide_admin.USER (name);',
'ALTER TABLE "{schema}".annotation DROP CONSTRAINT IF EXISTS annotation_username_fkey;',
'ALTER TABLE "{schema}".annotation ADD CONSTRAINT annotation_username_fkey FOREIGN KEY (username) REFERENCES aide_admin.USER (name);',
'ALTER TABLE "{schema}".cnnstate ADD COLUMN IF NOT EXISTS model_library VARCHAR;',
'ALTER TABLE "{schema}".cnnstate ADD COLUMN IF NOT EXISTS alCriterion_library VARCHAR;',
'ALTER TABLE "{schema}".image ADD COLUMN IF NOT EXISTS isGoldenQuestion BOOLEAN NOT NULL DEFAULT FALSE;',
'''-- IoU function for statistical evaluations
CREATE OR REPLACE FUNCTION "intersection_over_union" (
"ax" real, "ay" real, "awidth" real, "aheight" real,
"bx" real, "by" real, "bwidth" real, "bheight" real)
RETURNS real AS $iou$
DECLARE
iou real;
BEGIN
SELECT (
CASE WHEN aright < bleft OR bright < aleft OR
atop < bbottom OR btop < abottom THEN 0.0
ELSE GREATEST(inters / (unionplus - inters), 0.0)
END
) INTO iou
FROM (
SELECT
((iright - ileft) * (itop - ibottom)) AS inters,
aarea + barea AS unionplus,
aleft, aright, atop, abottom,
bleft, bright, btop, bbottom
FROM (
SELECT
((aright - aleft) * (atop - abottom)) AS aarea,
((bright - bleft) * (btop - bbottom)) AS barea,
GREATEST(aleft, bleft) AS ileft,
LEAST(atop, btop) AS itop,
LEAST(aright, bright) AS iright,
GREATEST(abottom, bbottom) AS ibottom,
aleft, aright, atop, abottom,
bleft, bright, btop, bbottom
FROM (
SELECT (ax - awidth/2) AS aleft, (ay + aheight/2) AS atop,
(ax + awidth/2) AS aright, (ay - aheight/2) AS abottom,
(bx - bwidth/2) AS bleft, (by + bheight/2) AS btop,
(bx + bwidth/2) AS bright, (by - bheight/2) AS bbottom
) AS qq
) AS qq2
) AS qq3;
RETURN iou;
END;
$iou$ LANGUAGE plpgsql;
''',
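# Worked example for intersection_over_union() above (illustrative): two 2x2 boxes centred at (0,0) and
# (1,1) overlap in a 1x1 square, so inters = 1, unionplus = 4 + 4 = 8 and the function returns
# 1 / (8 - 1) ~= 0.143.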
'ALTER TABLE "{schema}".image ADD COLUMN IF NOT EXISTS date_added TIMESTAMPTZ NOT NULL DEFAULT NOW();',
'ALTER TABLE aide_admin.authentication ADD COLUMN IF NOT EXISTS admitted_until TIMESTAMPTZ;',
'ALTER TABLE aide_admin.authentication ADD COLUMN IF NOT EXISTS blocked_until TIMESTAMPTZ;',
#TODO: we probably don't need unique group names (useful e.g. for nested groups)
# '''ALTER TABLE "{schema}".labelclassgroup DROP CONSTRAINT IF EXISTS labelclassgroup_name_unique;
# ALTER TABLE "{schema}".labelclassgroup ADD CONSTRAINT labelclassgroup_name_unique UNIQUE (name);''',
'ALTER TABLE "{schema}".labelclassgroup DROP CONSTRAINT IF EXISTS labelclassgroup_name_unique;',
'ALTER TABLE "{schema}".image ADD COLUMN IF NOT EXISTS corrupt BOOLEAN;',
'''
CREATE TABLE IF NOT EXISTS "{schema}".workflow (
id uuid DEFAULT uuid_generate_v4(),
name VARCHAR UNIQUE,
workflow VARCHAR NOT NULL,
username VARCHAR NOT NULL,
timeCreated TIMESTAMPTZ NOT NULL DEFAULT NOW(),
timeModified TIMESTAMPTZ NOT NULL DEFAULT NOW(),
PRIMARY KEY (id),
FOREIGN KEY (username) REFERENCES aide_admin.user(name)
)
''',
'ALTER TABLE aide_admin.project ADD COLUMN IF NOT EXISTS default_workflow uuid',
'ALTER TABLE "{schema}".image_user ADD COLUMN IF NOT EXISTS num_interactions INTEGER NOT NULL DEFAULT 0;',
'ALTER TABLE "{schema}".image_user ADD COLUMN IF NOT EXISTS first_checked TIMESTAMPTZ;',
'ALTER TABLE "{schema}".image_user ADD COLUMN IF NOT EXISTS total_time_required BIGINT;',
'''ALTER TABLE aide_admin.project ADD COLUMN IF NOT EXISTS segmentation_ignore_unlabeled BOOLEAN NOT NULL DEFAULT TRUE;
ALTER TABLE "{schema}".labelclass ADD COLUMN IF NOT EXISTS hidden BOOLEAN NOT NULL DEFAULT FALSE;
''',
'ALTER TABLE "{schema}".annotation ADD COLUMN IF NOT EXISTS autoConverted BOOLEAN;',
'ALTER TABLE aide_admin.project ADD COLUMN IF NOT EXISTS owner VARCHAR;',
'''ALTER TABLE aide_admin.project DROP CONSTRAINT IF EXISTS project_user_fkey;
ALTER TABLE aide_admin.project ADD CONSTRAINT project_user_fkey FOREIGN KEY (owner) REFERENCES aide_admin.USER (name);''',
# new workflow history
''' CREATE TABLE IF NOT EXISTS "{schema}".workflowHistory (
id uuid DEFAULT uuid_generate_v4(),
workflow VARCHAR NOT NULL,
tasks VARCHAR,
timeCreated TIMESTAMPTZ NOT NULL DEFAULT NOW(),
timeFinished TIMESTAMPTZ,
launchedBy VARCHAR,
abortedBy VARCHAR,
succeeded BOOLEAN,
messages VARCHAR,
PRIMARY KEY (id),
FOREIGN KEY (launchedBy) REFERENCES aide_admin.user (name),
FOREIGN KEY (abortedBy) REFERENCES aide_admin.user (name)
);''',
# project folder watching
'ALTER TABLE aide_admin.project ADD COLUMN IF NOT EXISTS watch_folder_enabled BOOLEAN NOT NULL DEFAULT FALSE;',
'ALTER TABLE aide_admin.project ADD COLUMN IF NOT EXISTS watch_folder_remove_missing_enabled BOOLEAN NOT NULL DEFAULT FALSE;',
# model marketplace
'''CREATE TABLE IF NOT EXISTS aide_admin.modelMarketplace (
id uuid DEFAULT uuid_generate_v4(),
name VARCHAR UNIQUE NOT NULL,
description VARCHAR NOT NULL,
labelclasses VARCHAR NOT NULL,
author VARCHAR NOT NULL,
model_library VARCHAR NOT NULL,
statedict BYTEA,
timeCreated TIMESTAMPTZ NOT NULL DEFAULT NOW(),
alCriterion_library VARCHAR,
origin_project VARCHAR,
origin_uuid UUID,
origin_uri VARCHAR,
public BOOLEAN NOT NULL DEFAULT TRUE,
anonymous BOOLEAN NOT NULL DEFAULT FALSE,
selectCount INTEGER NOT NULL DEFAULT 0,
shared BOOLEAN NOT NULL DEFAULT TRUE,
tags VARCHAR,
PRIMARY KEY (id)
);''',
'ALTER TABLE aide_admin.modelMarketplace ADD COLUMN IF NOT EXISTS shared BOOLEAN NOT NULL DEFAULT TRUE;',
'ALTER TABLE "{schema}".cnnstate ADD COLUMN IF NOT EXISTS marketplace_origin_id UUID;',
'ALTER TABLE "{schema}".cnnstate DROP CONSTRAINT IF EXISTS marketplace_origin_id_fkey;',
'ALTER TABLE "{schema}".cnnstate ADD CONSTRAINT marketplace_origin_id_fkey FOREIGN KEY (marketplace_origin_id) REFERENCES aide_admin.modelMarketplace(id);',
'ALTER TABLE aide_admin.modelMarketplace ADD COLUMN IF NOT EXISTS tags VARCHAR;',
'ALTER TABLE aide_admin.project ADD COLUMN IF NOT EXISTS archived BOOLEAN DEFAULT FALSE;',
'''
/*
Last occurrence of substring. Function obtained from here:
https://wiki.postgresql.org/wiki/Strposrev
*/
CREATE OR REPLACE FUNCTION strposrev(instring text, insubstring text)
RETURNS integer AS
$BODY$
DECLARE result INTEGER;
BEGIN
IF strpos(instring, insubstring) = 0 THEN
-- no match
result:=0;
ELSEIF length(insubstring)=1 THEN
-- add one to get the correct position from the left.
result:= 1 + length(instring) - strpos(reverse(instring), insubstring);
ELSE
-- add two minus the length of the search string
result:= 2 + length(instring)- length(insubstring) - strpos(reverse(instring), reverse(insubstring));
END IF;
RETURN result;
END;
$BODY$
LANGUAGE plpgsql IMMUTABLE STRICT
COST 4;
''',
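# Worked example for strposrev() above (illustrative): strposrev('abcabc', 'bc') returns 5, the 1-based
# start of the last occurrence of 'bc', and a substring that never occurs returns 0.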
'''
CREATE OR REPLACE VIEW "{schema}".fileHierarchy AS (
SELECT DISTINCT
CASE WHEN position('/' IN filename) = 0 THEN null
ELSE left(filename, strposrev(filename, '/')-1) END
AS folder
FROM "{schema}".image
);
''',
'''
CREATE TABLE IF NOT EXISTS "{schema}".bookmark (
username VARCHAR NOT NULL,
image uuid NOT NULL,
timeCreated TIMESTAMPTZ NOT NULL DEFAULT NOW(),
PRIMARY KEY (username, image),
FOREIGN KEY (username) REFERENCES "aide_admin".user(name),
FOREIGN KEY (image) REFERENCES "{schema}".image
);
''',
# # change in workflow definition #TODO: takes forever...
# '''
# DELETE FROM "{schema}".workflowhistory
# WHERE timecreated < TO_DATE('20201204','YYYYMMDD');
# ''',
'''
ALTER TABLE "{schema}".labelclass ADD COLUMN IF NOT EXISTS
timeCreated TIMESTAMPTZ NOT NULL DEFAULT NOW();
''',
f'''
CREATE TABLE IF NOT EXISTS "aide_admin".version (
version VARCHAR UNIQUE NOT NULL,
PRIMARY KEY (version)
);
''',
'ALTER TABLE aide_admin.modelMarketplace ADD COLUMN IF NOT EXISTS origin_uri VARCHAR UNIQUE;',
'ALTER TABLE aide_admin.modelMarketplace ALTER stateDict DROP NOT NULL;', # due to pre-trained models we now allow empty state dicts...
'ALTER TABLE aide_admin.modelMarketplace DROP CONSTRAINT IF EXISTS modelmarketplace_author_fkey;', # ...as well as foreign model authors
'ALTER TABLE "{schema}".cnnstate ALTER stateDict DROP NOT NULL;',
'''
CREATE TABLE IF NOT EXISTS "{schema}".taskhistory (
id uuid NOT NULL DEFAULT uuid_generate_v4(),
task_id VARCHAR NOT NULL,
launchedBy VARCHAR,
abortedBy VARCHAR,
processDescription VARCHAR,
timeCreated TIMESTAMPTZ NOT NULL DEFAULT NOW(),
timeFinished TIMESTAMPTZ,
result VARCHAR,
PRIMARY KEY (id),
FOREIGN KEY (launchedBy) REFERENCES aide_admin.user (name),
FOREIGN KEY (abortedBy) REFERENCES aide_admin.user (name)
);
''',
'ALTER TABLE "{schema}".cnnstate ADD COLUMN IF NOT EXISTS stats VARCHAR;',
'ALTER TABLE aide_admin.modelMarketplace ADD COLUMN IF NOT EXISTS model_settings VARCHAR;',
'ALTER TABLE aide_admin.project ADD COLUMN IF NOT EXISTS inference_chunk_size BIGINT;',
'ALTER TABLE aide_admin.project ADD COLUMN IF NOT EXISTS max_num_concurrent_tasks INTEGER;',
# explicit model-to-labelclass mapping
'''
CREATE TABLE IF NOT EXISTS "{schema}".model_labelclass (
--ai_model_library VARCHAR NOT NULL,
marketplace_origin_id UUID NOT NULL,
labelclass_id_model VARCHAR NOT NULL,
labelclass_name_model VARCHAR NOT NULL,
labelclass_id_project UUID,
PRIMARY KEY (marketplace_origin_id, labelclass_id_model),
FOREIGN KEY (labelclass_id_project) REFERENCES "{schema}".labelclass (id)
);
''',
'ALTER TABLE "{schema}".cnnstate ADD COLUMN IF NOT EXISTS imported_from_marketplace BOOLEAN NOT NULL DEFAULT FALSE;',
'ALTER TABLE "{schema}".cnnstate ADD COLUMN IF NOT EXISTS labelclass_autoupdate BOOLEAN NOT NULL DEFAULT FALSE;',
'ALTER TABLE "aide_admin".project ADD COLUMN IF NOT EXISTS labelclass_autoupdate BOOLEAN NOT NULL DEFAULT FALSE;',
'ALTER TABLE "aide_admin".project ADD COLUMN IF NOT EXISTS annotationType labelType NOT NULL',
'ALTER TABLE "aide_admin".project ADD COLUMN IF NOT EXISTS predictionType labelType NOT NULL',
# thanks to "imported_from_marketplace" field, we don't want the unique constraint on the origin ID anymore
'ALTER TABLE "{schema}".cnnstate DROP CONSTRAINT IF EXISTS cnnstate_marketplace_origin_id_key;',
# we also allow multiple models with the same origin URI (for updates of Web-imported models or uploads with identical file names)
'ALTER TABLE "aide_admin".modelmarketplace DROP CONSTRAINT IF EXISTS modelmarketplace_origin_uri_key;',
# for Model Marketplace version 1.1
'ALTER TABLE "aide_admin".modelmarketplace ADD COLUMN IF NOT EXISTS citation_info VARCHAR;',
'ALTER TABLE "aide_admin".modelmarketplace ADD COLUMN IF NOT EXISTS license VARCHAR;'
]
def migrate_aide(forceMigrate=False):
from modules import Database, UserHandling
from util.configDef import Config
config = Config()
dbConn = Database(config)
if not dbConn.canConnect():
raise Exception('Error connecting to database.')
warnings = []
errors = []
# skip if not forced and if database has same version
doMigrate = True
# check if DB has version already implemented
dbVersion = None
hasVersion = dbConn.execute('''
SELECT EXISTS (
SELECT FROM information_schema.tables
WHERE table_schema = 'aide_admin'
AND table_name = 'version'
) AS hasVersion;
''', None, 1)
if hasVersion[0]['hasversion']:
# check DB version
dbVersion = dbConn.execute('SELECT version FROM aide_admin.version;', None, 1)
if dbVersion is not None and len(dbVersion):
dbVersion = dbVersion[0]['version']
needsUpdate = version.compare_versions(version.AIDE_VERSION, dbVersion)
if needsUpdate is not None:
if needsUpdate < 0:
# running an older version of AIDE with a newer DB version
warnings.append(f'WARNING: local AIDE version ({version.AIDE_VERSION}) is older than the one in the database ({dbVersion}); please update your installation.')
elif needsUpdate == 0:
doMigrate = False
else:
doMigrate = True
if not doMigrate and not forceMigrate:
return warnings, errors
# bring all projects up-to-date (if registered within AIDE)
projects = dbConn.execute('SELECT shortname FROM aide_admin.project;', None, 'all')
if projects is not None and len(projects):
# get all schemata and check if project still exists
schemata = dbConn.execute('SELECT schema_name FROM information_schema.schemata', None, 'all')
if schemata is not None and len(schemata):
schemata = set([s['schema_name'].lower() for s in schemata])
for p in projects:
try:
pName = p['shortname']
# check if project still exists
if not pName.lower() in schemata:
warnings.append(f'WARNING: project "{pName}" is registered but does not exist in database.')
#TODO: option to auto-remove?
continue
# special modification for CNN-to-labelclass map: drop only dep. on version (remove ancient tests)
if version.compare_versions(version.AIDE_VERSION, dbVersion) in (-1, None):
dbConn.execute(sql.SQL('DROP TABLE IF EXISTS {};').format(
sql.Identifier(pName, 'cnn_labelclass')
), None)
# make modifications one at a time
for mod in MODIFICATIONS_sql:
dbConn.execute(mod.format(schema=pName), None, None)
# pre-official 2.0: mark existing CNN states as "labelclass_autoupdate" (as this was the default behavior)
if version.compare_versions(dbVersion, '2.0.210514') == -1:
dbConn.execute(sql.SQL('''
UPDATE {}
SET labelclass_autoupdate = TRUE;
''').format(sql.Identifier(pName, 'cnnstate')), None)
except Exception as e:
errors.append(str(e))
else:
warnings.append('WARNING: no project schemata found within database.')
else:
warnings.append('WARNING: no project registered within AIDE.')
# update DB version accordingly
dbConn.execute('''
DELETE FROM aide_admin.version;
INSERT INTO aide_admin.version (version)
VALUES (%s);
''', (version.AIDE_VERSION, ))
return warnings, errors
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Update AIDE database structure.')
parser.add_argument('--force', type=int, default=0,
help='Set to 1 to force migration, even if AIDE versions already match.')
parser.add_argument('--settings_filepath', type=str, default='config/settings.ini', const=1, nargs='?',
help='Manual specification of the path to the settings.ini file; only considered if the AIDE_CONFIG_PATH environment variable is unset (default: "config/settings.ini").')
args = parser.parse_args()
if not 'AIDE_CONFIG_PATH' in os.environ:
os.environ['AIDE_CONFIG_PATH'] = str(args.settings_filepath)
if not 'AIDE_MODULES' in os.environ:
os.environ['AIDE_MODULES'] = '' # for compatibility with Celery worker import
warnings, errors = migrate_aide(args.force)
if not len(warnings) and not len(errors):
print(f'AIDE is now up-to-date with the latest version ({version.AIDE_VERSION})')
else:
print(f'Warnings and/or errors occurred while updating AIDE to the latest version ({version.AIDE_VERSION}):')
if len(warnings):
print('\nWarnings:')
for w in warnings:
print(f'\t"{w}"')
if len(errors):
print('\nErrors:')
for e in errors:
print(f'\t"{e}"')
|
[] |
[] |
[
"AIDE_CONFIG_PATH",
"AIDE_MODULES"
] |
[]
|
["AIDE_CONFIG_PATH", "AIDE_MODULES"]
|
python
| 2 | 0 | |
dataent/pythonrc.py
|
#!/usr/bin/env python2.7
# Copyright (c) 2015, Dataent Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import os
import dataent
dataent.connect(site=os.environ.get("site"))
|
[] |
[] |
[
"site"
] |
[]
|
["site"]
|
python
| 1 | 0 | |
tools/npy_tempita/__init__.py
|
"""
A small templating language
This implements a small templating language. This language implements
if/elif/else, for/continue/break, expressions, and blocks of Python
code. The syntax is::
{{any expression (function calls etc)}}
{{any expression | filter}}
{{for x in y}}...{{endfor}}
{{if x}}x{{elif y}}y{{else}}z{{endif}}
{{py:x=1}}
{{py:
def foo(bar):
return 'baz'
}}
{{default var = default_value}}
{{# comment}}
You use this with the ``Template`` class or the ``sub`` shortcut.
The ``Template`` class takes the template string and the name of
the template (for errors) and a default namespace. Then (like
``string.Template``) you can call the ``tmpl.substitute(**kw)``
method to make a substitution (or ``tmpl.substitute(a_dict)``).
``sub(content, **kw)`` substitutes the template immediately. You
can use ``__name='tmpl.html'`` to set the name of the template.
If there are syntax errors ``TemplateError`` will be raised.
This copy of tempita was taken from https://github.com/gjhiggins/tempita
with a few changes to remove the six dependency.
"""
from __future__ import absolute_import, division, print_function
import re
import sys
try:
from urllib.parse import quote as url_quote
from io import StringIO
from html import escape as html_escape
except ImportError:
from urllib import quote as url_quote
from cStringIO import StringIO
from cgi import escape as html_escape
import os
import tokenize
from ._looper import looper
from .compat3 import (
PY3, bytes, basestring_, next, is_unicode, coerce_text, iteritems)
__all__ = ['TemplateError', 'Template', 'sub', 'HTMLTemplate',
'sub_html', 'html', 'bunch']
in_re = re.compile(r'\s+in\s+')
var_re = re.compile(r'^[a-z_][a-z0-9_]*$', re.I)
class TemplateError(Exception):
"""Exception raised while parsing a template
"""
def __init__(self, message, position, name=None):
Exception.__init__(self, message)
self.position = position
self.name = name
def __str__(self):
msg = ' '.join(self.args)
if self.position:
msg = '%s at line %s column %s' % (
msg, self.position[0], self.position[1])
if self.name:
msg += ' in %s' % self.name
return msg
class _TemplateContinue(Exception):
pass
class _TemplateBreak(Exception):
pass
def get_file_template(name, from_template):
path = os.path.join(os.path.dirname(from_template.name), name)
return from_template.__class__.from_filename(
path, namespace=from_template.namespace,
get_template=from_template.get_template)
class Template(object):
default_namespace = {
'start_braces': '{{',
'end_braces': '}}',
'looper': looper,
}
default_encoding = 'utf8'
default_inherit = None
def __init__(self, content, name=None, namespace=None, stacklevel=None,
get_template=None, default_inherit=None, line_offset=0,
delimiters=None):
self.content = content
# set delimiters
if delimiters is None:
delimiters = (self.default_namespace['start_braces'],
self.default_namespace['end_braces'])
else:
assert len(delimiters) == 2 and all(
[isinstance(delimiter, basestring_)
for delimiter in delimiters])
self.default_namespace = self.__class__.default_namespace.copy()
self.default_namespace['start_braces'] = delimiters[0]
self.default_namespace['end_braces'] = delimiters[1]
self.delimiters = delimiters
self._unicode = is_unicode(content)
if name is None and stacklevel is not None:
try:
caller = sys._getframe(stacklevel)
except ValueError:
pass
else:
globals = caller.f_globals
lineno = caller.f_lineno
if '__file__' in globals:
name = globals['__file__']
if name.endswith('.pyc') or name.endswith('.pyo'):
name = name[:-1]
elif '__name__' in globals:
name = globals['__name__']
else:
name = '<string>'
if lineno:
name += ':%s' % lineno
self.name = name
self._parsed = parse(
content, name=name, line_offset=line_offset,
delimiters=self.delimiters)
if namespace is None:
namespace = {}
self.namespace = namespace
self.get_template = get_template
if default_inherit is not None:
self.default_inherit = default_inherit
def from_filename(cls, filename, namespace=None, encoding=None,
default_inherit=None, get_template=get_file_template):
with open(filename, 'rb') as f:
c = f.read()
if encoding:
c = c.decode(encoding)
elif PY3:
c = c.decode('latin-1')
return cls(content=c, name=filename, namespace=namespace,
default_inherit=default_inherit, get_template=get_template)
from_filename = classmethod(from_filename)
def __repr__(self):
return '<%s %s name=%r>' % (
self.__class__.__name__,
hex(id(self))[2:], self.name)
def substitute(self, *args, **kw):
if args:
if kw:
raise TypeError(
"You can only give positional *or* keyword arguments")
if len(args) > 1:
raise TypeError(
"You can only give one positional argument")
if not hasattr(args[0], 'items'):
raise TypeError(
("If you pass in a single argument, you must pass in a ",
"dict-like object (with a .items() method); you gave %r")
% (args[0],))
kw = args[0]
ns = kw
ns['__template_name__'] = self.name
if self.namespace:
ns.update(self.namespace)
result, defs, inherit = self._interpret(ns)
if not inherit:
inherit = self.default_inherit
if inherit:
result = self._interpret_inherit(result, defs, inherit, ns)
return result
def _interpret(self, ns):
# __traceback_hide__ = True
parts = []
defs = {}
self._interpret_codes(self._parsed, ns, out=parts, defs=defs)
if '__inherit__' in defs:
inherit = defs.pop('__inherit__')
else:
inherit = None
return ''.join(parts), defs, inherit
def _interpret_inherit(self, body, defs, inherit_template, ns):
# __traceback_hide__ = True
if not self.get_template:
raise TemplateError(
'You cannot use inheritance without passing in get_template',
position=None, name=self.name)
templ = self.get_template(inherit_template, self)
self_ = TemplateObject(self.name)
for name, value in iteritems(defs):
setattr(self_, name, value)
self_.body = body
ns = ns.copy()
ns['self'] = self_
return templ.substitute(ns)
def _interpret_codes(self, codes, ns, out, defs):
# __traceback_hide__ = True
for item in codes:
if isinstance(item, basestring_):
out.append(item)
else:
self._interpret_code(item, ns, out, defs)
def _interpret_code(self, code, ns, out, defs):
# __traceback_hide__ = True
name, pos = code[0], code[1]
if name == 'py':
self._exec(code[2], ns, pos)
elif name == 'continue':
raise _TemplateContinue()
elif name == 'break':
raise _TemplateBreak()
elif name == 'for':
vars, expr, content = code[2], code[3], code[4]
expr = self._eval(expr, ns, pos)
self._interpret_for(vars, expr, content, ns, out, defs)
elif name == 'cond':
parts = code[2:]
self._interpret_if(parts, ns, out, defs)
elif name == 'expr':
parts = code[2].split('|')
base = self._eval(parts[0], ns, pos)
for part in parts[1:]:
func = self._eval(part, ns, pos)
base = func(base)
out.append(self._repr(base, pos))
elif name == 'default':
var, expr = code[2], code[3]
if var not in ns:
result = self._eval(expr, ns, pos)
ns[var] = result
elif name == 'inherit':
expr = code[2]
value = self._eval(expr, ns, pos)
defs['__inherit__'] = value
elif name == 'def':
name = code[2]
signature = code[3]
parts = code[4]
ns[name] = defs[name] = TemplateDef(
self, name, signature, body=parts, ns=ns, pos=pos)
elif name == 'comment':
return
else:
assert 0, "Unknown code: %r" % name
def _interpret_for(self, vars, expr, content, ns, out, defs):
# __traceback_hide__ = True
for item in expr:
if len(vars) == 1:
ns[vars[0]] = item
else:
if len(vars) != len(item):
raise ValueError(
'Need %i items to unpack (got %i items)'
% (len(vars), len(item)))
for name, value in zip(vars, item):
ns[name] = value
try:
self._interpret_codes(content, ns, out, defs)
except _TemplateContinue:
continue
except _TemplateBreak:
break
def _interpret_if(self, parts, ns, out, defs):
# __traceback_hide__ = True
# @@: if/else/else gets through
for part in parts:
assert not isinstance(part, basestring_)
name, pos = part[0], part[1]
if name == 'else':
result = True
else:
result = self._eval(part[2], ns, pos)
if result:
self._interpret_codes(part[3], ns, out, defs)
break
def _eval(self, code, ns, pos):
# __traceback_hide__ = True
try:
try:
value = eval(code, self.default_namespace, ns)
except SyntaxError as e:
raise SyntaxError(
'invalid syntax in expression: %s' % code)
return value
except:
e_type, e_value, e_traceback = sys.exc_info()
if getattr(e_value, 'args', None):
arg0 = e_value.args[0]
else:
arg0 = coerce_text(e_value)
e_value.args = (self._add_line_info(arg0, pos),)
if PY3:
raise e_value
else:
exec('raise e_type, e_value, e_traceback')
def _exec(self, code, ns, pos):
# __traceback_hide__ = True
try:
exec(code, self.default_namespace, ns)
except:
e_type, e_value, e_traceback = sys.exc_info()
if e_value.args:
e_value.args = (self._add_line_info(e_value.args[0], pos),)
else:
e_value.args = (self._add_line_info(None, pos),)
if PY3:
raise e_value
else:
exec('raise e_type, e_value, e_traceback')
def _repr(self, value, pos):
# __traceback_hide__ = True
try:
if value is None:
return ''
if self._unicode:
value = str(value)
if not is_unicode(value):
value = value.decode('utf-8')
else:
if not isinstance(value, basestring_):
value = coerce_text(value)
if (is_unicode(value) and self.default_encoding):
value = value.encode(self.default_encoding)
except:
e_type, e_value, e_traceback = sys.exc_info()
e_value.args = (self._add_line_info(e_value.args[0], pos),)
if PY3:
raise e_value
else:
exec('raise e_type, e_value, e_traceback')
else:
if self._unicode and isinstance(value, bytes):
if not self.default_encoding:
raise UnicodeDecodeError(
'Cannot decode bytes value %r into unicode '
'(no default_encoding provided)' % value)
try:
value = value.decode(self.default_encoding)
except UnicodeDecodeError as e:
raise UnicodeDecodeError(
e.encoding,
e.object,
e.start,
e.end,
e.reason + ' in string %r' % value)
elif not self._unicode and is_unicode(value):
if not self.default_encoding:
raise UnicodeEncodeError(
'Cannot encode unicode value %r into bytes '
'(no default_encoding provided)' % value)
value = value.encode(self.default_encoding)
return value
def _add_line_info(self, msg, pos):
msg = "%s at line %s column %s" % (
msg, pos[0], pos[1])
if self.name:
msg += " in file %s" % self.name
return msg
def sub(content, delimiters=None, **kw):
name = kw.get('__name')
tmpl = Template(content, name=name, delimiters=delimiters)
return tmpl.substitute(kw)
def paste_script_template_renderer(content, vars, filename=None):
tmpl = Template(content, name=filename)
return tmpl.substitute(vars)
class bunch(dict):
def __init__(self, **kw):
for name, value in iteritems(kw):
setattr(self, name, value)
def __setattr__(self, name, value):
self[name] = value
def __getattr__(self, name):
try:
return self[name]
except KeyError:
raise AttributeError(name)
def __getitem__(self, key):
if 'default' in self:
try:
return dict.__getitem__(self, key)
except KeyError:
return dict.__getitem__(self, 'default')
else:
return dict.__getitem__(self, key)
def __repr__(self):
items = [
(k, v) for k, v in iteritems(self)]
items.sort()
return '<%s %s>' % (
self.__class__.__name__,
' '.join(['%s=%r' % (k, v) for k, v in items]))
############################################################
# HTML Templating
############################################################
class html(object):
def __init__(self, value):
self.value = value
def __str__(self):
return self.value
def __html__(self):
return self.value
def __repr__(self):
return '<%s %r>' % (
self.__class__.__name__, self.value)
def html_quote(value, force=True):
if not force and hasattr(value, '__html__'):
return value.__html__()
if value is None:
return ''
if not isinstance(value, basestring_):
value = coerce_text(value)
if sys.version >= "3" and isinstance(value, bytes):
value = html_escape(value.decode('latin1'), 1)
value = value.encode('latin1')
else:
value = html_escape(value, 1)
if sys.version < "3":
if is_unicode(value):
value = value.encode('ascii', 'xmlcharrefreplace')
return value
def url(v):
v = coerce_text(v)
if is_unicode(v):
v = v.encode('utf8')
return url_quote(v)
def attr(**kw):
kw = list(iteritems(kw))
kw.sort()
parts = []
for name, value in kw:
if value is None:
continue
if name.endswith('_'):
name = name[:-1]
parts.append('%s="%s"' % (html_quote(name), html_quote(value)))
return html(' '.join(parts))
class HTMLTemplate(Template):
default_namespace = Template.default_namespace.copy()
default_namespace.update(dict(
html=html,
attr=attr,
url=url,
html_quote=html_quote))
def _repr(self, value, pos):
if hasattr(value, '__html__'):
value = value.__html__()
quote = False
else:
quote = True
plain = Template._repr(self, value, pos)
if quote:
return html_quote(plain)
else:
return plain
def sub_html(content, **kw):
name = kw.get('__name')
tmpl = HTMLTemplate(content, name=name)
return tmpl.substitute(kw)
class TemplateDef(object):
def __init__(self, template, func_name, func_signature,
body, ns, pos, bound_self=None):
self._template = template
self._func_name = func_name
self._func_signature = func_signature
self._body = body
self._ns = ns
self._pos = pos
self._bound_self = bound_self
def __repr__(self):
return '<tempita function %s(%s) at %s:%s>' % (
self._func_name, self._func_signature,
self._template.name, self._pos)
def __str__(self):
return self()
def __call__(self, *args, **kw):
values = self._parse_signature(args, kw)
ns = self._ns.copy()
ns.update(values)
if self._bound_self is not None:
ns['self'] = self._bound_self
out = []
subdefs = {}
self._template._interpret_codes(self._body, ns, out, subdefs)
return ''.join(out)
def __get__(self, obj, type=None):
if obj is None:
return self
return self.__class__(
self._template, self._func_name, self._func_signature,
self._body, self._ns, self._pos, bound_self=obj)
def _parse_signature(self, args, kw):
values = {}
sig_args, var_args, var_kw, defaults = self._func_signature
extra_kw = {}
for name, value in iteritems(kw):
if not var_kw and name not in sig_args:
raise TypeError(
'Unexpected argument %s' % name)
if name in sig_args:
values[name] = value
else:
extra_kw[name] = value
args = list(args)
sig_args = list(sig_args)
while args:
while sig_args and sig_args[0] in values:
sig_args.pop(0)
if sig_args:
name = sig_args.pop(0)
values[name] = args.pop(0)
elif var_args:
values[var_args] = tuple(args)
break
else:
raise TypeError(
'Extra position arguments: %s'
% ', '.join(repr(v) for v in args))
for name, value_expr in iteritems(defaults):
if name not in values:
values[name] = self._template._eval(
value_expr, self._ns, self._pos)
for name in sig_args:
if name not in values:
raise TypeError(
'Missing argument: %s' % name)
if var_kw:
values[var_kw] = extra_kw
return values
class TemplateObject(object):
def __init__(self, name):
self.__name = name
self.get = TemplateObjectGetter(self)
def __repr__(self):
return '<%s %s>' % (self.__class__.__name__, self.__name)
class TemplateObjectGetter(object):
def __init__(self, template_obj):
self.__template_obj = template_obj
def __getattr__(self, attr):
return getattr(self.__template_obj, attr, Empty)
def __repr__(self):
return '<%s around %r>' % (
self.__class__.__name__, self.__template_obj)
class _Empty(object):
def __call__(self, *args, **kw):
return self
def __str__(self):
return ''
def __repr__(self):
return 'Empty'
def __unicode__(self):
return '' if PY3 else u''
def __iter__(self):
return iter(())
def __bool__(self):
return False
if sys.version < "3":
__nonzero__ = __bool__
Empty = _Empty()
del _Empty
############################################################
# Lexing and Parsing
############################################################
def lex(s, name=None, trim_whitespace=True, line_offset=0, delimiters=None):
if delimiters is None:
delimiters = (Template.default_namespace['start_braces'],
Template.default_namespace['end_braces'])
in_expr = False
chunks = []
last = 0
last_pos = (line_offset + 1, 1)
token_re = re.compile(r'%s|%s' % (re.escape(delimiters[0]),
re.escape(delimiters[1])))
for match in token_re.finditer(s):
expr = match.group(0)
pos = find_position(s, match.end(), last, last_pos)
if expr == delimiters[0] and in_expr:
raise TemplateError('%s inside expression' % delimiters[0],
position=pos,
name=name)
elif expr == delimiters[1] and not in_expr:
raise TemplateError('%s outside expression' % delimiters[1],
position=pos,
name=name)
if expr == delimiters[0]:
part = s[last:match.start()]
if part:
chunks.append(part)
in_expr = True
else:
chunks.append((s[last:match.start()], last_pos))
in_expr = False
last = match.end()
last_pos = pos
if in_expr:
raise TemplateError('No %s to finish last expression' % delimiters[1],
name=name, position=last_pos)
part = s[last:]
if part:
chunks.append(part)
if trim_whitespace:
chunks = trim_lex(chunks)
return chunks
lex.__doc__ = """
Lex a string into chunks:
>>> lex('hey')
['hey']
>>> lex('hey {{you}}')
['hey ', ('you', (1, 7))]
>>> lex('hey {{')
Traceback (most recent call last):
...
tempita.TemplateError: No }} to finish last expression at line 1 column 7
>>> lex('hey }}')
Traceback (most recent call last):
...
tempita.TemplateError: }} outside expression at line 1 column 7
>>> lex('hey {{ {{')
Traceback (most recent call last):
...
tempita.TemplateError: {{ inside expression at line 1 column 10
""" if PY3 else """
Lex a string into chunks:
>>> lex('hey')
['hey']
>>> lex('hey {{you}}')
['hey ', ('you', (1, 7))]
>>> lex('hey {{')
Traceback (most recent call last):
...
TemplateError: No }} to finish last expression at line 1 column 7
>>> lex('hey }}')
Traceback (most recent call last):
...
TemplateError: }} outside expression at line 1 column 7
>>> lex('hey {{ {{')
Traceback (most recent call last):
...
TemplateError: {{ inside expression at line 1 column 10
"""
statement_re = re.compile(r'^(?:if |elif |for |def |inherit |default |py:)')
single_statements = ['else', 'endif', 'endfor', 'enddef', 'continue', 'break']
trail_whitespace_re = re.compile(r'\n\r?[\t ]*$')
lead_whitespace_re = re.compile(r'^[\t ]*\n')
def trim_lex(tokens):
last_trim = None
for i in range(len(tokens)):
current = tokens[i]
if isinstance(tokens[i], basestring_):
# we don't trim this
continue
item = current[0]
if not statement_re.search(item) and item not in single_statements:
continue
if not i:
prev = ''
else:
prev = tokens[i - 1]
if i + 1 >= len(tokens):
next_chunk = ''
else:
next_chunk = tokens[i + 1]
if (not
isinstance(next_chunk, basestring_) or
not isinstance(prev, basestring_)):
continue
prev_ok = not prev or trail_whitespace_re.search(prev)
if i == 1 and not prev.strip():
prev_ok = True
if last_trim is not None and last_trim + 2 == i and not prev.strip():
prev_ok = 'last'
if (prev_ok and (not next_chunk or lead_whitespace_re.search(
next_chunk) or (
i == len(tokens) - 2 and not next_chunk.strip()))):
if prev:
if ((i == 1 and not prev.strip()) or prev_ok == 'last'):
tokens[i - 1] = ''
else:
m = trail_whitespace_re.search(prev)
# +1 to leave the leading \n on:
prev = prev[:m.start() + 1]
tokens[i - 1] = prev
if next_chunk:
last_trim = i
if i == len(tokens) - 2 and not next_chunk.strip():
tokens[i + 1] = ''
else:
m = lead_whitespace_re.search(next_chunk)
next_chunk = next_chunk[m.end():]
tokens[i + 1] = next_chunk
return tokens
trim_lex.__doc__ = r"""
Takes a lexed set of tokens, and removes whitespace when there is
a directive on a line by itself:
>>> tokens = lex('{{if x}}\nx\n{{endif}}\ny', trim_whitespace=False)
>>> tokens
[('if x', (1, 3)), '\nx\n', ('endif', (3, 3)), '\ny']
>>> trim_lex(tokens)
[('if x', (1, 3)), 'x\n', ('endif', (3, 3)), 'y']
""" if PY3 else r"""
Takes a lexed set of tokens, and removes whitespace when there is
a directive on a line by itself:
>>> tokens = lex('{{if x}}\nx\n{{endif}}\ny', trim_whitespace=False)
>>> tokens
[('if x', (1, 3)), '\nx\n', ('endif', (3, 3)), '\ny']
>>> trim_lex(tokens)
[('if x', (1, 3)), 'x\n', ('endif', (3, 3)), 'y']
"""
def find_position(string, index, last_index, last_pos):
"""
Given a string and index, return (line, column)
"""
lines = string.count('\n', last_index, index)
if lines > 0:
column = index - string.rfind('\n', last_index, index)
else:
column = last_pos[1] + (index - last_index)
return (last_pos[0] + lines, column)
def parse(s, name=None, line_offset=0, delimiters=None):
if delimiters is None:
delimiters = (Template.default_namespace['start_braces'],
Template.default_namespace['end_braces'])
tokens = lex(s, name=name, line_offset=line_offset, delimiters=delimiters)
result = []
while tokens:
next_chunk, tokens = parse_expr(tokens, name)
result.append(next_chunk)
return result
parse.__doc__ = r"""
Parses a string into a kind of AST
>>> parse('{{x}}')
[('expr', (1, 3), 'x')]
>>> parse('foo')
['foo']
>>> parse('{{if x}}test{{endif}}')
[('cond', (1, 3), ('if', (1, 3), 'x', ['test']))]
>>> parse(
... 'series->{{for x in y}}x={{x}}{{endfor}}'
... ) #doctest: +NORMALIZE_WHITESPACE
['series->',
('for', (1, 11), ('x',), 'y', ['x=', ('expr', (1, 27), 'x')])]
>>> parse('{{for x, y in z:}}{{continue}}{{endfor}}')
[('for', (1, 3), ('x', 'y'), 'z', [('continue', (1, 21))])]
>>> parse('{{py:x=1}}')
[('py', (1, 3), 'x=1')]
>>> parse(
... '{{if x}}a{{elif y}}b{{else}}c{{endif}}'
... ) #doctest: +NORMALIZE_WHITESPACE
[('cond', (1, 3), ('if', (1, 3), 'x', ['a']),
('elif', (1, 12), 'y', ['b']), ('else', (1, 23), None, ['c']))]
Some exceptions::
>>> parse('{{continue}}')
Traceback (most recent call last):
...
tempita.TemplateError: continue outside of for loop at line 1 column 3
>>> parse('{{if x}}foo')
Traceback (most recent call last):
...
tempita.TemplateError: No {{endif}} at line 1 column 3
>>> parse('{{else}}')
Traceback (most recent call last):
...
tempita.TemplateError: else outside of an if block at line 1 column 3
>>> parse('{{if x}}{{for x in y}}{{endif}}{{endfor}}')
Traceback (most recent call last):
...
tempita.TemplateError: Unexpected endif at line 1 column 25
>>> parse('{{if}}{{endif}}')
Traceback (most recent call last):
...
tempita.TemplateError: if with no expression at line 1 column 3
>>> parse('{{for x y}}{{endfor}}')
Traceback (most recent call last):
...
tempita.TemplateError: Bad for (no "in") in 'x y' at line 1 column 3
>>> parse('{{py:x=1\ny=2}}') #doctest: +NORMALIZE_WHITESPACE
Traceback (most recent call last):
...
tempita.TemplateError: Multi-line py blocks must start
with a newline at line 1 column 3
""" if PY3 else r"""
Parses a string into a kind of AST
>>> parse('{{x}}')
[('expr', (1, 3), 'x')]
>>> parse('foo')
['foo']
>>> parse('{{if x}}test{{endif}}')
[('cond', (1, 3), ('if', (1, 3), 'x', ['test']))]
>>> parse(
... 'series->{{for x in y}}x={{x}}{{endfor}}'
... ) #doctest: +NORMALIZE_WHITESPACE
['series->',
('for', (1, 11), ('x',), 'y', ['x=', ('expr', (1, 27), 'x')])]
>>> parse('{{for x, y in z:}}{{continue}}{{endfor}}')
[('for', (1, 3), ('x', 'y'), 'z', [('continue', (1, 21))])]
>>> parse('{{py:x=1}}')
[('py', (1, 3), 'x=1')]
>>> parse(
... '{{if x}}a{{elif y}}b{{else}}c{{endif}}'
... ) #doctest: +NORMALIZE_WHITESPACE
[('cond', (1, 3), ('if', (1, 3), 'x', ['a']),
('elif', (1, 12), 'y', ['b']), ('else', (1, 23), None, ['c']))]
Some exceptions::
>>> parse('{{continue}}')
Traceback (most recent call last):
...
TemplateError: continue outside of for loop at line 1 column 3
>>> parse('{{if x}}foo')
Traceback (most recent call last):
...
TemplateError: No {{endif}} at line 1 column 3
>>> parse('{{else}}')
Traceback (most recent call last):
...
TemplateError: else outside of an if block at line 1 column 3
>>> parse('{{if x}}{{for x in y}}{{endif}}{{endfor}}')
Traceback (most recent call last):
...
TemplateError: Unexpected endif at line 1 column 25
>>> parse('{{if}}{{endif}}')
Traceback (most recent call last):
...
TemplateError: if with no expression at line 1 column 3
>>> parse('{{for x y}}{{endfor}}')
Traceback (most recent call last):
...
TemplateError: Bad for (no "in") in 'x y' at line 1 column 3
>>> parse('{{py:x=1\ny=2}}') #doctest: +NORMALIZE_WHITESPACE
Traceback (most recent call last):
...
TemplateError: Multi-line py blocks must start
with a newline at line 1 column 3
"""
def parse_expr(tokens, name, context=()):
if isinstance(tokens[0], basestring_):
return tokens[0], tokens[1:]
expr, pos = tokens[0]
expr = expr.strip()
if expr.startswith('py:'):
expr = expr[3:].lstrip(' \t')
if expr.startswith('\n') or expr.startswith('\r'):
expr = expr.lstrip('\r\n')
if '\r' in expr:
expr = expr.replace('\r\n', '\n')
expr = expr.replace('\r', '')
expr += '\n'
else:
if '\n' in expr:
raise TemplateError(
'Multi-line py blocks must start with a newline',
position=pos, name=name)
return ('py', pos, expr), tokens[1:]
elif expr in ('continue', 'break'):
if 'for' not in context:
raise TemplateError(
'%s outside of for loop' % expr,
position=pos, name=name)
return (expr, pos), tokens[1:]
elif expr.startswith('if '):
return parse_cond(tokens, name, context)
elif (expr.startswith('elif ') or expr == 'else'):
raise TemplateError(
'%s outside of an if block' % expr.split()[0],
position=pos, name=name)
elif expr in ('if', 'elif', 'for'):
raise TemplateError(
'%s with no expression' % expr,
position=pos, name=name)
elif expr in ('endif', 'endfor', 'enddef'):
raise TemplateError(
'Unexpected %s' % expr,
position=pos, name=name)
elif expr.startswith('for '):
return parse_for(tokens, name, context)
elif expr.startswith('default '):
return parse_default(tokens, name, context)
elif expr.startswith('inherit '):
return parse_inherit(tokens, name, context)
elif expr.startswith('def '):
return parse_def(tokens, name, context)
elif expr.startswith('#'):
return ('comment', pos, tokens[0][0]), tokens[1:]
return ('expr', pos, tokens[0][0]), tokens[1:]
def parse_cond(tokens, name, context):
start = tokens[0][1]
pieces = []
context = context + ('if',)
while 1:
if not tokens:
raise TemplateError(
'Missing {{endif}}',
position=start, name=name)
if (isinstance(tokens[0], tuple) and tokens[0][0] == 'endif'):
return ('cond', start) + tuple(pieces), tokens[1:]
next_chunk, tokens = parse_one_cond(tokens, name, context)
pieces.append(next_chunk)
def parse_one_cond(tokens, name, context):
(first, pos), tokens = tokens[0], tokens[1:]
content = []
if first.endswith(':'):
first = first[:-1]
if first.startswith('if '):
part = ('if', pos, first[3:].lstrip(), content)
elif first.startswith('elif '):
part = ('elif', pos, first[5:].lstrip(), content)
elif first == 'else':
part = ('else', pos, None, content)
else:
assert 0, "Unexpected token %r at %s" % (first, pos)
while 1:
if not tokens:
raise TemplateError(
'No {{endif}}',
position=pos, name=name)
if (isinstance(tokens[0], tuple) and (
tokens[0][0] == 'endif' or tokens[0][0].startswith(
'elif ') or tokens[0][0] == 'else')):
return part, tokens
next_chunk, tokens = parse_expr(tokens, name, context)
content.append(next_chunk)
def parse_for(tokens, name, context):
first, pos = tokens[0]
tokens = tokens[1:]
context = ('for',) + context
content = []
assert first.startswith('for ')
if first.endswith(':'):
first = first[:-1]
first = first[3:].strip()
match = in_re.search(first)
if not match:
raise TemplateError(
'Bad for (no "in") in %r' % first,
position=pos, name=name)
vars = first[:match.start()]
if '(' in vars:
raise TemplateError(
'You cannot have () in the variable section of a for loop (%r)'
% vars, position=pos, name=name)
vars = tuple([
v.strip() for v in first[:match.start()].split(',')
if v.strip()])
expr = first[match.end():]
while 1:
if not tokens:
raise TemplateError(
'No {{endfor}}',
position=pos, name=name)
if (isinstance(tokens[0], tuple) and tokens[0][0] == 'endfor'):
return ('for', pos, vars, expr, content), tokens[1:]
next_chunk, tokens = parse_expr(tokens, name, context)
content.append(next_chunk)
def parse_default(tokens, name, context):
first, pos = tokens[0]
assert first.startswith('default ')
first = first.split(None, 1)[1]
parts = first.split('=', 1)
if len(parts) == 1:
raise TemplateError(
"Expression must be {{default var=value}}; no = found in %r" %
first, position=pos, name=name)
var = parts[0].strip()
if ',' in var:
raise TemplateError(
"{{default x, y = ...}} is not supported",
position=pos, name=name)
if not var_re.search(var):
raise TemplateError(
"Not a valid variable name for {{default}}: %r"
% var, position=pos, name=name)
expr = parts[1].strip()
return ('default', pos, var, expr), tokens[1:]
def parse_inherit(tokens, name, context):
first, pos = tokens[0]
assert first.startswith('inherit ')
expr = first.split(None, 1)[1]
return ('inherit', pos, expr), tokens[1:]
def parse_def(tokens, name, context):
first, start = tokens[0]
tokens = tokens[1:]
assert first.startswith('def ')
first = first.split(None, 1)[1]
if first.endswith(':'):
first = first[:-1]
if '(' not in first:
func_name = first
sig = ((), None, None, {})
elif not first.endswith(')'):
raise TemplateError("Function definition doesn't end with ): %s" %
first, position=start, name=name)
else:
first = first[:-1]
func_name, sig_text = first.split('(', 1)
sig = parse_signature(sig_text, name, start)
context = context + ('def',)
content = []
while 1:
if not tokens:
raise TemplateError(
'Missing {{enddef}}',
position=start, name=name)
if (isinstance(tokens[0], tuple) and tokens[0][0] == 'enddef'):
return ('def', start, func_name, sig, content), tokens[1:]
next_chunk, tokens = parse_expr(tokens, name, context)
content.append(next_chunk)
def parse_signature(sig_text, name, pos):
tokens = tokenize.generate_tokens(StringIO(sig_text).readline)
sig_args = []
var_arg = None
var_kw = None
defaults = {}
def get_token(pos=False):
try:
tok_type, tok_string, (srow, scol), (erow, ecol), line = next(
tokens)
except StopIteration:
return tokenize.ENDMARKER, ''
if pos:
return tok_type, tok_string, (srow, scol), (erow, ecol)
else:
return tok_type, tok_string
while 1:
var_arg_type = None
tok_type, tok_string = get_token()
if tok_type == tokenize.ENDMARKER:
break
if tok_type == tokenize.OP and (
tok_string == '*' or tok_string == '**'):
var_arg_type = tok_string
tok_type, tok_string = get_token()
if tok_type != tokenize.NAME:
raise TemplateError('Invalid signature: (%s)' % sig_text,
position=pos, name=name)
var_name = tok_string
tok_type, tok_string = get_token()
if tok_type == tokenize.ENDMARKER or (
tok_type == tokenize.OP and tok_string == ','):
if var_arg_type == '*':
var_arg = var_name
elif var_arg_type == '**':
var_kw = var_name
else:
sig_args.append(var_name)
if tok_type == tokenize.ENDMARKER:
break
continue
if var_arg_type is not None:
raise TemplateError('Invalid signature: (%s)' % sig_text,
position=pos, name=name)
if tok_type == tokenize.OP and tok_string == '=':
nest_type = None
unnest_type = None
nest_count = 0
start_pos = end_pos = None
parts = []
while 1:
tok_type, tok_string, s, e = get_token(True)
if start_pos is None:
start_pos = s
end_pos = e
if tok_type == tokenize.ENDMARKER and nest_count:
raise TemplateError('Invalid signature: (%s)' % sig_text,
position=pos, name=name)
if (not nest_count and
(tok_type == tokenize.ENDMARKER or
(tok_type == tokenize.OP and tok_string == ','))):
default_expr = isolate_expression(
sig_text, start_pos, end_pos)
defaults[var_name] = default_expr
sig_args.append(var_name)
break
parts.append((tok_type, tok_string))
if nest_count \
and tok_type == tokenize.OP \
and tok_string == nest_type:
nest_count += 1
elif nest_count \
and tok_type == tokenize.OP \
and tok_string == unnest_type:
nest_count -= 1
if not nest_count:
nest_type = unnest_type = None
elif not nest_count \
and tok_type == tokenize.OP \
and tok_string in ('(', '[', '{'):
nest_type = tok_string
nest_count = 1
unnest_type = {'(': ')', '[': ']', '{': '}'}[nest_type]
return sig_args, var_arg, var_kw, defaults
def isolate_expression(string, start_pos, end_pos):
srow, scol = start_pos
srow -= 1
erow, ecol = end_pos
erow -= 1
lines = string.splitlines(True)
if srow == erow:
return lines[srow][scol:ecol]
parts = [lines[srow][scol:]]
parts.extend(lines[srow + 1:erow])
if erow < len(lines):
# It'll sometimes give (end_row_past_finish, 0)
parts.append(lines[erow][:ecol])
return ''.join(parts)
_fill_command_usage = """\
%prog [OPTIONS] TEMPLATE arg=value
Use py:arg=value to set a Python value; otherwise all values are
strings.
"""
def fill_command(args=None):
import sys
import optparse
import pkg_resources
import os
if args is None:
args = sys.argv[1:]
dist = pkg_resources.get_distribution('Paste')
parser = optparse.OptionParser(
version=coerce_text(dist),
usage=_fill_command_usage)
parser.add_option(
'-o', '--output',
dest='output',
metavar="FILENAME",
help="File to write output to (default stdout)")
parser.add_option(
'--html',
dest='use_html',
action='store_true',
help="Use HTML style filling (including automatic HTML quoting)")
parser.add_option(
'--env',
dest='use_env',
action='store_true',
help="Put the environment in as top-level variables")
options, args = parser.parse_args(args)
if len(args) < 1:
print('You must give a template filename')
sys.exit(2)
template_name = args[0]
args = args[1:]
vars = {}
if options.use_env:
vars.update(os.environ)
for value in args:
if '=' not in value:
print('Bad argument: %r' % value)
sys.exit(2)
name, value = value.split('=', 1)
if name.startswith('py:'):
name = name[3:]
value = eval(value)
vars[name] = value
if template_name == '-':
template_content = sys.stdin.read()
template_name = '<stdin>'
else:
with open(template_name, 'rb') as f:
template_content = f.read().decode('latin-1')
if options.use_html:
TemplateClass = HTMLTemplate
else:
TemplateClass = Template
template = TemplateClass(template_content, name=template_name)
result = template.substitute(vars)
if options.output:
with open(options.output, 'wb') as f:
f.write(result)
else:
sys.stdout.write(result)
if __name__ == '__main__':
fill_command()
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
docs/conf.py
|
# -*- coding: utf-8 -*-
#
# flake8: noqa
# Disable Flake8 because of all the sphinx imports
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# Airflow documentation build configuration file, created by
# sphinx-quickstart on Thu Oct 9 20:50:01 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import sys
import airflow
autodoc_mock_imports = [
'MySQLdb',
'adal',
'analytics',
'azure',
'azure.cosmos',
'azure.datalake',
'azure.mgmt',
'boto3',
'botocore',
'bson',
'cassandra',
'celery',
'cloudant',
'cryptography',
'cx_Oracle',
'datadog',
'distributed',
'docker',
'google',
'google_auth_httplib2',
'googleapiclient',
'hdfs',
'httplib2',
'jaydebeapi',
'jenkins',
'jira',
'kubernetes',
'mesos',
'msrestazure',
'pandas',
'pandas_gbq',
'paramiko',
'pinotdb',
'psycopg2',
'pydruid',
'pyhive',
'pymongo',
'pymssql',
'pysftp',
'qds_sdk',
'redis',
'simple_salesforce',
'slackclient',
'smbclient',
'snowflake',
'sshtunnel',
'tenacity',
'vertica_python',
'winrm',
'zdesk',
]
# Hack to allow a piece of the code to behave differently while the docs are
# being built. The main objective was to alter the behavior of
# utils.apply_default, which was hiding function headers.
os.environ['BUILDING_AIRFLOW_DOCS'] = 'TRUE'
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.coverage',
'sphinx.ext.viewcode',
'sphinxarg.ext',
'sphinxcontrib.httpdomain',
'sphinx.ext.intersphinx',
]
autodoc_default_flags = ['show-inheritance', 'members']
viewcode_import = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Airflow'
# copyright = u''
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
# version = '1.0.0'
version = airflow.__version__
# The full version, including alpha/beta/rc tags.
# release = '1.0.0'
release = airflow.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
intersphinx_mapping = {
'boto3': ('https://boto3.amazonaws.com/v1/documentation/api/latest/', None),
'google-cloud-python': (
'https://googleapis.github.io/google-cloud-python/latest/', None),
'mongodb': ('https://api.mongodb.com/python/current/', None),
'pandas': ('https://pandas.pydata.org/pandas-docs/stable/', None),
'python': ('https://docs.python.org/3/', None),
'requests': ('http://docs.python-requests.org/en/master/', None),
'sqlalchemy': ('https://docs.sqlalchemy.org/en/latest/', None),
}
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
import sphinx_rtd_theme
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = "Airflow Documentation"
# A shorter title for the navigation bar. Default is the same as html_title.
html_short_title = ""
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
# html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
html_show_copyright = False
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Airflowdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'Airflow.tex', u'Airflow Documentation',
u'Apache Airflow', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'airflow', u'Airflow Documentation',
[u'Apache Airflow'], 1)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [(
'index', 'Airflow', u'Airflow Documentation',
u'Apache Airflow', 'Airflow',
'Airflow is a system to programmatically author, schedule and monitor data pipelines.',
'Miscellaneous'
), ]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
# texinfo_no_detailmenu = False
|
[] |
[] |
[
"BUILDING_AIRFLOW_DOCS"
] |
[]
|
["BUILDING_AIRFLOW_DOCS"]
|
python
| 1 | 0 | |
manage.py
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "colcat_crowdsourcing.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
tests/path/test_sdss5.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Filename: test_sdss5.py
# Project: path
# Author: Brian Cherinka
# Created: Monday, 12th October 2020 5:37:05 pm
# License: BSD 3-clause "New" or "Revised" License
# Copyright (c) 2020 Brian Cherinka
# Last Modified: Monday, 12th October 2020 5:37:05 pm
# Modified By: Brian Cherinka
from __future__ import print_function, division, absolute_import
import os
import pytest
from sdss_access.path import Path
from sdss_access import config
@pytest.fixture(scope='module')
def path():
pp = Path(release='sdss5')
yield pp
pp = None
class TestSVPaths(object):
@pytest.mark.parametrize('name, special, keys, exp',
[('apStar', '@apgprefix',
{'apred': 'r12', 'apstar': 'stars', 'telescope': 'apo25m',
'healpix': '12345', 'obj': '12345'},
'r12/stars/apo25m/12/12345/apStar-r12-12345.fits')],
ids=['apStar'])
def test_apogee_paths(self, path, name, special, keys, exp):
assert special in path.templates[name]
full = path.full(name, **keys)
assert exp in full
def test_netloc(self, path):
assert path.netloc == 'data.sdss5.org'
def assert_orig_sdss5_envvars(self):
assert os.getenv("ROBOSTRATEGY_DATA") == '/tmp/robodata'
assert os.getenv("ALLWISE_DIR") == '/tmp/allwise'
assert os.getenv("EROSITA_DIR") == '/tmp/erosita'
def assert_updated_sdss5_envvars(self):
assert 'sdsswork/sandbox/robostrategy' in os.getenv("ROBOSTRATEGY_DATA")
assert 'sdsswork/target/catalogs/allwise' in os.getenv("ALLWISE_DIR")
assert 'sdsswork/target/catalogs/eRosita' in os.getenv("EROSITA_DIR")
def assert_all_envvars(self):
self.assert_orig_sdss5_envvars()
assert 'sdsswork/sandbox/robostrategy' not in os.getenv("ROBOSTRATEGY_DATA")
def assert_subset_envvars(self):
assert 'sdsswork/sandbox/robostrategy' in os.getenv("ROBOSTRATEGY_DATA")
assert os.getenv("ROBOSTRATEGY_DATA") != '/tmp/robodata'
assert os.getenv("ALLWISE_DIR") == '/tmp/allwise'
assert os.getenv("EROSITA_DIR") == '/tmp/erosita'
def test_replant_updated(self, monkeysdss5):
self.assert_orig_sdss5_envvars()
pp = Path(release='sdss5')
self.assert_updated_sdss5_envvars()
assert 'sdsswork/sandbox/robostrategy' in pp.full('rsFields', plan='A', observatory='apo')
def test_replant_preserve_all_envvars(self, monkeysdss5):
self.assert_orig_sdss5_envvars()
pp = Path(release='sdss5', preserve_envvars=True)
self.assert_all_envvars()
assert 'tmp/robodata' in pp.full('rsFields', plan='A', observatory='apo')
assert 'tmp/allwise' in pp.full('allwisecat', ver='1.0', num=1234)
def test_replant_preserve_subset_envvars(self, monkeysdss5):
self.assert_orig_sdss5_envvars()
pp = Path(release='sdss5', preserve_envvars=['ALLWISE_DIR', 'EROSITA_DIR'])
self.assert_subset_envvars()
assert 'sdsswork/sandbox/robostrategy' in pp.full('rsFields', plan='A', observatory='apo')
assert 'tmp/allwise' in pp.full('allwisecat', ver='1.0', num=1234)
def test_replant_preserve_all_from_config(self, monkeysdss5, monkeypatch):
monkeypatch.setitem(config, 'preserve_envvars', True)
self.assert_orig_sdss5_envvars()
pp = Path(release='sdss5')
self.assert_all_envvars()
assert 'tmp/robodata' in pp.full('rsFields', plan='A', observatory='apo')
assert 'tmp/allwise' in pp.full('allwisecat', ver='1.0', num=1234)
def test_replant_preserve_subset_from_config(self, monkeysdss5, monkeypatch):
monkeypatch.setitem(config, 'preserve_envvars', ['ALLWISE_DIR', 'EROSITA_DIR'])
self.assert_orig_sdss5_envvars()
pp = Path(release='sdss5')
self.assert_subset_envvars()
assert 'sdsswork/sandbox/robostrategy' in pp.full(
'rsFields', plan='A', observatory='apo')
assert 'tmp/allwise' in pp.full('allwisecat', ver='1.0', num=1234)
|
[] |
[] |
[
"EROSITA_DIR",
"ALLWISE_DIR",
"ROBOSTRATEGY_DATA"
] |
[]
|
["EROSITA_DIR", "ALLWISE_DIR", "ROBOSTRATEGY_DATA"]
|
python
| 3 | 0 | |
integration-tests/helpers_test.go
|
package integration_tests
import (
"fmt"
sdk "github.com/ionos-cloud/ionos-enterprise-sdk-go/v5"
"os"
"strings"
"sync"
)
var (
syncDC sync.Once
syncCDC sync.Once
dataCenter *sdk.Datacenter
compositeDataCenter *sdk.Datacenter
server *sdk.Server
volume *sdk.Volume
lan *sdk.Lan
location = "us/las"
image *sdk.Image
fw *sdk.FirewallRule
nic *sdk.Nic
sourceMac = "01:23:45:67:89:00"
portRangeStart = 22
portRangeEnd = 22
onceDC sync.Once
onceServerDC sync.Once
onceServer sync.Once
onceFw sync.Once
onceServerVolume sync.Once
onceCD sync.Once
onceLan sync.Once
onceLanServer sync.Once
onceLanLan sync.Once
onceLB sync.Once
onceLBDC sync.Once
onceLBServer sync.Once
onceLBNic sync.Once
onceNicNic sync.Once
ipBlock *sdk.IPBlock
loadBalancer *sdk.Loadbalancer
snapshot *sdk.Snapshot
snapshotname = "GO SDK TEST"
snapshotdescription = "GO SDK test snapshot"
backupUnit *sdk.BackupUnit
cluster *sdk.KubernetesCluster
share *sdk.Share
)
func boolAddr(v bool) *bool {
return &v
}
// Setup creds for single running tests
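// Illustrative sketch of the expected environment (values are placeholders):
//
//   IONOS_USERNAME=user@example.com
//   IONOS_PASSWORD=secret
//   IONOS_API_URL=https://api.example.com/cloudapi/v5   (optional)
//
// IONOS_API_URL is read via os.LookupEnv; the SDK default URL is kept when it is unset.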
func setupTestEnv() sdk.Client {
client := *sdk.NewClient(os.Getenv("IONOS_USERNAME"), os.Getenv("IONOS_PASSWORD"))
if val, ok := os.LookupEnv("IONOS_API_URL"); ok {
client.SetCloudApiURL(val)
}
return client
}
func createDataCenter() {
c := setupTestEnv()
var obj = sdk.Datacenter{
Properties: sdk.DatacenterProperties{
Name: "GO SDK Test",
Description: "GO SDK test datacenter",
Location: location,
},
}
resp, err := c.CreateDatacenter(obj)
if err != nil {
panic(err)
}
err = c.WaitTillProvisioned(resp.Headers.Get("Location"))
if err != nil {
panic(err)
}
dataCenter = resp
}
func createLan() {
c := setupTestEnv()
var obj = sdk.Lan{
Properties: sdk.LanProperties{
Name: "GO SDK Test",
Public: true,
},
}
resp, _ := c.CreateLan(dataCenter.ID, obj)
c.WaitTillProvisioned(resp.Headers.Get("Location"))
lan = resp
}
func createCompositeDataCenter() {
c := setupTestEnv()
var obj = sdk.Datacenter{
Properties: sdk.DatacenterProperties{
Name: "GO SDK Test Composite",
Description: "GO SDK test composite datacenter",
Location: location,
},
Entities: sdk.DatacenterEntities{
Servers: &sdk.Servers{
Items: []sdk.Server{
{
Properties: sdk.ServerProperties{
Name: "GO SDK Test",
RAM: 1024,
Cores: 1,
},
},
},
},
Volumes: &sdk.Volumes{
Items: []sdk.Volume{
{
Properties: sdk.VolumeProperties{
Type: "HDD",
Size: 2,
Name: "GO SDK Test",
Bus: "VIRTIO",
LicenceType: "UNKNOWN",
AvailabilityZone: "ZONE_3",
},
},
},
},
},
}
resp, err := c.CreateDatacenter(obj)
if err != nil {
fmt.Println("error while creating", err)
fmt.Println(resp.Response)
return
}
compositeDataCenter = resp
err = c.WaitTillProvisioned(compositeDataCenter.Headers.Get("Location"))
if err != nil {
fmt.Println("error while waiting", err)
}
}
func createCompositeServerFW() {
c := setupTestEnv()
var req = sdk.Server{
Properties: sdk.ServerProperties{
Name: "GO SDK Test",
RAM: 1024,
Cores: 1,
AvailabilityZone: "ZONE_1",
CPUFamily: "INTEL_XEON",
},
Entities: &sdk.ServerEntities{
Volumes: &sdk.Volumes{
Items: []sdk.Volume{
{
Properties: sdk.VolumeProperties{
Type: "HDD",
Size: 5,
Name: "volume1",
ImageAlias: "ubuntu:latest",
ImagePassword: "JWXuXR9CMghXAc6v",
},
},
},
},
Nics: &sdk.Nics{
Items: []sdk.Nic{
{
Properties: &sdk.NicProperties{
Name: "nic",
Lan: 1,
},
Entities: &sdk.NicEntities{
FirewallRules: &sdk.FirewallRules{
Items: []sdk.FirewallRule{
{
Properties: sdk.FirewallruleProperties{
Name: "SSH",
Protocol: "TCP",
SourceMac: &sourceMac,
PortRangeStart: &portRangeStart,
PortRangeEnd: &portRangeEnd,
},
},
},
},
},
},
},
},
},
}
srv, err := c.CreateServer(dataCenter.ID, req)
if err != nil {
fmt.Println("[createCompositeServerFW] error while creating a server: ", err)
os.Exit(1)
}
server = srv
nic = &srv.Entities.Nics.Items[0]
fw = &nic.Entities.FirewallRules.Items[0]
err = c.WaitTillProvisioned(srv.Headers.Get("Location"))
if err != nil {
fmt.Println("[createCompositeServerFW] server creation timeout timeout: ", err)
os.Exit(1)
}
}
func createNic() {
c := setupTestEnv()
obj := sdk.Nic{
Properties: &sdk.NicProperties{
Name: "GO SDK Test",
Lan: 1,
},
}
resp, _ := c.CreateNic(dataCenter.ID, server.ID, obj)
c.WaitTillProvisioned(resp.Headers.Get("Location"))
nic = resp
}
func createLoadBalancerWithIP() {
c := setupTestEnv()
var obj = sdk.IPBlock{
Properties: sdk.IPBlockProperties{
Name: "GO SDK Test",
Size: 1,
Location: "us/las",
},
}
resp, err := c.ReserveIPBlock(obj)
if err != nil {
fmt.Println("Error while reserving an IP block", err)
fmt.Println(resp.Response)
os.Exit(1)
}
err = c.WaitTillProvisioned(resp.Headers.Get("Location"))
if err != nil {
fmt.Println("error while waiting for IPBlock to be reserved: ", err)
os.Exit(1)
}
ipBlock = resp
var request = sdk.Loadbalancer{
Properties: sdk.LoadbalancerProperties{
Name: "GO SDK Test",
IP: resp.Properties.IPs[0],
Dhcp: true,
},
Entities: sdk.LoadbalancerEntities{
Balancednics: &sdk.BalancedNics{
Items: []sdk.Nic{
{
ID: nic.ID,
},
},
},
},
}
resp1, err := c.CreateLoadbalancer(dataCenter.ID, request)
if err != nil {
fmt.Println("error while creating load balancer: ", err)
fmt.Println(resp1.Response)
os.Exit(1)
}
err = c.WaitTillProvisioned(resp1.Headers.Get("Location"))
if err != nil {
fmt.Println("error while waiting for load balancer to be created: ", err)
os.Exit(1)
}
loadBalancer = resp1
nic = &loadBalancer.Entities.Balancednics.Items[0]
}
func createVolume() {
c := setupTestEnv()
var request = sdk.Volume{
Properties: sdk.VolumeProperties{
Size: 2,
Name: "GO SDK Test",
LicenceType: "OTHER",
Type: "HDD",
},
}
resp, err := c.CreateVolume(dataCenter.ID, request)
if err != nil {
fmt.Println("error while creating volume: ", err)
fmt.Println(resp.Response)
os.Exit(1)
}
volume = resp
c.WaitTillProvisioned(resp.Headers.Get("Location"))
}
func createSnapshot() {
c := setupTestEnv()
resp, err := c.CreateSnapshot(dataCenter.ID, volume.ID, snapshotname, snapshotdescription)
if err != nil {
fmt.Println("error creating snapshot: ", err)
os.Exit(1)
}
snapshot = resp
err = c.WaitTillProvisioned(snapshot.Headers.Get("Location"))
if err != nil {
fmt.Println("time out waiting for snapshot creation: ", err)
os.Exit(1)
}
}
func mknicCustom(client sdk.Client, dcid, serverid string, lanid int, ips []string) string {
var request = sdk.Nic{
Properties: &sdk.NicProperties{
Lan: lanid,
Name: "GO SDK Test",
Nat: boolAddr(false),
FirewallActive: boolAddr(true),
Ips: ips,
},
}
resp, err := client.CreateNic(dcid, serverid, request)
if err != nil {
return ""
}
err = client.WaitTillProvisioned(resp.Headers.Get("Location"))
if err != nil {
return ""
}
return resp.ID
}
func createServer() {
server = setupCreateServer(dataCenter.ID)
if server == nil {
panic("Server not created")
}
}
func setupCreateServer(srvDc string) *sdk.Server {
c := setupTestEnv()
var req = sdk.Server{
Properties: sdk.ServerProperties{
Name: "GO SDK Test",
RAM: 1024,
Cores: 1,
AvailabilityZone: "ZONE_1",
CPUFamily: "INTEL_XEON",
},
}
srv, err := c.CreateServer(srvDc, req)
if err != nil {
return nil
}
err = c.WaitTillProvisioned(srv.Headers.Get("Location"))
if err != nil {
return nil
}
return srv
}
func setupVolume() {
c := setupTestEnv()
vol := sdk.Volume{
Properties: sdk.VolumeProperties{
Type: "HDD",
Size: 2,
Name: "GO SDK Test",
Bus: "VIRTIO",
LicenceType: "UNKNOWN",
},
}
resp, err := c.CreateVolume(dataCenter.ID, vol)
if err != nil {
fmt.Println("create volume failed")
}
volume = resp
err = c.WaitTillProvisioned(resp.Headers.Get("Location"))
if err != nil {
fmt.Println("failed while waiting on volume to finish")
}
}
func setupVolumeAttached() {
c := setupTestEnv()
vol := sdk.Volume{
Properties: sdk.VolumeProperties{
Type: "HDD",
Size: 2,
Name: "GO SDK Test",
Bus: "VIRTIO",
LicenceType: "UNKNOWN",
},
}
resp, err := c.CreateVolume(dataCenter.ID, vol)
if err != nil {
fmt.Println("create volume failed")
}
err = c.WaitTillProvisioned(resp.Headers.Get("Location"))
if err != nil {
fmt.Println("failed while waiting on volume to finish")
}
volume = resp
volume, err = c.AttachVolume(dataCenter.ID, server.ID, volume.ID)
if err != nil {
fmt.Println("attach volume failed", err)
}
err = c.WaitTillProvisioned(volume.Headers.Get("Location"))
if err != nil {
fmt.Println("failed while waiting on volume to finish")
}
}
func setupCDAttached() {
c := setupTestEnv()
var imageID string
images, err := c.ListImages()
if err != nil {
fmt.Println("list images failed", err)
return
}
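// pick the first public CDROM image available in the us/las location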
for _, img := range images.Items {
if img.Properties.ImageType == "CDROM" && img.Properties.Location == "us/las" && img.Properties.Public == true {
imageID = img.ID
break
}
}
resp, err := c.AttachCdrom(dataCenter.ID, server.ID, imageID)
if err != nil {
fmt.Println("attach CD failed", err)
}
image = resp
err = c.WaitTillProvisioned(resp.Headers.Get("Location"))
if err != nil {
fmt.Println("failed while waiting on volume to finish")
}
}
func reserveIP() {
c := setupTestEnv()
var obj = sdk.IPBlock{
Properties: sdk.IPBlockProperties{
Name: "GO SDK Test",
Size: 1,
Location: location,
},
}
resp, _ := c.ReserveIPBlock(obj)
ipBlock = resp
}
func getImageID(location string, imageName string, imageType string) string {
if imageName == "" {
return ""
}
c := setupTestEnv()
images, err := c.ListImages()
if err != nil {
return ""
}
if len(images.Items) > 0 {
for _, i := range images.Items {
imgName := ""
if i.Properties.Name != "" {
imgName = i.Properties.Name
}
if imageType == "SSD" {
imageType = "HDD"
}
if imgName != "" && strings.Contains(strings.ToLower(imgName), strings.ToLower(imageName)) && i.Properties.ImageType == imageType && i.Properties.Location == location && i.Properties.Public == true {
return i.ID
}
}
}
return ""
}
|
[
"\"IONOS_USERNAME\"",
"\"IONOS_PASSWORD\""
] |
[] |
[
"IONOS_PASSWORD",
"IONOS_USERNAME"
] |
[]
|
["IONOS_PASSWORD", "IONOS_USERNAME"]
|
go
| 2 | 0 | |
pkg/engine/repository_test.go
|
package engine
import (
"io/ioutil"
"os"
"os/exec"
"strings"
"testing"
"github.com/argoproj/gitops-engine/pkg/utils/kube"
"github.com/bigkevmcd/peanut-engine/pkg/parser/kustomize"
"github.com/google/go-cmp/cmp"
)
func TestParseManifestAddsAnnotation(t *testing.T) {
c := GitConfig{RepoURL: "https://github.com/bigkevmcd/peanut-engine.git", Branch: "main", Path: "pkg/testdata"}
r := testRepository(t, c)
m, err := r.ParseManifests()
assertNoError(t, err)
d := m[0]
gcm, err := r.GCMark(kube.GetResourceKey(d))
assertNoError(t, err)
want := map[string]string{
annotationGCMark: gcm,
}
if diff := cmp.Diff(want, d.GetAnnotations()); diff != "" {
t.Fatalf("parsed manifest:\n%s", diff)
}
}
func TestOpen(t *testing.T) {
c := GitConfig{RepoURL: "https://github.com/bigkevmcd/peanut-engine.git", Branch: "main", Path: "pkg/testdata"}
r := NewRepository(c, kustomize.New())
dir, cleanup := mkTempDir(t)
t.Cleanup(cleanup)
err := r.Open(dir)
if !strings.Contains(err.Error(), `repository does not exist`) {
t.Fatalf("incorrect error: %s", err)
}
}
func TestClone(t *testing.T) {
c := GitConfig{RepoURL: "https://github.com/bigkevmcd/peanut.git", Branch: "main", Path: "pkg/testdata"}
dir, cleanup := mkTempDir(t)
t.Cleanup(cleanup)
r := NewRepository(c, kustomize.New())
err := r.Clone(dir)
assertNoError(t, err)
want := execGitHead(t, dir)
got, err := r.HeadHash()
assertNoError(t, err)
if want != got.String() {
t.Fatalf("incorrect git SHA from HeadHash, got %#v, want %#v", got.String(), want)
}
}
func TestCloneWithPrivateRepo(t *testing.T) {
if os.Getenv("TEST_GITHUB_AUTH_TOKEN") == "" {
t.Skip("this test needs a GitHub auth token")
}
c := GitConfig{RepoURL: "https://github.com/bigkevmcd/go-demo-private.git", Branch: "main", Path: "pkg/engine/testdata", AuthToken: os.Getenv("TEST_GITHUB_AUTH_TOKEN")}
dir, cleanup := mkTempDir(t)
t.Cleanup(cleanup)
r := NewRepository(c, kustomize.New())
err := r.Clone(dir)
assertNoError(t, err)
want := execGitHead(t, dir)
got, err := r.HeadHash()
assertNoError(t, err)
if want != got.String() {
t.Fatalf("incorrect git SHA from HeadHash, got %#v, want %#v", got.String(), want)
}
}
func TestCloneWithMissingSource(t *testing.T) {
c := GitConfig{RepoURL: "https://github.com/bigkevmcd/doesnotexist.git", Branch: "main", Path: "pkg/testdata"}
dir, cleanup := mkTempDir(t)
t.Cleanup(cleanup)
r := NewRepository(c, kustomize.New())
err := r.Clone(dir)
// Unfortunately this is unable to determine the difference between a
// non-existent Repo and one that requires authentication.
if !strings.Contains(err.Error(), `authentication required`) {
t.Fatalf("incorrect error: %s", err)
}
}
func TestSync(t *testing.T) {
t.Skip()
}
func TestHeadHash(t *testing.T) {
want := execGitHead(t, ".")
c := GitConfig{RepoURL: "https://github.com/bigkevmcd/peanut-engine.git", Branch: "main", Path: "pkg/testdata"}
r := testRepository(t, c)
got, err := r.HeadHash()
assertNoError(t, err)
if want != got.String() {
t.Fatalf("incorrect git SHA from HeadHash, got %#v, want %#v", got.String(), want)
}
}
func TestIsManaged(t *testing.T) {
t.Skip()
}
func testRepository(t *testing.T, c GitConfig) *PeanutRepository {
t.Helper()
r := NewRepository(c, kustomize.New())
err := r.Open("../..")
assertNoError(t, err)
return r
}
func assertNoError(t *testing.T, err error) {
t.Helper()
if err != nil {
t.Fatal(err)
}
}
func mkTempDir(t *testing.T) (string, func()) {
t.Helper()
dir, err := ioutil.TempDir("", "peanut")
if err != nil {
t.Fatal(err)
}
return dir, func() {
assertNoError(t, os.RemoveAll(dir))
}
}
func execGitHead(t *testing.T, dir string) string {
cmd := exec.Command("git", "rev-parse", "HEAD")
cmd.Dir = dir
out, err := cmd.Output()
assertNoError(t, err)
return strings.TrimSpace(string(out))
}
|
[
"\"TEST_GITHUB_AUTH_TOKEN\"",
"\"TEST_GITHUB_AUTH_TOKEN\""
] |
[] |
[
"TEST_GITHUB_AUTH_TOKEN"
] |
[]
|
["TEST_GITHUB_AUTH_TOKEN"]
|
go
| 1 | 0 | |
super/wsgi.py
|
"""
WSGI config for super project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "super.settings")
application = get_wsgi_application()
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
pkg/objstore/s3/s3.go
|
// Copyright (c) The Thanos Authors.
// Licensed under the Apache License 2.0.
// Package s3 implements common object storage abstractions against s3-compatible APIs.
package s3
import (
"context"
"fmt"
"io"
"io/ioutil"
"net"
"net/http"
"os"
"runtime"
"strconv"
"strings"
"testing"
"time"
"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/minio/minio-go/v7"
"github.com/minio/minio-go/v7/pkg/credentials"
"github.com/minio/minio-go/v7/pkg/encrypt"
"github.com/pkg/errors"
"github.com/prometheus/common/model"
"github.com/prometheus/common/version"
"gopkg.in/yaml.v2"
"github.com/thanos-io/thanos/pkg/objstore"
"github.com/thanos-io/thanos/pkg/runutil"
)
type ctxKey int
const (
// DirDelim is the delimiter used to model a directory structure in an object store bucket.
DirDelim = "/"
// SSEKMS is the name of the SSE-KMS method for objectstore encryption.
SSEKMS = "SSE-KMS"
// SSEC is the name of the SSE-C method for objstore encryption.
SSEC = "SSE-C"
// SSES3 is the name of the SSE-S3 method for objstore encryption.
SSES3 = "SSE-S3"
// sseConfigKey is the context key to override SSE config. This feature is used by downstream
// projects (eg. Cortex) to inject custom SSE config on a per-request basis. Future work or
// refactoring can introduce breaking changes as far as the functionality is preserved.
// NOTE: we're using a context value only because it's a very specific S3 option. If SSE will
// be available to wider set of backends we should probably add a variadic option to Get() and Upload().
sseConfigKey = ctxKey(0)
)
var DefaultConfig = Config{
PutUserMetadata: map[string]string{},
HTTPConfig: HTTPConfig{
IdleConnTimeout: model.Duration(90 * time.Second),
ResponseHeaderTimeout: model.Duration(2 * time.Minute),
TLSHandshakeTimeout: model.Duration(10 * time.Second),
ExpectContinueTimeout: model.Duration(1 * time.Second),
MaxIdleConns: 100,
MaxIdleConnsPerHost: 100,
MaxConnsPerHost: 0,
},
PartSize: 1024 * 1024 * 64, // 64MB.
}
// Config stores the configuration for s3 bucket.
type Config struct {
Bucket string `yaml:"bucket"`
Endpoint string `yaml:"endpoint"`
Region string `yaml:"region"`
AccessKey string `yaml:"access_key"`
Insecure bool `yaml:"insecure"`
SignatureV2 bool `yaml:"signature_version2"`
SecretKey string `yaml:"secret_key"`
PutUserMetadata map[string]string `yaml:"put_user_metadata"`
HTTPConfig HTTPConfig `yaml:"http_config"`
TraceConfig TraceConfig `yaml:"trace"`
ListObjectsVersion string `yaml:"list_objects_version"`
// PartSize used for multipart upload. Only used if uploaded object size is known and larger than configured PartSize.
// NOTE we need to make sure this number does not produce more parts than 10 000.
PartSize uint64 `yaml:"part_size"`
SSEConfig SSEConfig `yaml:"sse_config"`
STSEndpoint string `yaml:"sts_endpoint"`
}
// SSEConfig deals with the configuration of SSE for Minio. The following options are valid:
// kmsencryptioncontext == https://docs.aws.amazon.com/kms/latest/developerguide/services-s3.html#s3-encryption-context
type SSEConfig struct {
Type string `yaml:"type"`
KMSKeyID string `yaml:"kms_key_id"`
KMSEncryptionContext map[string]string `yaml:"kms_encryption_context"`
EncryptionKey string `yaml:"encryption_key"`
}
type TraceConfig struct {
Enable bool `yaml:"enable"`
}
// HTTPConfig stores the http.Transport configuration for the s3 minio client.
type HTTPConfig struct {
IdleConnTimeout model.Duration `yaml:"idle_conn_timeout"`
ResponseHeaderTimeout model.Duration `yaml:"response_header_timeout"`
InsecureSkipVerify bool `yaml:"insecure_skip_verify"`
TLSHandshakeTimeout model.Duration `yaml:"tls_handshake_timeout"`
ExpectContinueTimeout model.Duration `yaml:"expect_continue_timeout"`
MaxIdleConns int `yaml:"max_idle_conns"`
MaxIdleConnsPerHost int `yaml:"max_idle_conns_per_host"`
MaxConnsPerHost int `yaml:"max_conns_per_host"`
// Allow upstream callers to inject a round tripper
Transport http.RoundTripper `yaml:"-"`
TLSConfig objstore.TLSConfig `yaml:"tls_config"`
}
// DefaultTransport - this default transport is based on the Minio
// DefaultTransport up until the following commit:
// https://github.com/minio/minio-go/commit/008c7aa71fc17e11bf980c209a4f8c4d687fc884
// The values have since diverged.
func DefaultTransport(config Config) (*http.Transport, error) {
tlsConfig, err := objstore.NewTLSConfig(&config.HTTPConfig.TLSConfig)
if err != nil {
return nil, err
}
if config.HTTPConfig.InsecureSkipVerify {
tlsConfig.InsecureSkipVerify = true
}
return &http.Transport{
Proxy: http.ProxyFromEnvironment,
DialContext: (&net.Dialer{
Timeout: 30 * time.Second,
KeepAlive: 30 * time.Second,
DualStack: true,
}).DialContext,
MaxIdleConns: config.HTTPConfig.MaxIdleConns,
MaxIdleConnsPerHost: config.HTTPConfig.MaxIdleConnsPerHost,
IdleConnTimeout: time.Duration(config.HTTPConfig.IdleConnTimeout),
MaxConnsPerHost: config.HTTPConfig.MaxConnsPerHost,
TLSHandshakeTimeout: time.Duration(config.HTTPConfig.TLSHandshakeTimeout),
ExpectContinueTimeout: time.Duration(config.HTTPConfig.ExpectContinueTimeout),
// A custom ResponseHeaderTimeout was introduced
// to cover cases where the tcp connection works but
// the server never answers. Defaults to 2 minutes.
ResponseHeaderTimeout: time.Duration(config.HTTPConfig.ResponseHeaderTimeout),
// Set this value so that the underlying transport round-tripper
// doesn't try to auto decode the body of objects with
// content-encoding set to `gzip`.
//
// Refer: https://golang.org/src/net/http/transport.go?h=roundTrip#L1843.
DisableCompression: true,
TLSClientConfig: tlsConfig,
}, nil
}
// Bucket implements the store.Bucket interface against s3-compatible APIs.
type Bucket struct {
logger log.Logger
name string
client *minio.Client
defaultSSE encrypt.ServerSide
putUserMetadata map[string]string
partSize uint64
listObjectsV1 bool
}
// parseConfig unmarshals a buffer into a Config with default HTTPConfig values.
func parseConfig(conf []byte) (Config, error) {
config := DefaultConfig
if err := yaml.UnmarshalStrict(conf, &config); err != nil {
return Config{}, err
}
return config, nil
}
// NewBucket returns a new Bucket using the provided s3 config values.
func NewBucket(logger log.Logger, conf []byte, component string) (*Bucket, error) {
config, err := parseConfig(conf)
if err != nil {
return nil, err
}
return NewBucketWithConfig(logger, config, component)
}
type overrideSignerType struct {
credentials.Provider
signerType credentials.SignatureType
}
func (s *overrideSignerType) Retrieve() (credentials.Value, error) {
v, err := s.Provider.Retrieve()
if err != nil {
return v, err
}
if !v.SignerType.IsAnonymous() {
v.SignerType = s.signerType
}
return v, nil
}
// NewBucketWithConfig returns a new Bucket using the provided s3 config values.
func NewBucketWithConfig(logger log.Logger, config Config, component string) (*Bucket, error) {
var chain []credentials.Provider
// TODO(bwplotka): Don't do flags as they won't scale, use actual params like v2, v4 instead
wrapCredentialsProvider := func(p credentials.Provider) credentials.Provider { return p }
if config.SignatureV2 {
wrapCredentialsProvider = func(p credentials.Provider) credentials.Provider {
return &overrideSignerType{Provider: p, signerType: credentials.SignatureV2}
}
}
if err := validate(config); err != nil {
return nil, err
}
if config.AccessKey != "" {
chain = []credentials.Provider{wrapCredentialsProvider(&credentials.Static{
Value: credentials.Value{
AccessKeyID: config.AccessKey,
SecretAccessKey: config.SecretKey,
SignerType: credentials.SignatureV4,
},
})}
} else {
chain = []credentials.Provider{
wrapCredentialsProvider(&credentials.EnvAWS{}),
wrapCredentialsProvider(&credentials.FileAWSCredentials{}),
wrapCredentialsProvider(&credentials.IAM{
Client: &http.Client{
Transport: http.DefaultTransport,
},
Endpoint: config.STSEndpoint,
}),
}
}
// Check if a roundtripper has been set in the config
// otherwise build the default transport.
var rt http.RoundTripper
if config.HTTPConfig.Transport != nil {
rt = config.HTTPConfig.Transport
} else {
var err error
rt, err = DefaultTransport(config)
if err != nil {
return nil, err
}
}
client, err := minio.New(config.Endpoint, &minio.Options{
Creds: credentials.NewChainCredentials(chain),
Secure: !config.Insecure,
Region: config.Region,
Transport: rt,
})
if err != nil {
return nil, errors.Wrap(err, "initialize s3 client")
}
client.SetAppInfo(fmt.Sprintf("thanos-%s", component), fmt.Sprintf("%s (%s)", version.Version, runtime.Version()))
var sse encrypt.ServerSide
if config.SSEConfig.Type != "" {
switch config.SSEConfig.Type {
case SSEKMS:
// If the KMSEncryptionContext is a nil map the header that is
// constructed by the encrypt.ServerSide object will be base64
// encoded "nil" which is not accepted by AWS.
if config.SSEConfig.KMSEncryptionContext == nil {
config.SSEConfig.KMSEncryptionContext = make(map[string]string)
}
sse, err = encrypt.NewSSEKMS(config.SSEConfig.KMSKeyID, config.SSEConfig.KMSEncryptionContext)
if err != nil {
return nil, errors.Wrap(err, "initialize s3 client SSE-KMS")
}
case SSEC:
key, err := ioutil.ReadFile(config.SSEConfig.EncryptionKey)
if err != nil {
return nil, err
}
sse, err = encrypt.NewSSEC(key)
if err != nil {
return nil, errors.Wrap(err, "initialize s3 client SSE-C")
}
case SSES3:
sse = encrypt.NewSSE()
default:
sseErrMsg := errors.Errorf("Unsupported type %q was provided. Supported types are SSE-S3, SSE-KMS, SSE-C", config.SSEConfig.Type)
return nil, errors.Wrap(sseErrMsg, "Initialize s3 client SSE Config")
}
}
if config.TraceConfig.Enable {
logWriter := log.NewStdlibAdapter(level.Debug(logger), log.MessageKey("s3TraceMsg"))
client.TraceOn(logWriter)
}
if config.ListObjectsVersion != "" && config.ListObjectsVersion != "v1" && config.ListObjectsVersion != "v2" {
return nil, errors.Errorf("Initialize s3 client list objects version: Unsupported version %q was provided. Supported values are v1, v2", config.ListObjectsVersion)
}
bkt := &Bucket{
logger: logger,
name: config.Bucket,
client: client,
defaultSSE: sse,
putUserMetadata: config.PutUserMetadata,
partSize: config.PartSize,
listObjectsV1: config.ListObjectsVersion == "v1",
}
return bkt, nil
}
// Name returns the bucket name for s3.
func (b *Bucket) Name() string {
return b.name
}
// validate checks to see the config options are set.
func validate(conf Config) error {
if conf.Endpoint == "" {
return errors.New("no s3 endpoint in config file")
}
if conf.AccessKey == "" && conf.SecretKey != "" {
return errors.New("no s3 access_key specified while secret_key is present in config file; either both should be present in config or envvars/IAM should be used.")
}
if conf.AccessKey != "" && conf.SecretKey == "" {
return errors.New("no s3 secret_key specified while access_key is present in config file; either both should be present in config or envvars/IAM should be used.")
}
if conf.SSEConfig.Type == SSEC && conf.SSEConfig.EncryptionKey == "" {
return errors.New("encryption_key must be set if sse_config.type is set to 'SSE-C'")
}
if conf.SSEConfig.Type == SSEKMS && conf.SSEConfig.KMSKeyID == "" {
return errors.New("kms_key_id must be set if sse_config.type is set to 'SSE-KMS'")
}
return nil
}
// ValidateForTests checks to see the config options for tests are set.
func ValidateForTests(conf Config) error {
if conf.Endpoint == "" ||
conf.AccessKey == "" ||
conf.SecretKey == "" {
return errors.New("insufficient s3 test configuration information")
}
return nil
}
// Iter calls f for each entry in the given directory. The argument to f is the full
// object name including the prefix of the inspected directory.
func (b *Bucket) Iter(ctx context.Context, dir string, f func(string) error, options ...objstore.IterOption) error {
// Ensure the object name actually ends with a dir suffix. Otherwise we'll just iterate the
// object itself as one prefix item.
if dir != "" {
dir = strings.TrimSuffix(dir, DirDelim) + DirDelim
}
opts := minio.ListObjectsOptions{
Prefix: dir,
Recursive: objstore.ApplyIterOptions(options...).Recursive,
UseV1: b.listObjectsV1,
}
for object := range b.client.ListObjects(ctx, b.name, opts) {
// Catch the error when failed to list objects.
if object.Err != nil {
return object.Err
}
// This sometimes happens with empty buckets.
if object.Key == "" {
continue
}
// The s3 client can also return the directory itself in the ListObjects call above.
if object.Key == dir {
continue
}
if err := f(object.Key); err != nil {
return err
}
}
return nil
}
func (b *Bucket) getRange(ctx context.Context, name string, off, length int64) (io.ReadCloser, error) {
sse, err := b.getServerSideEncryption(ctx)
if err != nil {
return nil, err
}
opts := &minio.GetObjectOptions{ServerSideEncryption: sse}
if length != -1 {
if err := opts.SetRange(off, off+length-1); err != nil {
return nil, err
}
} else if off > 0 {
if err := opts.SetRange(off, 0); err != nil {
return nil, err
}
}
r, err := b.client.GetObject(ctx, b.name, name, *opts)
if err != nil {
return nil, err
}
// NotFoundObject error is revealed only after first Read. This does the initial GetRequest. Prefetch this here
// for convenience.
if _, err := r.Read(nil); err != nil {
runutil.CloseWithLogOnErr(b.logger, r, "s3 get range obj close")
// First GET Object request error.
return nil, err
}
return r, nil
}
// Get returns a reader for the given object name.
func (b *Bucket) Get(ctx context.Context, name string) (io.ReadCloser, error) {
return b.getRange(ctx, name, 0, -1)
}
// GetRange returns a new range reader for the given object name and range.
func (b *Bucket) GetRange(ctx context.Context, name string, off, length int64) (io.ReadCloser, error) {
return b.getRange(ctx, name, off, length)
}
// Exists checks if the given object exists.
func (b *Bucket) Exists(ctx context.Context, name string) (bool, error) {
_, err := b.client.StatObject(ctx, b.name, name, minio.StatObjectOptions{})
if err != nil {
if b.IsObjNotFoundErr(err) {
return false, nil
}
return false, errors.Wrap(err, "stat s3 object")
}
return true, nil
}
// Upload the contents of the reader as an object into the bucket.
func (b *Bucket) Upload(ctx context.Context, name string, r io.Reader) error {
sse, err := b.getServerSideEncryption(ctx)
if err != nil {
return err
}
// TODO(https://github.com/thanos-io/thanos/issues/678): Remove guessing length when minio provider will support multipart upload without this.
size, err := objstore.TryToGetSize(r)
if err != nil {
level.Warn(b.logger).Log("msg", "could not guess file size for multipart upload; upload might be not optimized", "name", name, "err", err)
size = -1
}
partSize := b.partSize
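// when the object is smaller than the configured part size, pass 0 so the minio client picks a suitable value itself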
if size < int64(partSize) {
partSize = 0
}
if _, err := b.client.PutObject(
ctx,
b.name,
name,
r,
size,
minio.PutObjectOptions{
PartSize: partSize,
ServerSideEncryption: sse,
UserMetadata: b.putUserMetadata,
},
); err != nil {
return errors.Wrap(err, "upload s3 object")
}
return nil
}
// Attributes returns information about the specified object.
func (b *Bucket) Attributes(ctx context.Context, name string) (objstore.ObjectAttributes, error) {
objInfo, err := b.client.StatObject(ctx, b.name, name, minio.StatObjectOptions{})
if err != nil {
return objstore.ObjectAttributes{}, err
}
return objstore.ObjectAttributes{
Size: objInfo.Size,
LastModified: objInfo.LastModified,
}, nil
}
// Delete removes the object with the given name.
func (b *Bucket) Delete(ctx context.Context, name string) error {
return b.client.RemoveObject(ctx, b.name, name, minio.RemoveObjectOptions{})
}
// IsObjNotFoundErr returns true if error means that object is not found. Relevant to Get operations.
func (b *Bucket) IsObjNotFoundErr(err error) bool {
return minio.ToErrorResponse(errors.Cause(err)).Code == "NoSuchKey"
}
func (b *Bucket) Close() error { return nil }
// getServerSideEncryption returns the SSE to use.
func (b *Bucket) getServerSideEncryption(ctx context.Context) (encrypt.ServerSide, error) {
if value := ctx.Value(sseConfigKey); value != nil {
if sse, ok := value.(encrypt.ServerSide); ok {
return sse, nil
}
return nil, errors.New("invalid SSE config override provided in the context")
}
return b.defaultSSE, nil
}
func configFromEnv() Config {
c := Config{
Bucket: os.Getenv("S3_BUCKET"),
Endpoint: os.Getenv("S3_ENDPOINT"),
AccessKey: os.Getenv("S3_ACCESS_KEY"),
SecretKey: os.Getenv("S3_SECRET_KEY"),
}
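// boolean flags are parsed leniently; unset or invalid values simply default to false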
c.Insecure, _ = strconv.ParseBool(os.Getenv("S3_INSECURE"))
c.HTTPConfig.InsecureSkipVerify, _ = strconv.ParseBool(os.Getenv("S3_INSECURE_SKIP_VERIFY"))
c.SignatureV2, _ = strconv.ParseBool(os.Getenv("S3_SIGNATURE_VERSION2"))
return c
}
// NewTestBucket creates test bkt client that before returning creates temporary bucket.
// In a close function it empties and deletes the bucket.
func NewTestBucket(t testing.TB, location string) (objstore.Bucket, func(), error) {
c := configFromEnv()
if err := ValidateForTests(c); err != nil {
return nil, nil, err
}
if c.Bucket != "" && os.Getenv("THANOS_ALLOW_EXISTING_BUCKET_USE") == "" {
return nil, nil, errors.New("S3_BUCKET is defined. Normally this tests will create temporary bucket " +
"and delete it after test. Unset S3_BUCKET env variable to use default logic. If you really want to run " +
"tests against provided (NOT USED!) bucket, set THANOS_ALLOW_EXISTING_BUCKET_USE=true. WARNING: That bucket " +
"needs to be manually cleared. This means that it is only useful to run one test in a time. This is due " +
"to safety (accidentally pointing prod bucket for test) as well as aws s3 not being fully strong consistent.")
}
return NewTestBucketFromConfig(t, location, c, true)
}
func NewTestBucketFromConfig(t testing.TB, location string, c Config, reuseBucket bool) (objstore.Bucket, func(), error) {
ctx := context.Background()
bc, err := yaml.Marshal(c)
if err != nil {
return nil, nil, err
}
b, err := NewBucket(log.NewNopLogger(), bc, "thanos-e2e-test")
if err != nil {
return nil, nil, err
}
bktToCreate := c.Bucket
if c.Bucket != "" && reuseBucket {
if err := b.Iter(ctx, "", func(f string) error {
return errors.Errorf("bucket %s is not empty", c.Bucket)
}); err != nil {
return nil, nil, errors.Wrapf(err, "s3 check bucket %s", c.Bucket)
}
t.Log("WARNING. Reusing", c.Bucket, "AWS bucket for AWS tests. Manual cleanup afterwards is required")
return b, func() {}, nil
}
if c.Bucket == "" {
bktToCreate = objstore.CreateTemporaryTestBucketName(t)
}
if err := b.client.MakeBucket(ctx, bktToCreate, minio.MakeBucketOptions{Region: location}); err != nil {
return nil, nil, err
}
b.name = bktToCreate
t.Log("created temporary AWS bucket for AWS tests with name", bktToCreate, "in", location)
return b, func() {
objstore.EmptyBucket(t, ctx, b)
if err := b.client.RemoveBucket(ctx, bktToCreate); err != nil {
t.Logf("deleting bucket %s failed: %s", bktToCreate, err)
}
}, nil
}
// ContextWithSSEConfig returns a context with a custom SSE config set. The returned context should be
// provided to S3 objstore client functions to override the default SSE config.
func ContextWithSSEConfig(ctx context.Context, value encrypt.ServerSide) context.Context {
return context.WithValue(ctx, sseConfigKey, value)
}
|
[
"\"S3_BUCKET\"",
"\"S3_ENDPOINT\"",
"\"S3_ACCESS_KEY\"",
"\"S3_SECRET_KEY\"",
"\"S3_INSECURE\"",
"\"S3_INSECURE_SKIP_VERIFY\"",
"\"S3_SIGNATURE_VERSION2\"",
"\"THANOS_ALLOW_EXISTING_BUCKET_USE\""
] |
[] |
[
"S3_BUCKET",
"S3_INSECURE_SKIP_VERIFY",
"THANOS_ALLOW_EXISTING_BUCKET_USE",
"S3_SECRET_KEY",
"S3_ACCESS_KEY",
"S3_INSECURE",
"S3_ENDPOINT",
"S3_SIGNATURE_VERSION2"
] |
[]
|
["S3_BUCKET", "S3_INSECURE_SKIP_VERIFY", "THANOS_ALLOW_EXISTING_BUCKET_USE", "S3_SECRET_KEY", "S3_ACCESS_KEY", "S3_INSECURE", "S3_ENDPOINT", "S3_SIGNATURE_VERSION2"]
|
go
| 8 | 0 | |
docker/docker.go
|
package docker
import (
"archive/tar"
"bytes"
"context"
"crypto/tls"
"fmt"
"io"
"io/ioutil"
"log"
"net"
"net/http"
"os"
"strconv"
"strings"
"time"
"github.com/docker/distribution/reference"
"github.com/docker/docker/api"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/network"
"github.com/docker/docker/api/types/swarm"
"github.com/docker/docker/client"
"github.com/docker/docker/pkg/jsonmessage"
"github.com/docker/go-connections/tlsconfig"
)
const (
Byte = 1
Kilobyte = 1024 * Byte
Megabyte = 1024 * Kilobyte
)
type DockerApi interface {
CreateNetwork(id string) error
ConnectNetwork(container, network, ip string) (string, error)
GetDaemonInfo() (types.Info, error)
GetSwarmPorts() ([]string, []uint16, error)
GetPorts() ([]uint16, error)
GetContainerStats(name string) (io.ReadCloser, error)
ContainerResize(name string, rows, cols uint) error
CreateAttachConnection(name string) (net.Conn, error)
CopyToContainer(containerName, destination, fileName string, content io.Reader) error
DeleteContainer(id string) error
CreateContainer(opts CreateContainerOpts) (string, error)
ExecAttach(instanceName string, command []string, out io.Writer) (int, error)
DisconnectNetwork(containerId, networkId string) error
DeleteNetwork(id string) error
Exec(instanceName string, command []string) (int, error)
New(ip string, cert, key []byte) (DockerApi, error)
SwarmInit() (*SwarmTokens, error)
SwarmJoin(addr, token string) error
}
type SwarmTokens struct {
Manager string
Worker string
}
type docker struct {
c *client.Client
}
func (d *docker) CreateNetwork(id string) error {
opts := types.NetworkCreate{Driver: "overlay", Attachable: true}
_, err := d.c.NetworkCreate(context.Background(), id, opts)
if err != nil {
log.Printf("Starting session err [%s]\n", err)
return err
}
return nil
}
func (d *docker) ConnectNetwork(containerId, networkId, ip string) (string, error) {
settings := &network.EndpointSettings{}
if ip != "" {
settings.IPAddress = ip
}
err := d.c.NetworkConnect(context.Background(), networkId, containerId, settings)
if err != nil && !strings.Contains(err.Error(), "already exists") {
log.Printf("Connection container to network err [%s]\n", err)
return "", err
}
// Obtain the IP of the PWD container in this network
container, err := d.c.ContainerInspect(context.Background(), containerId)
if err != nil {
return "", err
}
n, found := container.NetworkSettings.Networks[networkId]
if !found {
return "", fmt.Errorf("Container [%s] connected to the network [%s] but couldn't obtain it's IP address", containerId, networkId)
}
return n.IPAddress, nil
}
func (d *docker) GetDaemonInfo() (types.Info, error) {
return d.c.Info(context.Background())
}
func (d *docker) GetSwarmPorts() ([]string, []uint16, error) {
hosts := []string{}
ports := []uint16{}
nodesIdx := map[string]string{}
nodes, nodesErr := d.c.NodeList(context.Background(), types.NodeListOptions{})
if nodesErr != nil {
return nil, nil, nodesErr
}
for _, n := range nodes {
nodesIdx[n.ID] = n.Description.Hostname
hosts = append(hosts, n.Description.Hostname)
}
services, err := d.c.ServiceList(context.Background(), types.ServiceListOptions{})
if err != nil {
return nil, nil, err
}
for _, service := range services {
for _, p := range service.Endpoint.Ports {
ports = append(ports, uint16(p.PublishedPort))
}
}
return hosts, ports, nil
}
func (d *docker) GetPorts() ([]uint16, error) {
opts := types.ContainerListOptions{}
containers, err := d.c.ContainerList(context.Background(), opts)
if err != nil {
return nil, err
}
openPorts := []uint16{}
for _, c := range containers {
for _, p := range c.Ports {
// When port is not published on the host docker return public port as 0, so we need to avoid it
if p.PublicPort != 0 {
openPorts = append(openPorts, p.PublicPort)
}
}
}
return openPorts, nil
}
func (d *docker) GetContainerStats(name string) (io.ReadCloser, error) {
stats, err := d.c.ContainerStats(context.Background(), name, false)
return stats.Body, err
}
func (d *docker) ContainerResize(name string, rows, cols uint) error {
return d.c.ContainerResize(context.Background(), name, types.ResizeOptions{Height: rows, Width: cols})
}
func (d *docker) CreateAttachConnection(name string) (net.Conn, error) {
ctx := context.Background()
conf := types.ContainerAttachOptions{Stream: true, Stdin: true, Stdout: true, Stderr: true, DetachKeys: "ctrl-^,ctrl-^", Logs: true}
conn, err := d.c.ContainerAttach(ctx, name, conf)
if err != nil {
return nil, err
}
return conn.Conn, nil
}
func (d *docker) CopyToContainer(containerName, destination, fileName string, content io.Reader) error {
r, w := io.Pipe()
b, readErr := ioutil.ReadAll(content)
if readErr != nil {
return readErr
}
t := tar.NewWriter(w)
go func() {
t.WriteHeader(&tar.Header{Name: fileName, Mode: 0600, Size: int64(len(b))})
t.Write(b)
t.Close()
w.Close()
}()
return d.c.CopyToContainer(context.Background(), containerName, destination, r, types.CopyToContainerOptions{AllowOverwriteDirWithFile: true})
}
func (d *docker) DeleteContainer(id string) error {
return d.c.ContainerRemove(context.Background(), id, types.ContainerRemoveOptions{Force: true, RemoveVolumes: true})
}
type CreateContainerOpts struct {
Image string
SessionId string
PwdIpAddress string
ContainerName string
Hostname string
ServerCert []byte
ServerKey []byte
CACert []byte
Privileged bool
HostFQDN string
}
func (d *docker) CreateContainer(opts CreateContainerOpts) (string, error) {
// Make sure directories are available for the new instance container
containerDir := "/var/run/pwd"
containerCertDir := fmt.Sprintf("%s/certs", containerDir)
env := []string{}
// Write certs to container cert dir
if len(opts.ServerCert) > 0 {
env = append(env, `DOCKER_TLSCERT=\/var\/run\/pwd\/certs\/cert.pem`)
}
if len(opts.ServerKey) > 0 {
env = append(env, `DOCKER_TLSKEY=\/var\/run\/pwd\/certs\/key.pem`)
}
if len(opts.CACert) > 0 {
// if ca cert is specified, verify that clients that connects present a certificate signed by the CA
env = append(env, `DOCKER_TLSCACERT=\/var\/run\/pwd\/certs\/ca.pem`)
}
if len(opts.ServerCert) > 0 || len(opts.ServerKey) > 0 || len(opts.CACert) > 0 {
// if any of the certs is specified, enable TLS
env = append(env, "DOCKER_TLSENABLE=true")
} else {
env = append(env, "DOCKER_TLSENABLE=false")
}
h := &container.HostConfig{
NetworkMode: container.NetworkMode(opts.SessionId),
Privileged: opts.Privileged,
AutoRemove: true,
LogConfig: container.LogConfig{Config: map[string]string{"max-size": "10m", "max-file": "1"}},
}
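// apply a custom AppArmor profile when one is configured through the APPARMOR_PROFILE environment variable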
if os.Getenv("APPARMOR_PROFILE") != "" {
h.SecurityOpt = []string{fmt.Sprintf("apparmor=%s", os.Getenv("APPARMOR_PROFILE"))}
}
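// cap the number of processes per instance; defaults to 1000 and can be overridden via MAX_PROCESSES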
var pidsLimit = int64(1000)
if envLimit := os.Getenv("MAX_PROCESSES"); envLimit != "" {
if i, err := strconv.Atoi(envLimit); err == nil {
pidsLimit = int64(i)
}
}
h.Resources.PidsLimit = pidsLimit
h.Resources.Memory = 4092 * Megabyte
t := true
h.Resources.OomKillDisable = &t
env = append(env, fmt.Sprintf("PWD_IP_ADDRESS=%s", opts.PwdIpAddress))
env = append(env, fmt.Sprintf("PWD_HOST_FQDN=%s", opts.HostFQDN))
cf := &container.Config{Hostname: opts.Hostname,
Image: opts.Image,
Tty: true,
OpenStdin: true,
AttachStdin: true,
AttachStdout: true,
AttachStderr: true,
Env: env,
}
networkConf := &network.NetworkingConfig{
map[string]*network.EndpointSettings{
opts.SessionId: &network.EndpointSettings{Aliases: []string{opts.Hostname}},
},
}
container, err := d.c.ContainerCreate(context.Background(), cf, h, networkConf, opts.ContainerName)
if err != nil {
if client.IsErrImageNotFound(err) {
log.Printf("Unable to find image '%s' locally\n", opts.Image)
if err = d.pullImage(context.Background(), opts.Image); err != nil {
return "", err
}
container, err = d.c.ContainerCreate(context.Background(), cf, h, networkConf, opts.ContainerName)
if err != nil {
return "", err
}
} else {
return "", err
}
}
if err := d.copyIfSet(opts.ServerCert, "cert.pem", containerCertDir, opts.ContainerName); err != nil {
return "", err
}
if err := d.copyIfSet(opts.ServerKey, "key.pem", containerCertDir, opts.ContainerName); err != nil {
return "", err
}
if err := d.copyIfSet(opts.CACert, "ca.pem", containerCertDir, opts.ContainerName); err != nil {
return "", err
}
err = d.c.ContainerStart(context.Background(), container.ID, types.ContainerStartOptions{})
if err != nil {
return "", err
}
cinfo, err := d.c.ContainerInspect(context.Background(), container.ID)
if err != nil {
return "", err
}
return cinfo.NetworkSettings.Networks[opts.SessionId].IPAddress, nil
}
func (d *docker) pullImage(ctx context.Context, image string) error {
_, err := reference.ParseNormalizedNamed(image)
if err != nil {
return err
}
options := types.ImageCreateOptions{}
responseBody, err := d.c.ImageCreate(ctx, image, options)
if err != nil {
return err
}
defer responseBody.Close()
return jsonmessage.DisplayJSONMessagesStream(
responseBody,
os.Stderr,
os.Stdout.Fd(),
false,
nil)
}
func (d *docker) copyIfSet(content []byte, fileName, path, containerName string) error {
if len(content) > 0 {
return d.CopyToContainer(containerName, path, fileName, bytes.NewReader(content))
}
return nil
}
func (d *docker) ExecAttach(instanceName string, command []string, out io.Writer) (int, error) {
e, err := d.c.ContainerExecCreate(context.Background(), instanceName, types.ExecConfig{Cmd: command, AttachStdout: true, AttachStderr: true, Tty: true})
if err != nil {
return 0, err
}
resp, err := d.c.ContainerExecAttach(context.Background(), e.ID, types.ExecConfig{AttachStdout: true, AttachStderr: true, Tty: true})
if err != nil {
return 0, err
}
io.Copy(out, resp.Reader)
var ins types.ContainerExecInspect
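// poll once per second until the exec'd process has exited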
for range time.Tick(1 * time.Second) {
ins, err = d.c.ContainerExecInspect(context.Background(), e.ID)
if ins.Running {
continue
}
if err != nil {
return 0, err
}
break
}
return ins.ExitCode, nil
}
func (d *docker) Exec(instanceName string, command []string) (int, error) {
e, err := d.c.ContainerExecCreate(context.Background(), instanceName, types.ExecConfig{Cmd: command})
if err != nil {
return 0, err
}
err = d.c.ContainerExecStart(context.Background(), e.ID, types.ExecStartCheck{})
if err != nil {
return 0, err
}
var ins types.ContainerExecInspect
for range time.Tick(1 * time.Second) {
ins, err = d.c.ContainerExecInspect(context.Background(), e.ID)
if ins.Running {
continue
}
if err != nil {
return 0, err
}
break
}
return ins.ExitCode, nil
}
func (d *docker) DisconnectNetwork(containerId, networkId string) error {
err := d.c.NetworkDisconnect(context.Background(), networkId, containerId, true)
if err != nil {
log.Printf("Disconnection of container from network err [%s]\n", err)
return err
}
return nil
}
func (d *docker) DeleteNetwork(id string) error {
err := d.c.NetworkRemove(context.Background(), id)
if err != nil {
return err
}
return nil
}
func (d *docker) New(ip string, cert, key []byte) (DockerApi, error) {
// We check if the client needs to use TLS
var tlsConfig *tls.Config
if len(cert) > 0 && len(key) > 0 {
tlsConfig = tlsconfig.ClientDefault()
tlsConfig.InsecureSkipVerify = true
tlsCert, err := tls.X509KeyPair(cert, key)
if err != nil {
return nil, err
}
tlsConfig.Certificates = []tls.Certificate{tlsCert}
}
transport := &http.Transport{
DialContext: (&net.Dialer{
Timeout: 1 * time.Second,
KeepAlive: 30 * time.Second,
}).DialContext}
if tlsConfig != nil {
transport.TLSClientConfig = tlsConfig
}
cli := &http.Client{
Transport: transport,
}
c, err := client.NewClient(fmt.Sprintf("http://%s:2375", ip), api.DefaultVersion, cli, nil)
if err != nil {
return nil, fmt.Errorf("Could not connect to DinD docker daemon. %s", err)
}
// try to connect up to 5 times and then give up
for i := 0; i < 5; i++ {
_, err := c.Ping(context.Background())
if err != nil {
if client.IsErrConnectionFailed(err) {
// connection has failed, maybe instance is not ready yet, sleep and retry
log.Printf("Connection to [%s] has failed, maybe instance is not ready yet, sleeping and retrying in 1 second. Try #%d\n", fmt.Sprintf("http://%s:2375", ip), i+1)
time.Sleep(time.Second)
continue
}
return nil, err
}
// ping succeeded, no need to keep retrying
break
}
return NewDocker(c), nil
}
func (d *docker) SwarmInit() (*SwarmTokens, error) {
req := swarm.InitRequest{AdvertiseAddr: "eth0", ListenAddr: "0.0.0.0:2377"}
_, err := d.c.SwarmInit(context.Background(), req)
if err != nil {
return nil, err
}
swarmInfo, err := d.c.SwarmInspect(context.Background())
if err != nil {
return nil, err
}
return &SwarmTokens{
Worker: swarmInfo.JoinTokens.Worker,
Manager: swarmInfo.JoinTokens.Manager,
}, nil
}
func (d *docker) SwarmJoin(addr, token string) error {
req := swarm.JoinRequest{RemoteAddrs: []string{addr}, JoinToken: token, ListenAddr: "0.0.0.0:2377", AdvertiseAddr: "eth0"}
return d.c.SwarmJoin(context.Background(), req)
}
func NewDocker(c *client.Client) *docker {
return &docker{c: c}
}
|
[
"\"APPARMOR_PROFILE\"",
"\"APPARMOR_PROFILE\"",
"\"MAX_PROCESSES\""
] |
[] |
[
"APPARMOR_PROFILE",
"MAX_PROCESSES"
] |
[]
|
["APPARMOR_PROFILE", "MAX_PROCESSES"]
|
go
| 2 | 0 | |
conf.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# RobotPy WPILib documentation build configuration file, created by
# sphinx-quickstart on Sun Nov 2 21:31:04 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
#
# Imports
#
import sys
import os
from os.path import abspath, join, dirname
sys.path.insert(0, abspath(join(dirname(__file__))))
# -- RTD configuration ------------------------------------------------
# on_rtd is whether we are on readthedocs.org; this line of code was grabbed from docs.readthedocs.org
on_rtd = os.environ.get("READTHEDOCS", None) == "True"
# This is used for linking and such so we link to the thing we're building
rtd_version = os.environ.get("READTHEDOCS_VERSION", "latest")
if rtd_version not in ["stable", "latest"]:
rtd_version = "stable"
# -- General configuration ------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.autosummary",
"sphinx.ext.intersphinx",
"sphinx.ext.viewcode",
"sphinx_inline_tabs",
"sphinxext.opengraph",
"sphinx_reredirects",
]
ogp_custom_meta_tags = [
'<meta property="og:ignore_canonical" content="true" />',
'<meta name="theme-color" content="#3393d5" />',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix of source filenames.
source_suffix = ".rst"
# The master toctree document.
master_doc = "index"
# General information about the project.
project = "RobotPy"
copyright = "2014-2020, RobotPy development team"
intersphinx_mapping = {
"commandsv1": (
"https://robotpy.readthedocs.io/projects/commands-v1/en/%s/" % rtd_version,
None,
),
"commandsv2": (
"https://robotpy.readthedocs.io/projects/commands-v2/en/%s/" % rtd_version,
None,
),
"pyfrc": (
"https://robotpy.readthedocs.io/projects/pyfrc/en/%s/" % rtd_version,
None,
),
"networktables": (
"https://robotpy.readthedocs.io/projects/pynetworktables/en/%s/" % rtd_version,
None,
),
"wpilib": (
"https://robotpy.readthedocs.io/projects/wpilib/en/%s/" % rtd_version,
None,
),
"hal": (
"https://robotpy.readthedocs.io/projects/hal/en/%s/" % rtd_version,
None,
),
"robotpy_ext": (
"https://robotpy.readthedocs.io/projects/utilities/en/%s/" % rtd_version,
None,
),
"cscore": (
"https://robotpy.readthedocs.io/projects/cscore/en/%s/" % rtd_version,
None,
),
"frc": ("https://docs.wpilib.org/en/stable", None),
}
redirects = {
"2020_notes": "upgrade_notes.html"
}
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = "2021"
# The full version, including alpha/beta/rc tags.
release = version
autoclass_content = "both"
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ["_build"]
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = "default"
if not on_rtd: # only import and set the theme if we're building docs locally
import sphinx_rtd_theme
html_theme = "sphinx_rtd_theme"
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
else:
html_theme = "default"
# Output file base name for HTML help builder.
htmlhelp_basename = "RobotPy"
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(
"index",
"RobotPy.tex",
"RobotPy Documentation",
"RobotPy development team",
"manual",
)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
"index",
"RobotPy",
"RobotPy Documentation",
"RobotPy development team",
"RobotPy",
"One line description of project.",
"Miscellaneous",
)
]
# -- Options for Epub output ----------------------------------------------
# Bibliographic Dublin Core info.
epub_title = "RobotPy"
epub_author = "RobotPy development team"
epub_publisher = "RobotPy development team"
epub_copyright = "2014-2020, RobotPy development team"
# A list of files that should not be packed into the epub file.
epub_exclude_files = ["search.html"]
# -- Custom Document processing ----------------------------------------------
from robotpy_sphinx.sidebar import generate_sidebar
generate_sidebar(
globals(),
"robotpy",
"https://raw.githubusercontent.com/robotpy/docs-sidebar/master/sidebar.toml",
)
|
[] |
[] |
[
"READTHEDOCS_VERSION",
"READTHEDOCS"
] |
[]
|
["READTHEDOCS_VERSION", "READTHEDOCS"]
|
python
| 2 | 0 | |
doc/conf.py
|
# -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
if os.environ.get('READTHEDOCS', None) == 'True':
# Run sphinx-apidoc automatically in readthedocs
# Taken from this: https://lists.torproject.org/pipermail/tor-commits/2012-September/046695.html
os.system('sphinx-apidoc -o api -T ../mitopipeline --separate')
sys.path.insert(0, os.path.abspath(os.path.pardir))
# -- Project information -----------------------------------------------------
project = u'mitopipeline'
copyright = u'2019, Timothy Kuo'
author = u'Timothy Kuo'
# The short X.Y version
version = u''
# The full version, including alpha/beta/rc tags
release = u'0.1'
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = [u'_build', 'Thumbs.db', '.DS_Store', 'README.rst']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'mitopipelinedoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'mitopipeline.tex', u'mitopipeline Documentation',
u'Timothy Kuo', 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'mitopipeline', u'mitopipeline Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'mitopipeline', u'mitopipeline Documentation',
author, 'mitopipeline', 'One line description of project.',
'Miscellaneous'),
]
# -- Options for Epub output -------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ''
# A unique identification for the text.
#
# epub_uid = ''
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
|
[] |
[] |
[
"READTHEDOCS"
] |
[]
|
["READTHEDOCS"]
|
python
| 1 | 0 | |
e2e/framework/framework.go
|
/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package framework
import (
"fmt"
"os"
"os/exec"
"time"
smv1alpha1 "github.com/itscontained/secret-manager/pkg/apis/secretmanager/v1alpha1"
"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
"github.com/sirupsen/logrus"
"github.com/stretchr/testify/assert"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/uuid"
clientgoscheme "k8s.io/client-go/kubernetes/scheme"
restclient "k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
"sigs.k8s.io/controller-runtime/pkg/client"
)
var (
// KubectlPath defines the full path of the kubectl binary
KubectlPath = "/usr/local/bin/kubectl"
// RunID is a unique ID for this e2e test run
RunID = uuid.NewUUID()
)
const (
// Poll how often to poll for conditions
Poll = 2 * time.Second
// DefaultTimeout time to wait for operations to complete
DefaultTimeout = 90 * time.Second
)
// Framework supports common operations used by e2e tests; it will keep a client & a namespace for you.
type Framework struct {
// Name of the e2e test suite
BaseName string
Namespace string
// A Kubernetes and Service Catalog client
KubeClient client.Client
KubeConfig *restclient.Config
HelmValues string
}
// NewDefaultFramework makes a new framework and sets up a BeforeEach/AfterEach for
// you (you can write additional before/after each functions).
func NewDefaultFramework(baseName, helmValues string) *Framework {
defer ginkgo.GinkgoRecover()
scheme := runtime.NewScheme()
_ = clientgoscheme.AddToScheme(scheme)
_ = smv1alpha1.AddToScheme(scheme)
var kubeConfig *restclient.Config
var err error
kcPath := os.Getenv("KUBECONFIG")
if kcPath != "" {
kubeConfig, err = clientcmd.BuildConfigFromFlags("", kcPath)
} else {
kubeConfig, err = restclient.InClusterConfig()
}
if err != nil {
panic(err.Error())
}
assert.Nil(ginkgo.GinkgoT(), err, "creting kubernetes API client configuration")
kubeClient, err := client.New(kubeConfig, client.Options{Scheme: scheme})
assert.Nil(ginkgo.GinkgoT(), err, "creating Kubernetes API client")
f := &Framework{
BaseName: baseName,
KubeConfig: kubeConfig,
KubeClient: kubeClient,
HelmValues: helmValues,
Namespace: fmt.Sprintf("e2e-%s-%s", baseName, RunID),
}
ginkgo.BeforeEach(f.BeforeEach)
ginkgo.AfterEach(f.AfterEach)
return f
}
// BeforeEach gets a client and makes a namespace.
func (f *Framework) BeforeEach() {
err := CreateNamespace(f.Namespace, f.KubeClient)
assert.Nil(ginkgo.GinkgoT(), err, "creating namespace")
err = f.newSecretManager()
assert.Nil(ginkgo.GinkgoT(), err, "creating secret-manager")
_, err = WaitForSMPod(f.Namespace, f.KubeClient)
assert.Nil(ginkgo.GinkgoT(), err, "waiting for secret-manager to come up")
}
// AfterEach deletes the namespace, after reading its events.
func (f *Framework) AfterEach() {
if ginkgo.CurrentGinkgoTestDescription().Failed {
logs, err := secretManagerLogs(f.KubeClient, f.Namespace)
gomega.Expect(err).ToNot(gomega.HaveOccurred())
logrus.Println(logs)
}
err := f.deleteSecretManager()
assert.Nil(ginkgo.GinkgoT(), err, "deleting secret-manager %v", f.Namespace)
err = deleteNamespace(f.Namespace, f.KubeClient)
assert.Nil(ginkgo.GinkgoT(), err, "deleting namespace %v", f.Namespace)
}
// newSecretManager deploys a secret-manager instance using helm.
// You can specify a helm values file for a specific configuration.
func (f *Framework) newSecretManager() error {
ginkgo.By("launching secret-manager")
//nolint:gosec
cmd := exec.Command("/wait-for-secret-manager.sh", f.Namespace, f.HelmValues, fmt.Sprintf("secret-manager-%s", f.BaseName))
out, err := cmd.CombinedOutput()
if err != nil {
return fmt.Errorf("unexpected error creating secret-manager: %v.\nLogs:\n%v", err, string(out))
}
return nil
}
// deleteSecretManager deletes the secret-manager release.
func (f *Framework) deleteSecretManager() error {
ginkgo.By("deleting secret-manager")
//nolint:gosec
cmd := exec.Command("helm", "uninstall", fmt.Sprintf("secret-manager-%s", f.BaseName), "-n", f.Namespace)
out, err := cmd.CombinedOutput()
if err != nil {
return fmt.Errorf("unexpected error deleting secret-manager: %v.\nLogs:\n%v", err, string(out))
}
return nil
}
// NewLocalstack deploys a fresh localstack instance into the specified namespace
func (f *Framework) NewLocalstack(namespace string) error {
ginkgo.By("launching localstack")
cmd := exec.Command("/wait-for-localstack.sh", namespace, f.HelmValues)
out, err := cmd.CombinedOutput()
if err != nil {
return fmt.Errorf("unexpected error creating localstack: %v.\nLogs:\n%v", err, string(out))
}
return nil
}
|
[
"\"KUBECONFIG\""
] |
[] |
[
"KUBECONFIG"
] |
[]
|
["KUBECONFIG"]
|
go
| 1 | 0 | |
r_map/Enumeration.py
|
from .Node import Node
from .ValueNodeMixins import UnsignedValueNodeMixin
from operator import __eq__, __lt__, __le__, __gt__, __ge__
from functools import wraps
import r_map
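# comp_func wraps a comparison operator so the generated method compares this
# node's value with another Enumeration/BitField value or with a plain int.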
def comp_func(op):
@wraps(op)
def op_func(self, other):
if isinstance(other, (Enumeration, r_map.BitField)):
return op(self.value, other.value)
elif isinstance(other, int):
return op(self.value, other)
else:
return NotImplemented
return op_func
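# comparisons is a class decorator that installs the generated rich-comparison
# methods (__eq__, __lt__, __le__, __gt__, __ge__) on the decorated class.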
def comparisons(cls):
for op in __eq__, __lt__, __le__, __gt__, __ge__:
f = comp_func(op)
setattr(cls, '__{}__'.format(op.__name__), f)
return cls
@comparisons
class Enumeration(UnsignedValueNodeMixin, Node):
"r_map Enumeration type"
_nb_attrs = frozenset(['value',])
@property
def value(self):
return self._value
@value.setter
def value(self, value):
self._value = value
def __str__(self):
return super().__str__() + ' value: {}'.format(self.value)
|
[] |
[] |
[] |
[]
|
[]
|
python
| null | null | null |
main.go
|
package main
import (
"bufio"
"flag"
"log"
"os"
"os/signal"
"strconv"
"syscall"
)
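// startWebServer runs the web server in a goroutine and blocks until SIGINT or SIGTERM is received.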
func startWebServer(listenAddress string) {
sigs := make(chan os.Signal, 1)
signal.Notify(sigs, syscall.SIGINT, syscall.SIGTERM)
go createWebServer(listenAddress)
<-sigs
}
// loadUrlsFile reads URLs from a file, if it exists.
// Each URL is added to the URL repo/list.
func loadUrlsFile(urlsFile string) {
if !IsPathExists(urlsFile) {
return
}
log.Println("Reading URLs from", urlsFile)
fin, err := os.Open(urlsFile)
if err != nil {
log.Printf("Could not open urls file (%s) for reading", urlsFile)
return
}
defer fin.Close()
scanner := bufio.NewScanner(fin)
i := 0
for scanner.Scan() {
url := scanner.Text()
err := StartScrapingURL(url)
if err != nil {
log.Printf("Error: %s", err)
continue
}
i++
}
if err := scanner.Err(); err != nil {
log.Fatal(err)
}
log.Printf("Loaded %d urls from file", i)
}
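// Config holds global runtime settings populated from flags and environment variables.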
var Config struct {
pollInterval int
verboseLogging bool
dryrun bool
}
func main() {
urlsFile := flag.String("urls", "./urls.csv", "Path to csv with URLs to query (./urls.csv)")
pollInterval := flag.Int("pollInterval", 10, "Frequency in seconds to sleep before next poll (10)")
listenAddress := flag.String("listenAddress", ":8080", "Listen host:port for webserver (:8080)")
flag.Parse()
//store settings in global config
Config.pollInterval = *pollInterval
Config.verboseLogging, _ = strconv.ParseBool(os.Getenv("VERBOSE"))
Config.dryrun, _ = strconv.ParseBool(os.Getenv("DRYRUN"))
RegisterScraper("power.se", NewPowerScraper)
RegisterScraper("komplett.se", NewKomplettScraper)
// optionally, we can load some URLs from file at start
loadUrlsFile(*urlsFile)
// Web server will block until application is stopped by SIGINT/SIGTERM
startWebServer(*listenAddress)
log.Println("Stopped")
}
|
[
"\"VERBOSE\"",
"\"DRYRUN\""
] |
[] |
[
"DRYRUN",
"VERBOSE"
] |
[]
|
["DRYRUN", "VERBOSE"]
|
go
| 2 | 0 | |
cmd/repo_events_user.go
|
// Copyright © 2018 Chris Tava <[email protected]>
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package cmd
import (
"fmt"
"strings"
"time"
"context"
"os"
"github.com/ctava/github-teamwork/github"
"github.com/spf13/cobra"
)
var repoEventsCmdName = "repoevents"
// repoEventsCmd prints out events in a repository associated with a user
var repoEventsCmd = &cobra.Command{
Use: repoEventsCmdName,
Short: repoEventsCmdName + " repo user start_day end_day",
Long: repoEventsCmdName + ` repo user start_day end_day: prints out events by date and user`,
Run: func(cmd *cobra.Command, args []string) {
githubAuthToken := os.Getenv("GITHUB_ACCESS_TOKEN")
if githubAuthToken == "" {
fmt.Println("warning: will be limited to 60 calls per hour without a token")
}
ctx := context.Background()
fetcher := github.NewFetcher(ctx, githubAuthToken)
repo := getFlagString(cmd, "repo")
user := getFlagString(cmd, "user")
start := getFlagString(cmd, "start")
end := getFlagString(cmd, "end")
startTime, sterr := time.Parse("2006-01-02", start)
if sterr != nil {
fmt.Println("an error occurred while parsing the start time. err:", sterr)
return
}
startYear := startTime.Year()
startMonth := startTime.Month()
endTime, eterr := time.Parse("2006-01-02", end)
if eterr != nil {
fmt.Println("an error occurred while parsing the end time. err:", eterr)
return
}
endYear := endTime.Year()
endMonth := endTime.Month()
var createBranchTimeSeriesDataSet []byte
var pushesTimeSeriesDataSet []byte
var pullrequestsTimeSeriesDataSet []byte
//var issueCreateTimeSeriesDataSet []byte
//var pullrequestreviewsTimeSeriesDataSet []byte
//var pullrequestreviewcommentsTimeSeriesDataSet []byte
var deleteBranchTimeSeriesDataSet []byte
var events []github.RepoEvent
var err error
events, err = fetcher.FetchRepoEvents(ctx, repo)
if err != nil {
fmt.Println("an error occurred while fetching events. err:", err)
return
}
fmt.Printf("%s,%s,%s \n", "created_date", "handle", "type")
for _, e := range events {
if strings.Compare(user, e.Handle) == 0 {
if strings.Compare(e.CreatedAt, start) != -1 {
if strings.Compare(e.CreatedAt, end) != 1 {
fmt.Printf("%s,%s,%s \n", e.CreatedAt, e.Handle, e.Type)
if e.Type == "CreateEvent" {
createBranchTimeSeriesDataSet = append(createBranchTimeSeriesDataSet, e.CreatedAt...)
createBranchTimeSeriesDataSet = append(createBranchTimeSeriesDataSet, "\n"...)
}
if e.Type == "PushEvent" {
pushesTimeSeriesDataSet = append(pushesTimeSeriesDataSet, e.CreatedAt...)
pushesTimeSeriesDataSet = append(pushesTimeSeriesDataSet, "\n"...)
}
if e.Type == "PullRequestEvent" {
pullrequestsTimeSeriesDataSet = append(pullrequestsTimeSeriesDataSet, e.CreatedAt...)
pullrequestsTimeSeriesDataSet = append(pullrequestsTimeSeriesDataSet, "\n"...)
}
if e.Type == "DeleteEvent" {
deleteBranchTimeSeriesDataSet = append(deleteBranchTimeSeriesDataSet, e.CreatedAt...)
deleteBranchTimeSeriesDataSet = append(deleteBranchTimeSeriesDataSet, "\n"...)
}
}
}
}
}
fileRoot := start + "-" + user + "-" + repoEventsCmdName
fileRoot1 := start + "-" + user + "-" + "createbranch"
writeDataSetToFile(fileRoot1+".csv", createBranchTimeSeriesDataSet)
fileRoot2 := start + "-" + user + "-" + "pushes"
writeDataSetToFile(fileRoot2+".csv", pushesTimeSeriesDataSet)
fileRoot3 := start + "-" + user + "-" + "pullrequests"
writeDataSetToFile(fileRoot3+".csv", pullrequestsTimeSeriesDataSet)
fileRoot4 := start + "-" + user + "-" + "deletebranch"
writeDataSetToFile(fileRoot4+".csv", deleteBranchTimeSeriesDataSet)
derr := drawChartWithFourLines(startYear, endYear, startMonth, endMonth, "createbranch", "pushes", "pullrequests", "deletebranch", fileRoot1+".csv", fileRoot2+".csv", fileRoot3+".csv", fileRoot4+".csv", fileRoot+".png")
if derr != nil {
fmt.Println("an error occurred while drawing the chart. err:", derr)
return
}
},
}
func init() {
RootCmd.AddCommand(repoEventsCmd)
repoEventsCmd.Flags().StringP("repo", "R", "", "repo to search")
repoEventsCmd.Flags().StringP("user", "U", "", "user to search")
repoEventsCmd.Flags().StringP("start", "S", "", "user events start day")
repoEventsCmd.Flags().StringP("end", "E", "", "user events end day")
repoEventsCmd.MarkFlagRequired("repo")
repoEventsCmd.MarkFlagRequired("user")
repoEventsCmd.MarkFlagRequired("start")
repoEventsCmd.MarkFlagRequired("end")
}
|
[
"\"GITHUB_ACCESS_TOKEN\""
] |
[] |
[
"GITHUB_ACCESS_TOKEN"
] |
[]
|
["GITHUB_ACCESS_TOKEN"]
|
go
| 1 | 0 | |
bindings/cron/cron_test.go
|
// ------------------------------------------------------------
// Copyright (c) Microsoft Corporation.
// Licensed under the MIT License.
// ------------------------------------------------------------
package cron
import (
"os"
"testing"
"github.com/dapr/components-contrib/bindings"
"github.com/dapr/dapr/pkg/logger"
"github.com/stretchr/testify/assert"
)
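// getTestMetadata builds binding metadata containing only the given cron schedule.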
func getTestMetadata(schedule string) bindings.Metadata {
m := bindings.Metadata{}
m.Properties = map[string]string{
"schedule": schedule,
}
return m
}
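// getNewCron returns a cron binding whose logger runs at debug level when the DEBUG environment variable is set.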
func getNewCron() *Binding {
l := logger.NewLogger("cron")
if os.Getenv("DEBUG") != "" {
l.SetOutputLevel(logger.DebugLevel)
}
return NewCron(l)
}
// go test -v -timeout 15s -count=1 ./bindings/cron/
func TestCronInitSuccess(t *testing.T) {
c := getNewCron()
err := c.Init(getTestMetadata("@every 1h"))
assert.NoErrorf(t, err, "error initializing valid schedule")
}
func TestCronInitWithSeconds(t *testing.T) {
c := getNewCron()
err := c.Init(getTestMetadata("15 * * * * *"))
assert.NoErrorf(t, err, "error initializing schedule with seconds")
}
func TestCronInitFailure(t *testing.T) {
c := getNewCron()
err := c.Init(getTestMetadata("invalid schedule"))
assert.Errorf(t, err, "no error while initializing invalid schedule")
}
// TestCronReadWithDeleteInvoke
// go test -v -count=1 -timeout 15s -run TestCronReadWithDeleteInvoke ./bindings/cron/
func TestCronReadWithDeleteInvoke(t *testing.T) {
c := getNewCron()
schedule := "@every 1s"
assert.NoErrorf(t, c.Init(getTestMetadata(schedule)), "error initializing valid schedule")
testsNum := 3
i := 0
err := c.Read(func(res *bindings.ReadResponse) error {
assert.NotNil(t, res)
assert.LessOrEqualf(t, i, testsNum, "Invoke didn't stop the schedule")
i++
if i == testsNum {
resp, err := c.Invoke(&bindings.InvokeRequest{
Operation: bindings.DeleteOperation,
})
assert.NoError(t, err)
scheduleVal, exists := resp.Metadata["schedule"]
assert.Truef(t, exists, "Response metadata doesn't include the expected 'schedule' key")
assert.Equal(t, schedule, scheduleVal)
}
return nil
})
assert.NoErrorf(t, err, "error on read")
}
func TestCronInvokeInvalidOperation(t *testing.T) {
c := getNewCron()
initErr := c.Init(getTestMetadata("@every 1s"))
assert.NoErrorf(t, initErr, "Error on Init")
_, err := c.Invoke(&bindings.InvokeRequest{
Operation: bindings.CreateOperation,
})
assert.Error(t, err)
}
|
[
"\"DEBUG\""
] |
[] |
[
"DEBUG"
] |
[]
|
["DEBUG"]
|
go
| 1 | 0 | |
app/services/sales-api/handlers/debug/checkgrp/checkgrp.go
|
// Package checkgrp maintains the group of handlers for health checking.
package checkgrp
import (
"context"
"encoding/json"
"net/http"
"os"
"time"
"github.com/ardanlabs/service/business/sys/database"
"github.com/jmoiron/sqlx"
"go.uber.org/zap"
)
// Handlers manages the set of check endpoints.
type Handlers struct {
Build string
Log *zap.SugaredLogger
DB *sqlx.DB
}
// Readiness checks if the database is ready and if not will return a 500 status.
// Do not respond by just returning an error because further up in the call
// stack it will interpret that as a non-trusted error.
func (h Handlers) Readiness(w http.ResponseWriter, r *http.Request) {
ctx, cancel := context.WithTimeout(r.Context(), time.Second)
defer cancel()
status := "ok"
statusCode := http.StatusOK
if err := database.StatusCheck(ctx, h.DB); err != nil {
status = "db not ready"
statusCode = http.StatusInternalServerError
}
data := struct {
Status string `json:"status"`
}{
Status: status,
}
if err := response(w, statusCode, data); err != nil {
h.Log.Errorw("readiness", "ERROR", err)
}
h.Log.Infow("readiness", "statusCode", statusCode, "method", r.Method, "path", r.URL.Path, "remoteaddr", r.RemoteAddr)
}
// Liveness returns simple status info if the service is alive. If the
// app is deployed to a Kubernetes cluster, it will also return pod, node, and
// namespace details via the Downward API. The Kubernetes environment variables
// need to be set within your Pod/Deployment manifest.
func (h Handlers) Liveness(w http.ResponseWriter, r *http.Request) {
host, err := os.Hostname()
if err != nil {
host = "unavailable"
}
data := struct {
Status string `json:"status,omitempty"`
Build string `json:"build,omitempty"`
Host string `json:"host,omitempty"`
Pod string `json:"pod,omitempty"`
PodIP string `json:"podIP,omitempty"`
Node string `json:"node,omitempty"`
Namespace string `json:"namespace,omitempty"`
}{
Status: "up",
Build: h.Build,
Host: host,
Pod: os.Getenv("KUBERNETES_PODNAME"),
PodIP: os.Getenv("KUBERNETES_NAMESPACE_POD_IP"),
Node: os.Getenv("KUBERNETES_NODENAME"),
Namespace: os.Getenv("KUBERNETES_NAMESPACE"),
}
statusCode := http.StatusOK
if err := response(w, statusCode, data); err != nil {
h.Log.Errorw("liveness", "ERROR", err)
}
// THIS IS A FREE TIMER. WE COULD UPDATE THE METRIC GOROUTINE COUNT HERE.
h.Log.Infow("liveness", "statusCode", statusCode, "method", r.Method, "path", r.URL.Path, "remoteaddr", r.RemoteAddr)
}
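// response marshals data to JSON and writes it to the client with the given status code.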
func response(w http.ResponseWriter, statusCode int, data any) error {
// Convert the response value to JSON.
jsonData, err := json.Marshal(data)
if err != nil {
return err
}
// Set the content type and headers once we know marshaling has succeeded.
w.Header().Set("Content-Type", "application/json")
// Write the status code to the response.
w.WriteHeader(statusCode)
// Send the result back to the client.
if _, err := w.Write(jsonData); err != nil {
return err
}
return nil
}
|
[
"\"KUBERNETES_PODNAME\"",
"\"KUBERNETES_NAMESPACE_POD_IP\"",
"\"KUBERNETES_NODENAME\"",
"\"KUBERNETES_NAMESPACE\""
] |
[] |
[
"KUBERNETES_NAMESPACE",
"KUBERNETES_NODENAME",
"KUBERNETES_PODNAME",
"KUBERNETES_NAMESPACE_POD_IP"
] |
[]
|
["KUBERNETES_NAMESPACE", "KUBERNETES_NODENAME", "KUBERNETES_PODNAME", "KUBERNETES_NAMESPACE_POD_IP"]
|
go
| 4 | 0 | |
src/django_bootstrap5/core.py
|
from importlib import import_module
from django.conf import settings
BOOTSTRAP5 = {"foo": "bar"}
BOOTSTRAP5_DEFAULTS = {
"css_url": {
"url": "https://cdn.jsdelivr.net/npm/[email protected]/dist/css/bootstrap.min.css",
"integrity": "sha384-1BmE4kWBq78iYhFldvKuhfTAU6auU8tT94WrHftjDbrCEXSU1oBoqyl2QvZ6jIW3",
"crossorigin": "anonymous",
},
"javascript_url": {
"url": "https://cdn.jsdelivr.net/npm/[email protected]/dist/js/bootstrap.bundle.min.j",
"integrity": "sha384-ka7Sk0Gln4gmtz2MlQnikT1wXgYsOg+OMhuP+IlRH9sENBO0LRn5q+8nbTov4+1p",
"crossorigin": "anonymous",
},
"theme_url": None,
"javascript_in_head": False,
"wrapper_class": "mb-3",
"inline_wrapper_class": "",
"horizontal_label_class": "col-sm-2",
"horizontal_field_class": "col-sm-10",
"horizontal_field_offset_class": "offset-sm-2",
"set_placeholder": True,
"checkbox_layout": None,
"checkbox_style": None,
"required_css_class": "",
"error_css_class": "",
"success_css_class": "",
"server_side_validation": True,
"formset_renderers": {"default": "django_bootstrap5.renderers.FormsetRenderer"},
"form_renderers": {"default": "django_bootstrap5.renderers.FormRenderer"},
"field_renderers": {
"default": "django_bootstrap5.renderers.FieldRenderer",
},
}
def get_bootstrap_setting(name, default=None):
"""Read a setting."""
# Start with a copy of default settings
BOOTSTRAP5 = BOOTSTRAP5_DEFAULTS.copy()
# Override with user settings from settings.py
BOOTSTRAP5.update(getattr(settings, "BOOTSTRAP5", {}))
return BOOTSTRAP5.get(name, default)
def javascript_url():
"""Return the full url to the Bootstrap JavaScript file."""
return get_bootstrap_setting("javascript_url")
def css_url():
"""Return the full url to the Bootstrap CSS file."""
return get_bootstrap_setting("css_url")
def theme_url():
"""Return the full url to the theme CSS file."""
return get_bootstrap_setting("theme_url")
def get_renderer(renderers, **kwargs):
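"""Return the renderer class for the requested layout, falling back to the default renderer."""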
layout = kwargs.get("layout", "")
path = renderers.get(layout, renderers["default"])
mod, cls = path.rsplit(".", 1)
return getattr(import_module(mod), cls)
def get_formset_renderer(**kwargs):
renderers = get_bootstrap_setting("formset_renderers")
return get_renderer(renderers, **kwargs)
def get_form_renderer(**kwargs):
renderers = get_bootstrap_setting("form_renderers")
return get_renderer(renderers, **kwargs)
def get_field_renderer(**kwargs):
renderers = get_bootstrap_setting("field_renderers")
return get_renderer(renderers, **kwargs)
|
[] |
[] |
[] |
[]
|
[]
|
python
| null | null | null |
stacktrace/filesystem_test.go
|
package stacktrace_test
import (
"os"
"path"
"path/filepath"
"testing"
"github.com/stretchr/testify/assert"
"github.com/yext/glog-contrib/stacktrace"
)
var gopath string
var gopathFolderName string
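// init caches GOPATH and its base folder name for use by the path tests below.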
func init() {
gopath = os.Getenv("GOPATH")
gopathFolderName = filepath.Base(gopath)
}
func TestGopathRelativeFile(t *testing.T) {
assert.Equal(t, "yext/examples/example.go", stacktrace.GopathRelativeFile(path.Join(gopathFolderName, "src/yext/examples/example.go")))
assert.Equal(t, "folder/that-is-not-src/examples/example.go", stacktrace.GopathRelativeFile("folder/that-is-not-src/examples/example.go"))
assert.Equal(t, "/path/to/folder/that-is-not-src/examples/example.go", stacktrace.GopathRelativeFile("/path/to/folder/that-is-not-src/examples/example.go"))
}
func TestGuessAbsPath(t *testing.T) {
assert.Equal(t, path.Join(gopath, "src/yext/example.go"), stacktrace.GuessAbsPath(path.Join(gopathFolderName, "src/yext/example.go")))
assert.Equal(t, "bazel-out/darwin-fastbuild/bin/src/example.go", stacktrace.GuessAbsPath("bazel-out/darwin-fastbuild/bin/src/example.go"))
assert.Equal(t, "external/com_github_grpc_ecosystem_go_grpc_middleware/example.go", stacktrace.GuessAbsPath("external/com_github_grpc_ecosystem_go_grpc_middleware/example.go"))
assert.Equal(t, "GOROOT/src/runtime/asm_amd64.s", stacktrace.GuessAbsPath("GOROOT/src/runtime/asm_amd64.s"))
assert.Equal(t, "/path/to/foo/bar.go", stacktrace.GuessAbsPath("/path/to/foo/bar.go"))
assert.Equal(t, path.Join(gopath, "foo/bar.go"), stacktrace.GuessAbsPath(path.Join(gopath, "foo/bar.go")))
}
|
[
"\"GOPATH\""
] |
[] |
[
"GOPATH"
] |
[]
|
["GOPATH"]
|
go
| 1 | 0 | |
Django 3 By Example-Book/Bookmark App/manage.py
|
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
"""Run administrative tasks."""
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'core_project.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
tpDcc/libs/psd/__init__.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
"""
Initialization module for tpDcc-libs-psd
"""
from __future__ import print_function, division, absolute_import
import os
import logging.config
def create_logger(dev=False):
"""
Creates logger for current tpDcc-libs-psd package
"""
logger_directory = os.path.normpath(os.path.join(os.path.expanduser('~'), 'tpDcc', 'logs', 'libs'))
if not os.path.isdir(logger_directory):
os.makedirs(logger_directory)
logging_config = os.path.normpath(os.path.join(os.path.dirname(__file__), '__logging__.ini'))
logging.config.fileConfig(logging_config, disable_existing_loggers=False)
logger = logging.getLogger('tpDcc-libs-psd')
dev = os.getenv('TPDCC_DEV', dev)
if dev:
logger.setLevel(logging.DEBUG)
for handler in logger.handlers:
handler.setLevel(logging.DEBUG)
return logger
create_logger()
|
[] |
[] |
[
"TPDCC_DEV"
] |
[]
|
["TPDCC_DEV"]
|
python
| 1 | 0 | |
baremetal/metal3machine_manager.go
|
/*
Copyright 2021 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package baremetal
import (
"context"
"crypto/rand"
"encoding/json"
"fmt"
"math/big"
"os"
"strings"
"time"
// comment for go-lint.
"github.com/go-logr/logr"
bmh "github.com/metal3-io/baremetal-operator/apis/metal3.io/v1alpha1"
capm3 "github.com/metal3-io/cluster-api-provider-metal3/api/v1beta1"
"github.com/pkg/errors"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/equality"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/selection"
kerrors "k8s.io/apimachinery/pkg/util/errors"
clientcorev1 "k8s.io/client-go/kubernetes/typed/core/v1"
"k8s.io/client-go/tools/cache"
"k8s.io/utils/pointer"
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
controlplanev1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta1"
capierrors "sigs.k8s.io/cluster-api/errors"
"sigs.k8s.io/cluster-api/util"
"sigs.k8s.io/cluster-api/util/conditions"
"sigs.k8s.io/cluster-api/util/patch"
"sigs.k8s.io/controller-runtime/pkg/client"
)
const (
// ProviderName is exported.
ProviderName = "metal3"
// HostAnnotation is the key for an annotation that should go on a Metal3Machine to
// reference what BareMetalHost it corresponds to.
HostAnnotation = "metal3.io/BareMetalHost"
// nodeReuseLabelName is the label set on BMH when node reuse feature is enabled.
nodeReuseLabelName = "infrastructure.cluster.x-k8s.io/node-reuse"
requeueAfter = time.Second * 30
bmRoleControlPlane = "control-plane"
bmRoleNode = "node"
// PausedAnnotationKey is an annotation to be used for pausing a BMH.
PausedAnnotationKey = "metal3.io/capm3"
// ProviderIDPrefix is a prefix for ProviderID.
ProviderIDPrefix = "metal3://"
// ProviderLabelPrefix is a label prefix for ProviderID.
ProviderLabelPrefix = "metal3.io/uuid"
)
var (
// Capm3FastTrack is the variable fetched from the CAPM3_FAST_TRACK environment variable.
Capm3FastTrack = os.Getenv("CAPM3_FAST_TRACK")
hasRequeueAfterError HasRequeueAfterError
notFoundErr *NotFoundError
requeueAfterError *RequeueAfterError
)
// MachineManagerInterface is an interface for a MachineManager.
type MachineManagerInterface interface {
SetFinalizer()
UnsetFinalizer()
IsProvisioned() bool
IsBootstrapReady() bool
GetBaremetalHostID(context.Context) (*string, error)
Associate(context.Context) error
Delete(context.Context) error
Update(context.Context) error
HasAnnotation() bool
GetProviderIDAndBMHID() (string, *string)
SetNodeProviderID(context.Context, string, string, ClientGetter) error
SetProviderID(string)
SetPauseAnnotation(context.Context) error
RemovePauseAnnotation(context.Context) error
DissociateM3Metadata(context.Context) error
AssociateM3Metadata(context.Context) error
SetError(string, capierrors.MachineStatusError)
SetConditionMetal3MachineToFalse(clusterv1.ConditionType, string, clusterv1.ConditionSeverity, string, ...interface{})
SetConditionMetal3MachineToTrue(clusterv1.ConditionType)
}
// MachineManager is responsible for performing machine reconciliation.
type MachineManager struct {
client client.Client
Cluster *clusterv1.Cluster
Metal3Cluster *capm3.Metal3Cluster
MachineList *clusterv1.MachineList
Machine *clusterv1.Machine
Metal3Machine *capm3.Metal3Machine
Metal3MachineTemplate *capm3.Metal3MachineTemplate
MachineSet *clusterv1.MachineSet
MachineSetList *clusterv1.MachineSetList
Log logr.Logger
}
// NewMachineManager returns a new helper for managing a machine.
func NewMachineManager(client client.Client,
cluster *clusterv1.Cluster, metal3Cluster *capm3.Metal3Cluster,
machine *clusterv1.Machine, metal3machine *capm3.Metal3Machine,
machineLog logr.Logger) (*MachineManager, error) {
return &MachineManager{
client: client,
Cluster: cluster,
Metal3Cluster: metal3Cluster,
Machine: machine,
Metal3Machine: metal3machine,
Log: machineLog,
}, nil
}
// NewMachineSetManager returns a new helper for managing a machineset.
func NewMachineSetManager(client client.Client,
machine *clusterv1.Machine, machineSetList *clusterv1.MachineSetList,
machineLog logr.Logger) (*MachineManager, error) {
return &MachineManager{
client: client,
Machine: machine,
MachineSetList: machineSetList,
Log: machineLog,
}, nil
}
// SetFinalizer sets finalizer.
func (m *MachineManager) SetFinalizer() {
// If the Metal3Machine doesn't have finalizer, add it.
if !Contains(m.Metal3Machine.Finalizers, capm3.MachineFinalizer) {
m.Metal3Machine.Finalizers = append(m.Metal3Machine.Finalizers,
capm3.MachineFinalizer,
)
}
}
// UnsetFinalizer unsets finalizer.
func (m *MachineManager) UnsetFinalizer() {
// Cluster is deleted so remove the finalizer.
m.Metal3Machine.Finalizers = Filter(m.Metal3Machine.Finalizers,
capm3.MachineFinalizer,
)
}
// IsProvisioned checks if the metal3machine is provisioned.
func (m *MachineManager) IsProvisioned() bool {
if m.Metal3Machine.Spec.ProviderID != nil && m.Metal3Machine.Status.Ready {
return true
}
return false
}
// IsBootstrapReady checks if the machine is given Bootstrap data.
func (m *MachineManager) IsBootstrapReady() bool {
return m.Machine.Status.BootstrapReady
}
// isControlPlane returns true if the machine is a control plane.
func (m *MachineManager) isControlPlane() bool {
return util.IsControlPlaneMachine(m.Machine)
}
// role returns the machine role from the labels.
func (m *MachineManager) role() string {
if util.IsControlPlaneMachine(m.Machine) {
return bmRoleControlPlane
}
return bmRoleNode
}
// RemovePauseAnnotation checks and/or Removes the pause annotations on associated bmh.
func (m *MachineManager) RemovePauseAnnotation(ctx context.Context) error {
// look for associated BMH
host, helper, err := m.getHost(ctx)
if err != nil {
m.SetError("Failed to get a BaremetalHost for the Metal3Machine",
capierrors.CreateMachineError,
)
return err
}
if host == nil {
return nil
}
annotations := host.GetAnnotations()
if annotations != nil {
if _, ok := annotations[bmh.PausedAnnotation]; ok {
if m.Cluster.Name == host.Labels[clusterv1.ClusterLabelName] && annotations[bmh.PausedAnnotation] == PausedAnnotationKey {
// Removing BMH Paused Annotation Since Owner Cluster is not paused.
delete(host.Annotations, bmh.PausedAnnotation)
} else if m.Cluster.Name == host.Labels[clusterv1.ClusterLabelName] && annotations[bmh.PausedAnnotation] != PausedAnnotationKey {
m.Log.Info("BMH is paused by user. Not removing Pause Annotation")
return nil
}
}
}
return helper.Patch(ctx, host)
}
// SetPauseAnnotation sets the pause annotations on associated bmh.
func (m *MachineManager) SetPauseAnnotation(ctx context.Context) error {
// look for associated BMH
host, helper, err := m.getHost(ctx)
if err != nil {
m.SetError("Failed to get a BaremetalHost for the Metal3Machine",
capierrors.UpdateMachineError,
)
return err
}
if host == nil {
return nil
}
annotations := host.GetAnnotations()
if annotations != nil {
if _, ok := annotations[bmh.PausedAnnotation]; ok {
m.Log.Info("BaremetalHost is already paused")
return nil
}
} else {
host.Annotations = make(map[string]string)
}
m.Log.Info("Adding PausedAnnotation in BareMetalHost")
host.Annotations[bmh.PausedAnnotation] = PausedAnnotationKey
// Setting annotation with BMH status
newAnnotation, err := json.Marshal(&host.Status)
if err != nil {
m.SetError("Failed to marshal the BareMetalHost status",
capierrors.UpdateMachineError,
)
return errors.Wrap(err, "failed to marshal status annotation")
}
host.Annotations[bmh.StatusAnnotation] = string(newAnnotation)
return helper.Patch(ctx, host)
}
// GetBaremetalHostID return the provider identifier for this machine.
func (m *MachineManager) GetBaremetalHostID(ctx context.Context) (*string, error) {
// look for associated BMH
host, _, err := m.getHost(ctx)
if err != nil {
m.SetError("Failed to get a BaremetalHost for the Metal3Machine",
capierrors.CreateMachineError,
)
return nil, err
}
if host == nil {
m.Log.Info("BaremetalHost not associated, requeuing")
return nil, &RequeueAfterError{RequeueAfter: requeueAfter}
}
if host.Status.Provisioning.State == bmh.StateProvisioned {
return pointer.StringPtr(string(host.ObjectMeta.UID)), nil
}
m.Log.Info("Provisioning BaremetalHost, requeuing")
// Do not requeue since BMH update will trigger a reconciliation
return nil, nil
}
// Associate associates a machine and is invoked by the Machine Controller.
func (m *MachineManager) Associate(ctx context.Context) error {
m.Log.Info("Associating machine", "machine", m.Machine.Name)
// load and validate the config
if m.Metal3Machine == nil {
// Should have been picked earlier. Do not requeue
return nil
}
config := m.Metal3Machine.Spec
err := config.IsValid()
if err != nil {
// Should have been picked earlier. Do not requeue
m.SetError(err.Error(), capierrors.InvalidConfigurationMachineError)
return nil
}
// clear an error if one was previously set
m.clearError()
// look for associated BMH
host, helper, err := m.getHost(ctx)
if err != nil {
m.SetError("Failed to get the BaremetalHost for the Metal3Machine",
capierrors.CreateMachineError,
)
return err
}
// no BMH found, trying to choose from available ones
if host == nil {
host, helper, err = m.chooseHost(ctx)
if err != nil {
if ok := errors.As(err, &hasRequeueAfterError); !ok {
m.SetError("Failed to pick a BaremetalHost for the Metal3Machine",
capierrors.CreateMachineError,
)
}
return err
}
if host == nil {
m.Log.Info("No available host found. Requeuing.")
return &RequeueAfterError{RequeueAfter: requeueAfter}
}
m.Log.Info("Associating machine with host", "host", host.Name)
} else {
m.Log.Info("Machine already associated with host", "host", host.Name)
}
// A machine bootstrap not ready case is caught in the controller
// ReconcileNormal function
err = m.getUserDataSecretName(ctx, host)
if err != nil {
if ok := errors.As(err, &hasRequeueAfterError); !ok {
m.SetError("Failed to set the UserData for the Metal3Machine",
capierrors.CreateMachineError,
)
}
return err
}
err = m.setHostLabel(ctx, host)
if err != nil {
if ok := errors.As(err, &hasRequeueAfterError); !ok {
m.SetError("Failed to set the Cluster label in the BareMetalHost",
capierrors.CreateMachineError,
)
}
return err
}
err = m.setHostConsumerRef(ctx, host)
if err != nil {
if ok := errors.As(err, &hasRequeueAfterError); !ok {
m.SetError("Failed to associate the BaremetalHost to the Metal3Machine",
capierrors.CreateMachineError,
)
}
return err
}
// If the user did not provide a DataTemplate, we can directly set the host
// specs, nothing to wait for.
if m.Metal3Machine.Spec.DataTemplate == nil {
if err = m.setHostSpec(ctx, host); err != nil {
if ok := errors.As(err, &hasRequeueAfterError); !ok {
m.SetError("Failed to associate the BaremetalHost to the Metal3Machine",
capierrors.CreateMachineError,
)
}
return err
}
}
err = m.setBMCSecretLabel(ctx, host)
if err != nil {
if ok := errors.As(err, &hasRequeueAfterError); !ok {
m.SetError("Failed to associate the BaremetalHost to the Metal3Machine",
capierrors.CreateMachineError,
)
}
return err
}
err = helper.Patch(ctx, host)
if err != nil {
var aggr kerrors.Aggregate
if ok := errors.As(err, &aggr); ok {
for _, kerr := range aggr.Errors() {
if apierrors.IsConflict(kerr) {
return &RequeueAfterError{}
}
}
}
return err
}
err = m.ensureAnnotation(ctx, host)
if err != nil {
if ok := errors.As(err, &hasRequeueAfterError); !ok {
m.SetError("Failed to annotate the Metal3Machine",
capierrors.CreateMachineError,
)
}
return err
}
if m.Metal3Machine.Spec.DataTemplate != nil {
// Requeue to get the DataTemplate output. We need to requeue to trigger the
// wait on the Metal3DataTemplate
if err := m.WaitForM3Metadata(ctx); err != nil {
return err
}
// If the requeue is not needed, then get the updated host and set the host
// specs
host, helper, err = m.getHost(ctx)
if err != nil {
m.SetError("Failed to get the BaremetalHost for the Metal3Machine",
capierrors.CreateMachineError,
)
return err
}
if err = m.setHostSpec(ctx, host); err != nil {
if ok := errors.As(err, &hasRequeueAfterError); !ok {
m.SetError("Failed to set the BaremetalHost Specs",
capierrors.CreateMachineError,
)
}
return err
}
// Update the BMH object.
err = helper.Patch(ctx, host)
if err != nil {
var aggr kerrors.Aggregate
if ok := errors.As(err, &aggr); ok {
for _, kerr := range aggr.Errors() {
if apierrors.IsConflict(kerr) {
return &RequeueAfterError{}
}
}
}
return err
}
}
m.Log.Info("Finished associating machine")
return nil
}
// getUserDataSecretName gets the UserDataSecretName from the machine and exposes it as a secret
// for the BareMetalHost. The UserDataSecretName might already be in a secret with
// CABPK v0.3.0+, but if it is in a different namespace than the BareMetalHost,
// then we need to create the secret.
func (m *MachineManager) getUserDataSecretName(ctx context.Context, host *bmh.BareMetalHost) error {
if m.Metal3Machine.Status.UserData != nil {
return nil
}
if m.Metal3Machine.Spec.UserData != nil {
m.Metal3Machine.Status.UserData = m.Metal3Machine.Spec.UserData
}
// if datasecretname is set just pass the reference.
if m.Machine.Spec.Bootstrap.DataSecretName != nil {
m.Metal3Machine.Status.UserData = &corev1.SecretReference{
Name: *m.Machine.Spec.Bootstrap.DataSecretName,
Namespace: m.Machine.Namespace,
}
return nil
} else if m.Machine.Spec.Bootstrap.ConfigRef != nil {
m.Metal3Machine.Status.UserData = &corev1.SecretReference{
Name: m.Machine.Spec.Bootstrap.ConfigRef.Name,
Namespace: m.Machine.Spec.Bootstrap.ConfigRef.Namespace,
}
}
return nil
}
// Delete deletes a metal3 machine and is invoked by the Machine Controller.
func (m *MachineManager) Delete(ctx context.Context) error {
m.Log.Info("Deleting metal3 machine", "metal3machine", m.Metal3Machine.Name)
// clear an error if one was previously set.
m.clearError()
if Capm3FastTrack == "" {
Capm3FastTrack = "false"
m.Log.Info("Capm3FastTrack is not set, setting it to default value false")
}
host, helper, err := m.getHost(ctx)
if err != nil {
return err
}
if host != nil && host.Spec.ConsumerRef != nil {
// don't remove the ConsumerRef if it references some other metal3 machine
if !consumerRefMatches(host.Spec.ConsumerRef, m.Metal3Machine) {
m.Log.Info("host already associated with another metal3 machine",
"host", host.Name)
// Remove the ownerreference to this machine, even if the consumer ref
// references another machine.
host.OwnerReferences, err = m.DeleteOwnerRef(host.OwnerReferences)
if err != nil {
return err
}
return nil
}
// Remove clusterLabel from BMC secret.
tmpBMCSecret, errBMC := m.getBMCSecret(ctx, host)
if errBMC != nil && apierrors.IsNotFound(errBMC) {
m.Log.Info("BMC credential not found for BareMetalhost", host.Name)
} else if errBMC == nil && tmpBMCSecret != nil {
m.Log.Info("Deleting cluster label from BMC credential", host.Spec.BMC.CredentialsName)
if tmpBMCSecret.Labels != nil && tmpBMCSecret.Labels[clusterv1.ClusterLabelName] == m.Machine.Spec.ClusterName {
delete(tmpBMCSecret.Labels, clusterv1.ClusterLabelName)
errBMC = updateObject(ctx, m.client, tmpBMCSecret)
if errBMC != nil {
if ok := errors.As(errBMC, &hasRequeueAfterError); !ok {
m.Log.Info("Failed to delete the clusterLabel from BMC Secret")
}
return errBMC
}
}
}
bmhUpdated := false
if host.Spec.Image != nil {
host.Spec.Image = nil
bmhUpdated = true
}
if m.Metal3Machine.Status.UserData != nil && host.Spec.UserData != nil {
host.Spec.UserData = nil
bmhUpdated = true
}
if m.Metal3Machine.Status.MetaData != nil && host.Spec.MetaData != nil {
host.Spec.MetaData = nil
bmhUpdated = true
}
if m.Metal3Machine.Status.NetworkData != nil && host.Spec.NetworkData != nil {
host.Spec.NetworkData = nil
bmhUpdated = true
}
// Change the BMH online status based on AutomatedCleaningMode and Capm3FastTrack values:
// AutomatedCleaningMode | Capm3FastTrack | BMH
// disabled              | false          | turn off
// disabled              | true           | turn off
// metadata              | false          | turn off
// metadata              | true           | turn on
onlineStatus := host.Spec.Online
if host.Spec.AutomatedCleaningMode == "disabled" {
host.Spec.Online = false
} else if Capm3FastTrack == "true" {
host.Spec.Online = true
} else if Capm3FastTrack == "false" {
host.Spec.Online = false
}
m.Log.Info(fmt.Sprintf("Host %v AutomatedCleaningMode is %v, setting Online field to %v", host.Name, host.Spec.AutomatedCleaningMode, host.Spec.Online))
if onlineStatus != host.Spec.Online {
bmhUpdated = true
}
if bmhUpdated {
// Update the BMH object, if the errors are NotFound, do not return the
// errors.
if err := patchIfFound(ctx, helper, host); err != nil {
return err
}
m.Log.Info("Deprovisioning BaremetalHost, requeuing")
return &RequeueAfterError{}
}
waiting := true
switch host.Status.Provisioning.State {
case bmh.StateRegistering,
bmh.StateMatchProfile, bmh.StateInspecting,
bmh.StateReady, bmh.StateAvailable, bmh.StateNone,
bmh.StateUnmanaged:
// Host is not provisioned.
waiting = false
case bmh.StateExternallyProvisioned:
// We have no control over provisioning, so just wait until the
// host is powered off.
waiting = host.Status.PoweredOn
}
if waiting {
m.Log.Info("Deprovisioning BaremetalHost, requeuing")
return &RequeueAfterError{RequeueAfter: requeueAfter}
}
if m.Cluster != nil {
// If cluster has DeletionTimestamp set, skip checking if nodeReuse
// feature is enabled.
if m.Cluster.DeletionTimestamp.IsZero() {
// Fetch corresponding Metal3MachineTemplate, to see if nodeReuse
// feature is enabled. If set to true, check the machine role. In case
// machine role is ControlPlane, set nodeReuseLabelName to KubeadmControlPlane
// name, otherwise to MachineDeployment name.
m.Log.Info("Getting Metal3MachineTemplate")
m3mt := &capm3.Metal3MachineTemplate{}
if m.Metal3Machine == nil {
return errors.New("Metal3Machine associated with Metal3MachineTemplate is not found")
}
if m.hasTemplateAnnotation() {
m3mtKey := client.ObjectKey{
Name: m.Metal3Machine.ObjectMeta.GetAnnotations()[clusterv1.TemplateClonedFromNameAnnotation],
Namespace: m.Metal3Machine.Namespace,
}
if err := m.client.Get(ctx, m3mtKey, m3mt); err != nil {
// we are here, because while normal deprovisioning, Metal3MachineTemplate will be deleted first
// and we can't get it even though Metal3Machine has reference to it. We consider it nil and move
// forward with normal deprovisioning.
m3mt = nil
m.Log.Info("Metal3MachineTemplate associated with Metal3Machine is deleted")
} else {
// in case of upgrading, Metal3MachineTemplate will not be deleted and we can fetch it,
// in order to check for node reuse feature in the next step.
m.Log.Info(fmt.Sprintf("Found Metal3machineTemplate %v", m3mtKey.Name))
}
}
if m3mt != nil {
if m3mt.Spec.NodeReuse {
if host.Labels == nil {
host.Labels = make(map[string]string)
}
// Check if machine is ControlPlane
if m.isControlPlane() {
// Fetch KubeadmControlPlane name for controlplane machine
m.Log.Info(fmt.Sprintf("Fetch KubeadmControlPlane name while deprovisioning host %v", host.Name))
kcpName, err := m.getKubeadmControlPlaneName(ctx)
if err != nil {
return err
}
// Set nodeReuseLabelName on the host to KubeadmControlPlane name
m.Log.Info(fmt.Sprintf("Setting nodeReuseLabelName in host %v to fetched KubeadmControlPlane name %v", host.Name, kcpName))
host.Labels[nodeReuseLabelName] = kcpName
} else {
// Fetch MachineDeployment name for worker machine
m.Log.Info(fmt.Sprintf("Fetch MachineDeployment name while deprovisioning host %v", host.Name))
mdName, err := m.getMachineDeploymentName(ctx)
if err != nil {
return err
}
// Set nodeReuseLabelName on the host to MachineDeployment name
m.Log.Info(fmt.Sprintf("Setting nodeReuseLabelName in host %v to fetched MachineDeployment name %v", host.Name, mdName))
host.Labels[nodeReuseLabelName] = mdName
}
}
}
}
}
host.Spec.ConsumerRef = nil
// Delete created secret, if data was set without DataSecretName
if m.Machine.Spec.Bootstrap.DataSecretName == nil {
m.Log.Info("Deleting User data secret for machine")
if m.Metal3Machine.Status.UserData != nil {
err = deleteSecret(ctx, m.client, m.Metal3Machine.Status.UserData.Name,
m.Metal3Machine.Namespace,
)
if err != nil {
if ok := errors.As(err, &hasRequeueAfterError); !ok {
m.SetError("Failed to delete userdata secret",
capierrors.DeleteMachineError,
)
}
return err
}
}
}
host.Spec.ConsumerRef = nil
// Remove the ownerreference to this machine.
host.OwnerReferences, err = m.DeleteOwnerRef(host.OwnerReferences)
if err != nil {
return err
}
if host.Labels != nil && host.Labels[clusterv1.ClusterLabelName] == m.Machine.Spec.ClusterName {
delete(host.Labels, clusterv1.ClusterLabelName)
}
m.Log.Info("Removing Paused Annotation (if any)")
if host.Annotations != nil && host.Annotations[bmh.PausedAnnotation] == PausedAnnotationKey {
delete(host.Annotations, bmh.PausedAnnotation)
}
// Update the BMH object, if the errors are NotFound, do not return the
// errors.
if err := patchIfFound(ctx, helper, host); err != nil {
return err
}
}
m.Log.Info("finished deleting metal3 machine")
return nil
}
// Update updates a machine and is invoked by the Machine Controller.
func (m *MachineManager) Update(ctx context.Context) error {
m.Log.Info("Updating machine")
// clear any error message that was previously set. This method doesn't set
// error messages yet, so we know that it's incorrect to have one here.
m.clearError()
host, helper, err := m.getHost(ctx)
if err != nil {
return err
}
if host == nil {
return errors.Errorf("host not found for machine %s", m.Machine.Name)
}
if err := m.WaitForM3Metadata(ctx); err != nil {
if ok := errors.As(err, &hasRequeueAfterError); !ok {
m.SetError("Failed to get the DataTemplate",
capierrors.CreateMachineError,
)
}
return err
}
// ensure that the BMH specs are correctly set.
err = m.setHostConsumerRef(ctx, host)
if err != nil {
if ok := errors.As(err, &hasRequeueAfterError); !ok {
m.SetError("Failed to associate the BaremetalHost to the Metal3Machine",
capierrors.CreateMachineError,
)
}
return err
}
// ensure that the BMH specs are correctly set.
err = m.setHostSpec(ctx, host)
if err != nil {
if ok := errors.As(err, &hasRequeueAfterError); !ok {
m.SetError("Failed to associate the BaremetalHost to the Metal3Machine",
capierrors.CreateMachineError,
)
}
return err
}
err = helper.Patch(ctx, host)
if err != nil {
return err
}
err = m.ensureAnnotation(ctx, host)
if err != nil {
return err
}
if err := m.updateMachineStatus(ctx, host); err != nil {
return err
}
m.Log.Info("Finished updating machine")
return nil
}
// exists tests for the existence of a baremetalHost.
func (m *MachineManager) exists(ctx context.Context) (bool, error) {
m.Log.Info("Checking if host exists.")
host, _, err := m.getHost(ctx)
if err != nil {
return false, err
}
if host == nil {
m.Log.Info("Host does not exist.")
return false, nil
}
m.Log.Info("Host exists.")
return true, nil
}
// getHost gets the associated host by looking for an annotation on the machine
// that contains a reference to the host. Returns nil if not found. Assumes the
// host is in the same namespace as the machine.
func (m *MachineManager) getHost(ctx context.Context) (*bmh.BareMetalHost, *patch.Helper, error) {
host, err := getHost(ctx, m.Metal3Machine, m.client, m.Log)
if err != nil || host == nil {
return host, nil, err
}
helper, err := patch.NewHelper(host, m.client)
return host, helper, err
}
func getHost(ctx context.Context, m3Machine *capm3.Metal3Machine, cl client.Client,
mLog logr.Logger,
) (*bmh.BareMetalHost, error) {
annotations := m3Machine.ObjectMeta.GetAnnotations()
if annotations == nil {
return nil, nil
}
hostKey, ok := annotations[HostAnnotation]
if !ok {
return nil, nil
}
hostNamespace, hostName, err := cache.SplitMetaNamespaceKey(hostKey)
if err != nil {
mLog.Error(err, "Error parsing annotation value", "annotation key", hostKey)
return nil, err
}
host := bmh.BareMetalHost{}
key := client.ObjectKey{
Name: hostName,
Namespace: hostNamespace,
}
err = cl.Get(ctx, key, &host)
if apierrors.IsNotFound(err) {
mLog.Info("Annotated host not found", "host", hostKey)
return nil, nil
} else if err != nil {
return nil, err
}
return &host, nil
}
// chooseHost iterates through known hosts and returns one that can be
// associated with the metal3 machine. It searches all hosts in case one already has an
// association with this metal3 machine.
func (m *MachineManager) chooseHost(ctx context.Context) (*bmh.BareMetalHost, *patch.Helper, error) {
// get list of BMH.
hosts := bmh.BareMetalHostList{}
// Without this ListOption, all namespaces would be included in the listing.
opts := &client.ListOptions{
Namespace: m.Metal3Machine.Namespace,
}
err := m.client.List(ctx, &hosts, opts)
if err != nil {
return nil, nil, err
}
// Using the label selector on ListOptions above doesn't seem to work.
// I think it's because we have a local cache of all BareMetalHosts.
labelSelector := labels.NewSelector()
var reqs labels.Requirements
for labelKey, labelVal := range m.Metal3Machine.Spec.HostSelector.MatchLabels {
m.Log.Info("Adding requirement to match label",
"label key", labelKey,
"label value", labelVal)
r, err := labels.NewRequirement(labelKey, selection.Equals, []string{labelVal})
if err != nil {
m.Log.Error(err, "Failed to create MatchLabel requirement, not choosing host")
return nil, nil, err
}
reqs = append(reqs, *r)
}
for _, req := range m.Metal3Machine.Spec.HostSelector.MatchExpressions {
m.Log.Info("Adding requirement to match label",
"label key", req.Key,
"label operator", req.Operator,
"label value", req.Values)
lowercaseOperator := selection.Operator(strings.ToLower(string(req.Operator)))
r, err := labels.NewRequirement(req.Key, lowercaseOperator, req.Values)
if err != nil {
m.Log.Error(err, "Failed to create MatchExpression requirement, not choosing host")
return nil, nil, err
}
reqs = append(reqs, *r)
}
labelSelector = labelSelector.Add(reqs...)
availableHosts := []*bmh.BareMetalHost{}
availableHostsWithNodeReuse := []*bmh.BareMetalHost{}
for i, host := range hosts.Items {
host := host
if host.Spec.ConsumerRef != nil && consumerRefMatches(host.Spec.ConsumerRef, m.Metal3Machine) {
m.Log.Info("Found host with existing ConsumerRef", "host", host.Name)
helper, err := patch.NewHelper(&hosts.Items[i], m.client)
return &hosts.Items[i], helper, err
}
if host.Spec.ConsumerRef != nil ||
(m.nodeReuseLabelExists(ctx, &host) &&
!m.nodeReuseLabelMatches(ctx, &host)) {
continue
}
if host.GetDeletionTimestamp() != nil {
continue
}
if host.Status.ErrorMessage != "" {
continue
}
// continue if BaremetalHost is paused or marked with UnhealthyAnnotation.
annotations := host.GetAnnotations()
if annotations != nil {
if _, ok := annotations[bmh.PausedAnnotation]; ok {
continue
}
if _, ok := annotations[capm3.UnhealthyAnnotation]; ok {
continue
}
}
if labelSelector.Matches(labels.Set(host.ObjectMeta.Labels)) {
if m.nodeReuseLabelExists(ctx, &host) && m.nodeReuseLabelMatches(ctx, &host) {
m.Log.Info(fmt.Sprintf("Found host %v with nodeReuseLabelName and it matches, adding it to availableHostsWithNodeReuse list", host.Name))
availableHostsWithNodeReuse = append(availableHostsWithNodeReuse, &hosts.Items[i])
} else if !m.nodeReuseLabelExists(ctx, &host) {
switch host.Status.Provisioning.State {
case bmh.StateReady, bmh.StateAvailable:
default:
continue
}
m.Log.Info(fmt.Sprintf("Host %v matched hostSelector for Metal3Machine, adding it to availableHosts list", host.Name))
availableHosts = append(availableHosts, &hosts.Items[i])
}
} else {
m.Log.Info(fmt.Sprintf("Host %v did not match hostSelector for Metal3Machine", host.Name))
}
}
m.Log.Info(fmt.Sprintf("%d hosts available with nodeReuseLabelName while choosing host for Metal3 machine", len(availableHostsWithNodeReuse)))
m.Log.Info(fmt.Sprintf("%d hosts available while choosing host for Metal3 machine", len(availableHosts)))
if len(availableHostsWithNodeReuse) == 0 && len(availableHosts) == 0 {
return nil, nil, nil
}
// choose a host.
var chosenHost *bmh.BareMetalHost
// If there are hosts with nodeReuseLabelName:
if len(availableHostsWithNodeReuse) != 0 {
for _, host := range availableHostsWithNodeReuse {
// Build list of hosts in Ready state with nodeReuseLabelName
hostsInAvailableStateWithNodeReuse := []*bmh.BareMetalHost{}
// Build list of hosts in any other state than Ready state with nodeReuseLabelName
hostsInNotAvailableStateWithNodeReuse := []*bmh.BareMetalHost{}
if host.Status.Provisioning.State == bmh.StateReady || host.Status.Provisioning.State == bmh.StateAvailable {
hostsInAvailableStateWithNodeReuse = append(hostsInAvailableStateWithNodeReuse, host)
} else {
hostsInNotAvailableStateWithNodeReuse = append(hostsInNotAvailableStateWithNodeReuse, host)
}
// If host is found in `Ready` state, pick it
if len(hostsInAvailableStateWithNodeReuse) != 0 {
m.Log.Info(fmt.Sprintf("Found %v host(s) with nodeReuseLabelName in Ready/Available state, choosing the host %v", len(hostsInAvailableStateWithNodeReuse), host.Name))
rHost, _ := rand.Int(rand.Reader, big.NewInt(int64(len(hostsInAvailableStateWithNodeReuse))))
randomHost := rHost.Int64()
chosenHost = hostsInAvailableStateWithNodeReuse[randomHost]
} else if len(hostsInNotAvailableStateWithNodeReuse) != 0 {
m.Log.Info(fmt.Sprintf("Found %v host(s) with nodeReuseLabelName in %v state, requeuing the host %v", len(hostsInNotAvailableStateWithNodeReuse), host.Status.Provisioning.State, host.Name))
return nil, nil, &RequeueAfterError{RequeueAfter: requeueAfter}
}
}
} else {
// If there are no hosts with nodeReuseLabelName, fall back
// to the current flow and select hosts randomly.
m.Log.Info(fmt.Sprintf("%d host(s) available, choosing a random host", len(availableHosts)))
rHost, _ := rand.Int(rand.Reader, big.NewInt(int64(len(availableHosts))))
randomHost := rHost.Int64()
chosenHost = availableHosts[randomHost]
}
helper, err := patch.NewHelper(chosenHost, m.client)
return chosenHost, helper, err
}
// consumerRefMatches returns a boolean based on whether the consumer
// reference and bare metal machine metadata match.
func consumerRefMatches(consumer *corev1.ObjectReference, m3machine *capm3.Metal3Machine) bool {
if consumer.Name != m3machine.Name {
return false
}
if consumer.Namespace != m3machine.Namespace {
return false
}
if consumer.Kind != m3machine.Kind {
return false
}
if consumer.GroupVersionKind().Group != m3machine.GroupVersionKind().Group {
return false
}
return true
}
// nodeReuseLabelMatches returns true if nodeReuseLabelName matches KubeadmControlPlane or MachineDeployment name on the host.
func (m *MachineManager) nodeReuseLabelMatches(ctx context.Context, host *bmh.BareMetalHost) bool {
if host == nil {
return false
}
if host.Labels == nil {
return false
}
if m.isControlPlane() {
kcp, err := m.getKubeadmControlPlaneName(ctx)
if err != nil {
return false
}
if host.Labels[nodeReuseLabelName] == "" {
return false
}
if host.Labels[nodeReuseLabelName] != kcp {
return false
}
m.Log.Info(fmt.Sprintf("nodeReuseLabelName on the host %v matches KubeadmControlPlane name %v", host.Name, kcp))
return true
}
md, err := m.getMachineDeploymentName(ctx)
if err != nil {
return false
}
if host.Labels[nodeReuseLabelName] == "" {
return false
}
if host.Labels[nodeReuseLabelName] != md {
return false
}
m.Log.Info(fmt.Sprintf("nodeReuseLabelName on the host %v matches MachineDeployment name %v", host.Name, md))
return true
}
// nodeReuseLabelExists returns true if host contains nodeReuseLabelName label.
func (m *MachineManager) nodeReuseLabelExists(ctx context.Context, host *bmh.BareMetalHost) bool {
if host == nil {
return false
}
if host.Labels == nil {
return false
}
_, ok := host.Labels[nodeReuseLabelName]
m.Log.Info(fmt.Sprintf("nodeReuseLabelName exists on the host %v", host.Name))
return ok
}
// getBMCSecret will return the BMCSecret associated with BMH.
func (m *MachineManager) getBMCSecret(ctx context.Context, host *bmh.BareMetalHost) (*corev1.Secret, error) {
if host.Spec.BMC.CredentialsName == "" {
return nil, nil
}
tmpBMCSecret := corev1.Secret{}
key := host.CredentialsKey()
err := m.client.Get(ctx, key, &tmpBMCSecret)
if err != nil {
m.Log.Info("Cannot retrieve BMC credential for BareMetalhost ", host.Name, err)
return nil, err
}
return &tmpBMCSecret, nil
}
// setBMCSecretLabel sets the cluster.x-k8s.io/cluster-name label on the BMC Secret.
func (m *MachineManager) setBMCSecretLabel(ctx context.Context, host *bmh.BareMetalHost) error {
tmpBMCSecret, err := m.getBMCSecret(ctx, host)
if err != nil {
return err
}
if tmpBMCSecret != nil {
if tmpBMCSecret.Labels == nil {
tmpBMCSecret.Labels = make(map[string]string)
}
tmpBMCSecret.Labels[clusterv1.ClusterLabelName] = m.Machine.Spec.ClusterName
return updateObject(ctx, m.client, tmpBMCSecret)
}
return nil
}
// setHostLabel sets the cluster.x-k8s.io/cluster-name label on the BareMetalHost.
func (m *MachineManager) setHostLabel(ctx context.Context, host *bmh.BareMetalHost) error {
if host.Labels == nil {
host.Labels = make(map[string]string)
}
host.Labels[clusterv1.ClusterLabelName] = m.Machine.Spec.ClusterName
return nil
}
// setHostSpec will ensure the host's Spec is set according to the machine's
// details. It will then update the host via the kube API. If UserData does not
// include a Namespace, it will default to the Metal3Machine's namespace.
func (m *MachineManager) setHostSpec(ctx context.Context, host *bmh.BareMetalHost) error {
// We only want to update the image setting if the host does not
// already have an image.
//
// A host with an existing image is already provisioned and
// upgrades are not supported at this time. To re-provision a
// host, we must fully deprovision it and then provision it again.
// Not provisioning while we do not have the UserData.
if host.Spec.Image == nil && m.Metal3Machine.Status.UserData != nil {
checksumType := ""
if m.Metal3Machine.Spec.Image.ChecksumType != nil {
checksumType = *m.Metal3Machine.Spec.Image.ChecksumType
}
host.Spec.Image = &bmh.Image{
URL: m.Metal3Machine.Spec.Image.URL,
Checksum: m.Metal3Machine.Spec.Image.Checksum,
ChecksumType: bmh.ChecksumType(checksumType),
DiskFormat: m.Metal3Machine.Spec.Image.DiskFormat,
}
host.Spec.UserData = m.Metal3Machine.Status.UserData
if host.Spec.UserData != nil && host.Spec.UserData.Namespace == "" {
host.Spec.UserData.Namespace = host.Namespace
}
// Set metadata gathered from Spec.metadata and from the template.
if m.Metal3Machine.Status.MetaData != nil {
host.Spec.MetaData = m.Metal3Machine.Status.MetaData
}
if host.Spec.MetaData != nil && host.Spec.MetaData.Namespace == "" {
host.Spec.MetaData.Namespace = m.Machine.Namespace
}
if m.Metal3Machine.Status.NetworkData != nil {
host.Spec.NetworkData = m.Metal3Machine.Status.NetworkData
}
if host.Spec.NetworkData != nil && host.Spec.NetworkData.Namespace == "" {
host.Spec.NetworkData.Namespace = m.Machine.Namespace
}
}
// Set automatedCleaningMode from metal3Machine.spec.automatedCleaningMode.
if m.Metal3Machine.Spec.AutomatedCleaningMode != nil {
if host.Spec.AutomatedCleaningMode != bmh.AutomatedCleaningMode(*m.Metal3Machine.Spec.AutomatedCleaningMode) {
host.Spec.AutomatedCleaningMode = bmh.AutomatedCleaningMode(*m.Metal3Machine.Spec.AutomatedCleaningMode)
}
}
host.Spec.Online = true
return nil
}
// setHostConsumerRef will ensure the host's Spec is set to link to this
// Metal3Machine.
func (m *MachineManager) setHostConsumerRef(ctx context.Context, host *bmh.BareMetalHost) error {
host.Spec.ConsumerRef = &corev1.ObjectReference{
Kind: "Metal3Machine",
Name: m.Metal3Machine.Name,
Namespace: m.Metal3Machine.Namespace,
APIVersion: m.Metal3Machine.APIVersion,
}
// Set OwnerReferences.
hostOwnerReferences, err := m.SetOwnerRef(host.OwnerReferences, true)
if err != nil {
return err
}
host.OwnerReferences = hostOwnerReferences
// Delete nodeReuseLabelName from host.
m.Log.Info("Deleting nodeReuseLabelName from host, if any")
labels := host.GetLabels()
if labels != nil {
if _, ok := labels[nodeReuseLabelName]; ok {
delete(host.Labels, nodeReuseLabelName)
m.Log.Info("Finished deleting nodeReuseLabelName")
}
}
return nil
}
// ensureAnnotation makes sure the machine has an annotation that references the
// host and uses the API to update the machine if necessary.
func (m *MachineManager) ensureAnnotation(ctx context.Context, host *bmh.BareMetalHost) error {
annotations := m.Metal3Machine.ObjectMeta.GetAnnotations()
if annotations == nil {
annotations = make(map[string]string)
}
hostKey, err := cache.MetaNamespaceKeyFunc(host)
if err != nil {
m.Log.Error(err, "Error parsing annotation value", "annotation key", hostKey)
return err
}
existing, ok := annotations[HostAnnotation]
if ok {
if existing == hostKey {
return nil
}
m.Log.Info("Warning: found stray annotation for host on machine. Overwriting.", "host", existing)
}
annotations[HostAnnotation] = hostKey
m.Metal3Machine.ObjectMeta.SetAnnotations(annotations)
return nil
}
// HasAnnotation returns true if the machine has an annotation that references a host.
func (m *MachineManager) HasAnnotation() bool {
annotations := m.Metal3Machine.ObjectMeta.GetAnnotations()
if annotations == nil {
return false
}
_, ok := annotations[HostAnnotation]
return ok
}
// hasTemplateAnnotation returns true if the metal3 machine has the infrastructure machine
// annotation that stores the name of the infrastructure template resource.
func (m *MachineManager) hasTemplateAnnotation() bool {
annotations := m.Metal3Machine.ObjectMeta.GetAnnotations()
if annotations == nil {
return false
}
_, ok := annotations[clusterv1.TemplateClonedFromNameAnnotation]
return ok
}
// SetError sets the ErrorMessage and ErrorReason fields on the machine and logs
// the message. It assumes the reason is invalid configuration, since that is
// currently the only relevant MachineStatusError choice.
func (m *MachineManager) SetError(message string, reason capierrors.MachineStatusError) {
m.Metal3Machine.Status.FailureMessage = &message
m.Metal3Machine.Status.FailureReason = &reason
}
// SetConditionMetal3MachineToFalse sets Metal3Machine condition status to False.
func (m *MachineManager) SetConditionMetal3MachineToFalse(t clusterv1.ConditionType, reason string, severity clusterv1.ConditionSeverity, messageFormat string, messageArgs ...interface{}) {
conditions.MarkFalse(m.Metal3Machine, t, reason, severity, messageFormat, messageArgs...)
}
// SetConditionMetal3MachineToTrue sets Metal3Machine condition status to True.
func (m *MachineManager) SetConditionMetal3MachineToTrue(t clusterv1.ConditionType) {
conditions.MarkTrue(m.Metal3Machine, t)
}
// clearError removes the ErrorMessage and ErrorReason from the machine's
// Status if either is set.
func (m *MachineManager) clearError() {
if m.Metal3Machine.Status.FailureMessage != nil || m.Metal3Machine.Status.FailureReason != nil {
m.Metal3Machine.Status.FailureMessage = nil
m.Metal3Machine.Status.FailureReason = nil
}
}
// updateMachineStatus updates a Metal3Machine object's status.
func (m *MachineManager) updateMachineStatus(ctx context.Context, host *bmh.BareMetalHost) error {
addrs := m.nodeAddresses(host)
metal3MachineOld := m.Metal3Machine.DeepCopy()
m.Metal3Machine.Status.Addresses = addrs
conditions.MarkTrue(m.Metal3Machine, capm3.AssociateBMHCondition)
if equality.Semantic.DeepEqual(m.Metal3Machine.Status, metal3MachineOld.Status) {
// Status did not change
return nil
}
now := metav1.Now()
m.Metal3Machine.Status.LastUpdated = &now
return nil
}
// nodeAddresses returns a slice of clusterv1.MachineAddress objects for a
// given Metal3 machine.
func (m *MachineManager) nodeAddresses(host *bmh.BareMetalHost) []clusterv1.MachineAddress {
addrs := []clusterv1.MachineAddress{}
// If the host is nil or we have no hw details, return an empty address array.
if host == nil || host.Status.HardwareDetails == nil {
return addrs
}
for _, nic := range host.Status.HardwareDetails.NIC {
address := clusterv1.MachineAddress{
Type: clusterv1.MachineInternalIP,
Address: nic.IP,
}
addrs = append(addrs, address)
}
if host.Status.HardwareDetails.Hostname != "" {
addrs = append(addrs, clusterv1.MachineAddress{
Type: clusterv1.MachineHostName,
Address: host.Status.HardwareDetails.Hostname,
})
addrs = append(addrs, clusterv1.MachineAddress{
Type: clusterv1.MachineInternalDNS,
Address: host.Status.HardwareDetails.Hostname,
})
}
return addrs
}
// GetProviderIDAndBMHID returns providerID and bmhID.
func (m *MachineManager) GetProviderIDAndBMHID() (string, *string) {
providerID := m.Metal3Machine.Spec.ProviderID
if providerID == nil {
return "", nil
}
if strings.Contains(*providerID, ProviderIDPrefix) {
bmhID := strings.TrimPrefix(*providerID, ProviderIDPrefix)
return *providerID, pointer.StringPtr(parseProviderID(bmhID))
}
return *providerID, pointer.StringPtr(parseProviderID(*providerID))
}
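// Example (values are illustrative; the prefix is assumed to be the metal3
// provider scheme): a Spec.ProviderID such as "metal3://<bmh-id>" is returned
// unchanged as providerID while the prefix is trimmed before parseProviderID
// yields the bmhID; a provider ID without the prefix goes to parseProviderID
// as-is.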
// ClientGetter prototype.
type ClientGetter func(ctx context.Context, c client.Client, cluster *clusterv1.Cluster) (clientcorev1.CoreV1Interface, error)
// SetNodeProviderID sets the metal3 provider ID on the kubernetes node.
func (m *MachineManager) SetNodeProviderID(ctx context.Context, bmhID, providerID string, clientFactory ClientGetter) error {
if !m.Metal3Cluster.Spec.NoCloudProvider {
return nil
}
corev1Remote, err := clientFactory(ctx, m.client, m.Cluster)
if err != nil {
return errors.Wrap(err, "Error creating a remote client")
}
nodeLabel := fmt.Sprintf("%s=%v", ProviderLabelPrefix, bmhID)
nodes, nodesCount, err := m.getNodesWithLabel(ctx, nodeLabel, clientFactory)
if err != nil {
m.Log.Info("error retrieving node, requeuing")
return &RequeueAfterError{RequeueAfter: requeueAfter}
}
if nodesCount == 0 {
// The node could either be still running cloud-init or have been
// deleted manually. TODO: handle a manual deletion case.
m.Log.Info("Target node is not found, requeuing")
return &RequeueAfterError{RequeueAfter: requeueAfter}
}
for _, node := range nodes.Items {
node := node
if node.Spec.ProviderID == providerID {
continue
}
node.Spec.ProviderID = providerID
_, err = corev1Remote.Nodes().Update(ctx, &node, metav1.UpdateOptions{})
if err != nil {
return errors.Wrap(err, "unable to update the target node")
}
}
m.Log.Info("ProviderID set on target node")
return nil
}
// SetProviderID sets the metal3 provider ID on the Metal3Machine.
func (m *MachineManager) SetProviderID(providerID string) {
m.Metal3Machine.Spec.ProviderID = &providerID
m.Metal3Machine.Status.Ready = true
m.SetConditionMetal3MachineToTrue(capm3.KubernetesNodeReadyCondition)
}
// SetOwnerRef adds an ownerreference to this Metal3Machine.
func (m *MachineManager) SetOwnerRef(refList []metav1.OwnerReference, controller bool) ([]metav1.OwnerReference, error) {
return setOwnerRefInList(refList, controller, m.Metal3Machine.TypeMeta,
m.Metal3Machine.ObjectMeta,
)
}
// DeleteOwnerRef removes the ownerreference to this Metal3Machine.
func (m *MachineManager) DeleteOwnerRef(refList []metav1.OwnerReference) ([]metav1.OwnerReference, error) {
return deleteOwnerRefFromList(refList, m.Metal3Machine.TypeMeta,
m.Metal3Machine.ObjectMeta,
)
}
// DeleteOwnerRefFromList removes the ownerreference to this Metal3Machine.
func deleteOwnerRefFromList(refList []metav1.OwnerReference,
objType metav1.TypeMeta, objMeta metav1.ObjectMeta,
) ([]metav1.OwnerReference, error) {
if len(refList) == 0 {
return refList, nil
}
index, err := findOwnerRefFromList(refList, objType, objMeta)
if err != nil {
if ok := errors.As(err, ¬FoundErr); !ok {
return nil, err
}
return refList, nil
}
if len(refList) == 1 {
return []metav1.OwnerReference{}, nil
}
refListLen := len(refList) - 1
refList[index] = refList[refListLen]
refList, err = deleteOwnerRefFromList(refList[:refListLen], objType, objMeta)
if err != nil {
return nil, err
}
return refList, nil
}
// FindOwnerRef checks if an ownerreference to this Metal3Machine exists
// and returns the index.
func (m *MachineManager) FindOwnerRef(refList []metav1.OwnerReference) (int, error) {
return findOwnerRefFromList(refList, m.Metal3Machine.TypeMeta,
m.Metal3Machine.ObjectMeta,
)
}
// SetOwnerRef adds an ownerreference to this Metal3Machine.
func setOwnerRefInList(refList []metav1.OwnerReference, controller bool,
objType metav1.TypeMeta, objMeta metav1.ObjectMeta,
) ([]metav1.OwnerReference, error) {
index, err := findOwnerRefFromList(refList, objType, objMeta)
if err != nil {
if ok := errors.As(err, ¬FoundErr); !ok {
return nil, err
}
refList = append(refList, metav1.OwnerReference{
APIVersion: objType.APIVersion,
Kind: objType.Kind,
Name: objMeta.Name,
UID: objMeta.UID,
Controller: pointer.BoolPtr(controller),
})
} else {
// The UID and the APIVersion might change due to move or version upgrade.
refList[index].APIVersion = objType.APIVersion
refList[index].UID = objMeta.UID
refList[index].Controller = pointer.BoolPtr(controller)
}
return refList, nil
}
// findOwnerRefFromList finds OwnerRef to this Metal3Machine.
func findOwnerRefFromList(refList []metav1.OwnerReference, objType metav1.TypeMeta,
objMeta metav1.ObjectMeta,
) (int, error) {
for i, curOwnerRef := range refList {
aGV, err := schema.ParseGroupVersion(curOwnerRef.APIVersion)
if err != nil {
return 0, err
}
bGV, err := schema.ParseGroupVersion(objType.APIVersion)
if err != nil {
return 0, err
}
// Not matching on UID since it might change when pivoting.
// Not matching on API version as it might change.
if curOwnerRef.Name == objMeta.Name &&
curOwnerRef.Kind == objType.Kind &&
aGV.Group == bGV.Group {
return i, nil
}
}
return 0, &NotFoundError{}
}
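// Illustrative behaviour of the two helpers above (values made up): for a
// Metal3Machine "m3m-0", the first setOwnerRefInList call appends an
// OwnerReference with Kind "Metal3Machine", Name "m3m-0" and Controller set;
// a second call finds the existing entry via findOwnerRefFromList, which
// matches on Name, Kind and API group only, and merely refreshes its
// APIVersion, UID and Controller fields.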
// AssociateM3Metadata sets the user-provided secret references and, if a
// DataTemplate is set, creates the Metal3DataClaim owned by this machine.
func (m *MachineManager) AssociateM3Metadata(ctx context.Context) error {
// If the secrets were provided by the user, use them.
if m.Metal3Machine.Spec.MetaData != nil {
m.Metal3Machine.Status.MetaData = m.Metal3Machine.Spec.MetaData
}
if m.Metal3Machine.Spec.NetworkData != nil {
m.Metal3Machine.Status.NetworkData = m.Metal3Machine.Spec.NetworkData
}
// If we have RenderedData set already, it means that the owner reference was
// already set.
if m.Metal3Machine.Status.RenderedData != nil {
return nil
}
if m.Metal3Machine.Spec.DataTemplate == nil {
return nil
}
if m.Metal3Machine.Spec.DataTemplate.Namespace == "" {
m.Metal3Machine.Spec.DataTemplate.Namespace = m.Metal3Machine.Namespace
}
_, err := fetchM3DataClaim(ctx, m.client, m.Log,
m.Metal3Machine.Name, m.Metal3Machine.Namespace,
)
if err != nil {
if ok := errors.As(err, &hasRequeueAfterError); !ok {
return err
}
} else {
return nil
}
dataClaim := &capm3.Metal3DataClaim{
ObjectMeta: metav1.ObjectMeta{
Name: m.Metal3Machine.Name,
Namespace: m.Metal3Machine.Namespace,
OwnerReferences: []metav1.OwnerReference{
{
APIVersion: m.Metal3Machine.APIVersion,
Kind: m.Metal3Machine.Kind,
Name: m.Metal3Machine.Name,
UID: m.Metal3Machine.UID,
Controller: pointer.BoolPtr(true),
},
},
Labels: m.Metal3Machine.Labels,
},
Spec: capm3.Metal3DataClaimSpec{
Template: *m.Metal3Machine.Spec.DataTemplate,
},
}
err = createObject(ctx, m.client, dataClaim)
if err != nil {
return err
}
return nil
}
// WaitForM3Metadata waits for the Metal3DataClaim to be rendered, fetches the
// resulting Metal3Data and sets the MetaData and NetworkData secret references.
func (m *MachineManager) WaitForM3Metadata(ctx context.Context) error {
// If we do not have RenderedData set yet, try to find it in
// Metal3DataTemplate. If it is not there yet, it means that the reconciliation
// of Metal3DataTemplate did not yet complete, requeue.
if m.Metal3Machine.Status.RenderedData == nil {
if m.Metal3Machine.Spec.DataTemplate == nil {
return nil
}
if m.Metal3Machine.Spec.DataTemplate.Namespace == "" {
m.Metal3Machine.Spec.DataTemplate.Namespace = m.Metal3Machine.Namespace
}
metal3DataClaim, err := fetchM3DataClaim(ctx, m.client, m.Log,
m.Metal3Machine.Name, m.Metal3Machine.Namespace,
)
if err != nil {
return err
}
if metal3DataClaim == nil {
return &RequeueAfterError{}
}
if metal3DataClaim.Status.RenderedData != nil &&
metal3DataClaim.Status.RenderedData.Name != "" {
m.Metal3Machine.Status.RenderedData = metal3DataClaim.Status.RenderedData
} else {
return &RequeueAfterError{RequeueAfter: requeueAfter}
}
}
// Fetch the Metal3Data.
metal3Data, err := fetchM3Data(ctx, m.client, m.Log,
m.Metal3Machine.Status.RenderedData.Name, m.Metal3Machine.Namespace,
)
if err != nil {
return err
}
if metal3Data == nil {
return errors.New("Unexpected nil rendered data")
}
// If it is not ready yet, wait.
if !metal3Data.Status.Ready {
// Secret generation not ready
return &RequeueAfterError{RequeueAfter: requeueAfter}
}
// Get the secrets if given in Metal3Data and not already set.
if m.Metal3Machine.Status.MetaData == nil &&
metal3Data.Spec.MetaData != nil {
if metal3Data.Spec.MetaData.Name != "" {
m.Metal3Machine.Status.MetaData = &corev1.SecretReference{
Name: metal3Data.Spec.MetaData.Name,
Namespace: metal3Data.Namespace,
}
}
}
if m.Metal3Machine.Status.NetworkData == nil &&
metal3Data.Spec.NetworkData != nil {
if metal3Data.Spec.NetworkData.Name != "" {
m.Metal3Machine.Status.NetworkData = &corev1.SecretReference{
Name: metal3Data.Spec.NetworkData.Name,
Namespace: metal3Data.Namespace,
}
}
}
return nil
}
// DissociateM3Metadata removes the machine from the OwnerReferences of the Metal3DataTemplate, requeuing on failure.
func (m *MachineManager) DissociateM3Metadata(ctx context.Context) error {
if m.Metal3Machine.Status.MetaData != nil && m.Metal3Machine.Spec.MetaData == nil {
m.Metal3Machine.Status.MetaData = nil
}
if m.Metal3Machine.Status.NetworkData != nil && m.Metal3Machine.Spec.NetworkData == nil {
m.Metal3Machine.Status.NetworkData = nil
}
m.Metal3Machine.Status.RenderedData = nil
// Get the Metal3DataClaim object.
metal3DataClaim, err := fetchM3DataClaim(ctx, m.client, m.Log,
m.Metal3Machine.Name, m.Metal3Machine.Namespace,
)
if err != nil {
if ok := errors.As(err, &hasRequeueAfterError); !ok {
return err
}
return nil
}
if metal3DataClaim == nil {
return nil
}
return deleteObject(ctx, m.client, metal3DataClaim)
}
// getKubeadmControlPlaneName retrieves the name of the KubeadmControlPlane corresponding to the CAPI machine.
func (m *MachineManager) getKubeadmControlPlaneName(ctx context.Context) (string, error) {
m.Log.Info("Fetching KubeadmControlPlane name")
if m.Machine == nil {
return "", errors.New("Could not find corresponding machine object")
}
if m.Machine.ObjectMeta.OwnerReferences == nil {
return "", errors.New("Machine owner reference is not populated")
}
for _, mOwnerRef := range m.Machine.ObjectMeta.OwnerReferences {
if mOwnerRef.Kind != "KubeadmControlPlane" {
continue
}
aGV, err := schema.ParseGroupVersion(mOwnerRef.APIVersion)
if err != nil {
return "", errors.New("Failed to parse the group and version")
}
if aGV.Group != controlplanev1.GroupVersion.Group {
continue
}
// Add a prefix to the KubeadmControlPlane name to differentiate it from a
// MachineDeployment that has the same name in the cluster.
m.Log.Info(fmt.Sprintf("Fetched KubeadmControlPlane name %v", "kcp-"+mOwnerRef.Name))
return "kcp-" + mOwnerRef.Name, nil
}
return "", errors.New("KubeadmControlPlane name is not found")
}
// getMachineDeploymentName retrieves the name of the MachineDeployment corresponding to the MachineSet.
func (m *MachineManager) getMachineDeploymentName(ctx context.Context) (string, error) {
m.Log.Info("Fetching MachineDeployment name")
// Fetch MachineSet.
m.Log.Info("Fetching MachineSet first to find corresponding MachineDeployment later")
machineSet, err := m.getMachineSet(ctx)
if err != nil {
return "", err
}
if machineSet.ObjectMeta.OwnerReferences == nil {
return "", errors.New("Machineset owner reference is not populated")
}
for _, msOwnerRef := range machineSet.ObjectMeta.OwnerReferences {
if msOwnerRef.Kind != "MachineDeployment" {
continue
}
aGV, err := schema.ParseGroupVersion(msOwnerRef.APIVersion)
if err != nil {
return "", errors.New("Failed to parse the group and version")
}
if aGV.Group != clusterv1.GroupVersion.Group {
continue
}
// Add a prefix to the MachineDeployment name to differentiate it from a
// KubeadmControlPlane that has the same name in the cluster.
m.Log.Info(fmt.Sprintf("Fetched MachineDeployment name %v", "md-"+msOwnerRef.Name))
return "md-" + msOwnerRef.Name, nil
}
return "", errors.New("MachineDeployment name is not found")
}
// getMachineSet retrieves the MachineSet object corresponding to the CAPI machine.
func (m *MachineManager) getMachineSet(ctx context.Context) (*clusterv1.MachineSet, error) {
m.Log.Info("Fetching MachineSet name")
// Get list of MachineSets.
machineSets := &clusterv1.MachineSetList{}
if m.Machine == nil {
return nil, errors.New("Could not find corresponding machine object")
}
if m.isControlPlane() {
return nil, errors.New("Machine is controlplane, MachineSet can not be associated with it")
}
if m.Machine.ObjectMeta.OwnerReferences == nil {
return nil, errors.New("Machine owner reference is not populated")
}
if err := m.client.List(ctx, machineSets, client.InNamespace(m.Machine.Namespace)); err != nil {
return nil, err
}
// Iterate over MachineSets list and find MachineSet which references specific machine.
for index := range machineSets.Items {
machineset := &machineSets.Items[index]
for _, mOwnerRef := range m.Machine.ObjectMeta.OwnerReferences {
if mOwnerRef.Kind != machineset.Kind {
continue
}
if mOwnerRef.APIVersion != machineset.APIVersion {
continue
}
if mOwnerRef.UID != machineset.UID {
continue
}
if mOwnerRef.Name == machineset.Name {
m.Log.Info(fmt.Sprintf("Found MachineSet %v corresponding to machine", machineset.Name))
return machineset, nil
}
}
}
return nil, errors.New("MachineSet is not found")
}
// getNodesWithLabel gets kubernetes nodes with a given label.
func (m *MachineManager) getNodesWithLabel(ctx context.Context, nodeLabel string, clientFactory ClientGetter) (*corev1.NodeList, int, error) {
corev1Remote, err := clientFactory(ctx, m.client, m.Cluster)
if err != nil {
return nil, 0, errors.Wrap(err, "Error creating a remote client")
}
nodesCount := 0
nodes, err := corev1Remote.Nodes().List(ctx, metav1.ListOptions{
LabelSelector: nodeLabel,
})
if err != nil {
m.Log.Info(fmt.Sprintf("error while retrieving nodes with label (%s): %v", nodeLabel, err))
return nil, 0, err
}
if nodes != nil {
nodesCount = len(nodes.Items)
}
return nodes, nodesCount, err
}
|
[
"\"CAPM3_FAST_TRACK\""
] |
[] |
[
"CAPM3_FAST_TRACK"
] |
[]
|
["CAPM3_FAST_TRACK"]
|
go
| 1 | 0 | |
handlers_drive.go
|
package main
import (
"fmt"
"io"
"io/ioutil"
"net/http"
"os"
"path/filepath"
"time"
"github.com/prasmussen/gdrive/auth"
"github.com/prasmussen/gdrive/cli"
"github.com/prasmussen/gdrive/drive"
)
const ClientId = "285134468872-l86fg562da0uidsija1p96bjalattoh0.apps.googleusercontent.com"
const ClientSecret = "PWNpbdrL0G17OxoL42OslqUf"
const TokenFilename = "token_v2.json"
const DefaultCacheFileName = "file_cache.json"
func listHandler(ctx cli.Context) {
args := ctx.Args()
err := newDrive(args).List(drive.ListFilesArgs{
Out: os.Stdout,
MaxFiles: args.Int64("maxFiles"),
NameWidth: args.Int64("nameWidth"),
Query: args.String("query"),
SortOrder: args.String("sortOrder"),
SkipHeader: args.Bool("skipHeader"),
SizeInBytes: args.Bool("sizeInBytes"),
AbsPath: args.Bool("absPath"),
})
checkErr(err)
}
func listChangesHandler(ctx cli.Context) {
args := ctx.Args()
err := newDrive(args).ListChanges(drive.ListChangesArgs{
Out: os.Stdout,
PageToken: args.String("pageToken"),
MaxChanges: args.Int64("maxChanges"),
Now: args.Bool("now"),
NameWidth: args.Int64("nameWidth"),
SkipHeader: args.Bool("skipHeader"),
})
checkErr(err)
}
func downloadHandler(ctx cli.Context) {
args := ctx.Args()
checkDownloadArgs(args)
err := newDrive(args).Download(drive.DownloadArgs{
Out: os.Stdout,
Id: args.String("fileId"),
Force: args.Bool("force"),
Skip: args.Bool("skip"),
Path: args.String("path"),
Delete: args.Bool("delete"),
Recursive: args.Bool("recursive"),
Stdout: args.Bool("stdout"),
Progress: progressWriter(args.Bool("noProgress")),
Timeout: durationInSeconds(args.Int64("timeout")),
})
checkErr(err)
}
func downloadQueryHandler(ctx cli.Context) {
args := ctx.Args()
err := newDrive(args).DownloadQuery(drive.DownloadQueryArgs{
Out: os.Stdout,
Query: args.String("query"),
Force: args.Bool("force"),
Skip: args.Bool("skip"),
Recursive: args.Bool("recursive"),
Path: args.String("path"),
Progress: progressWriter(args.Bool("noProgress")),
})
checkErr(err)
}
func downloadSyncHandler(ctx cli.Context) {
args := ctx.Args()
cachePath := filepath.Join(args.String("configDir"), DefaultCacheFileName)
err := newDrive(args).DownloadSync(drive.DownloadSyncArgs{
Out: os.Stdout,
Progress: progressWriter(args.Bool("noProgress")),
Path: args.String("path"),
RootId: args.String("fileId"),
DryRun: args.Bool("dryRun"),
DeleteExtraneous: args.Bool("deleteExtraneous"),
Timeout: durationInSeconds(args.Int64("timeout")),
Resolution: conflictResolution(args),
Comparer: NewCachedMd5Comparer(cachePath),
})
checkErr(err)
}
func downloadRevisionHandler(ctx cli.Context) {
args := ctx.Args()
err := newDrive(args).DownloadRevision(drive.DownloadRevisionArgs{
Out: os.Stdout,
FileId: args.String("fileId"),
RevisionId: args.String("revId"),
Force: args.Bool("force"),
Stdout: args.Bool("stdout"),
Path: args.String("path"),
Progress: progressWriter(args.Bool("noProgress")),
Timeout: durationInSeconds(args.Int64("timeout")),
})
checkErr(err)
}
func uploadHandler(ctx cli.Context) {
args := ctx.Args()
checkUploadArgs(args)
err := newDrive(args).Upload(drive.UploadArgs{
Out: os.Stdout,
Progress: progressWriter(args.Bool("noProgress")),
Path: args.String("path"),
Name: args.String("name"),
Description: args.String("description"),
Parents: args.StringSlice("parent"),
Mime: args.String("mime"),
Recursive: args.Bool("recursive"),
Share: args.Bool("share"),
Delete: args.Bool("delete"),
ChunkSize: args.Int64("chunksize"),
Timeout: durationInSeconds(args.Int64("timeout")),
})
checkErr(err)
}
func uploadStdinHandler(ctx cli.Context) {
args := ctx.Args()
err := newDrive(args).UploadStream(drive.UploadStreamArgs{
Out: os.Stdout,
In: os.Stdin,
Name: args.String("name"),
Description: args.String("description"),
Parents: args.StringSlice("parent"),
Mime: args.String("mime"),
Share: args.Bool("share"),
ChunkSize: args.Int64("chunksize"),
Timeout: durationInSeconds(args.Int64("timeout")),
Progress: progressWriter(args.Bool("noProgress")),
})
checkErr(err)
}
func uploadSyncHandler(ctx cli.Context) {
args := ctx.Args()
cachePath := filepath.Join(args.String("configDir"), DefaultCacheFileName)
err := newDrive(args).UploadSync(drive.UploadSyncArgs{
Out: os.Stdout,
Progress: progressWriter(args.Bool("noProgress")),
Path: args.String("path"),
RootId: args.String("fileId"),
DryRun: args.Bool("dryRun"),
DeleteExtraneous: args.Bool("deleteExtraneous"),
ChunkSize: args.Int64("chunksize"),
Timeout: durationInSeconds(args.Int64("timeout")),
Resolution: conflictResolution(args),
Comparer: NewCachedMd5Comparer(cachePath),
})
checkErr(err)
}
func updateHandler(ctx cli.Context) {
args := ctx.Args()
err := newDrive(args).Update(drive.UpdateArgs{
Out: os.Stdout,
Id: args.String("fileId"),
Path: args.String("path"),
Name: args.String("name"),
Description: args.String("description"),
Parents: args.StringSlice("parent"),
Mime: args.String("mime"),
Progress: progressWriter(args.Bool("noProgress")),
ChunkSize: args.Int64("chunksize"),
Timeout: durationInSeconds(args.Int64("timeout")),
})
checkErr(err)
}
func infoHandler(ctx cli.Context) {
args := ctx.Args()
err := newDrive(args).Info(drive.FileInfoArgs{
Out: os.Stdout,
Id: args.String("fileId"),
SizeInBytes: args.Bool("sizeInBytes"),
})
checkErr(err)
}
func importHandler(ctx cli.Context) {
args := ctx.Args()
err := newDrive(args).Import(drive.ImportArgs{
Mime: args.String("mime"),
Out: os.Stdout,
Path: args.String("path"),
Parents: args.StringSlice("parent"),
Progress: progressWriter(args.Bool("noProgress")),
})
checkErr(err)
}
func exportHandler(ctx cli.Context) {
args := ctx.Args()
err := newDrive(args).Export(drive.ExportArgs{
Out: os.Stdout,
Id: args.String("fileId"),
Mime: args.String("mime"),
PrintMimes: args.Bool("printMimes"),
Force: args.Bool("force"),
})
checkErr(err)
}
func listRevisionsHandler(ctx cli.Context) {
args := ctx.Args()
err := newDrive(args).ListRevisions(drive.ListRevisionsArgs{
Out: os.Stdout,
Id: args.String("fileId"),
NameWidth: args.Int64("nameWidth"),
SizeInBytes: args.Bool("sizeInBytes"),
SkipHeader: args.Bool("skipHeader"),
})
checkErr(err)
}
func mkdirHandler(ctx cli.Context) {
args := ctx.Args()
err := newDrive(args).Mkdir(drive.MkdirArgs{
Out: os.Stdout,
Name: args.String("name"),
Description: args.String("description"),
Parents: args.StringSlice("parent"),
})
checkErr(err)
}
func shareHandler(ctx cli.Context) {
args := ctx.Args()
err := newDrive(args).Share(drive.ShareArgs{
Out: os.Stdout,
FileId: args.String("fileId"),
Role: args.String("role"),
Type: args.String("type"),
Email: args.String("email"),
Domain: args.String("domain"),
Discoverable: args.Bool("discoverable"),
})
checkErr(err)
}
func shareListHandler(ctx cli.Context) {
args := ctx.Args()
err := newDrive(args).ListPermissions(drive.ListPermissionsArgs{
Out: os.Stdout,
FileId: args.String("fileId"),
})
checkErr(err)
}
func shareRevokeHandler(ctx cli.Context) {
args := ctx.Args()
err := newDrive(args).RevokePermission(drive.RevokePermissionArgs{
Out: os.Stdout,
FileId: args.String("fileId"),
PermissionId: args.String("permissionId"),
})
checkErr(err)
}
func deleteHandler(ctx cli.Context) {
args := ctx.Args()
err := newDrive(args).Delete(drive.DeleteArgs{
Out: os.Stdout,
Id: args.String("fileId"),
Recursive: args.Bool("recursive"),
})
checkErr(err)
}
func listSyncHandler(ctx cli.Context) {
args := ctx.Args()
err := newDrive(args).ListSync(drive.ListSyncArgs{
Out: os.Stdout,
SkipHeader: args.Bool("skipHeader"),
})
checkErr(err)
}
func listRecursiveSyncHandler(ctx cli.Context) {
args := ctx.Args()
err := newDrive(args).ListRecursiveSync(drive.ListRecursiveSyncArgs{
Out: os.Stdout,
RootId: args.String("fileId"),
SkipHeader: args.Bool("skipHeader"),
PathWidth: args.Int64("pathWidth"),
SizeInBytes: args.Bool("sizeInBytes"),
SortOrder: args.String("sortOrder"),
})
checkErr(err)
}
func deleteRevisionHandler(ctx cli.Context) {
args := ctx.Args()
err := newDrive(args).DeleteRevision(drive.DeleteRevisionArgs{
Out: os.Stdout,
FileId: args.String("fileId"),
RevisionId: args.String("revId"),
})
checkErr(err)
}
func aboutHandler(ctx cli.Context) {
args := ctx.Args()
err := newDrive(args).About(drive.AboutArgs{
Out: os.Stdout,
SizeInBytes: args.Bool("sizeInBytes"),
})
checkErr(err)
}
func aboutImportHandler(ctx cli.Context) {
args := ctx.Args()
err := newDrive(args).AboutImport(drive.AboutImportArgs{
Out: os.Stdout,
})
checkErr(err)
}
func aboutExportHandler(ctx cli.Context) {
args := ctx.Args()
err := newDrive(args).AboutExport(drive.AboutExportArgs{
Out: os.Stdout,
})
checkErr(err)
}
func getOauthClient(args cli.Arguments) (*http.Client, error) {
if args.String("refreshToken") != "" && args.String("accessToken") != "" {
ExitF("Access token not needed when refresh token is provided")
}
if args.String("refreshToken") != "" {
return auth.NewRefreshTokenClient(ClientId, ClientSecret, args.String("refreshToken")), nil
}
if args.String("accessToken") != "" {
return auth.NewAccessTokenClient(ClientId, ClientSecret, args.String("accessToken")), nil
}
configDir := getConfigDir(args)
if args.String("serviceAccount") != "" {
serviceAccountPath := ConfigFilePath(configDir, args.String("serviceAccount"))
serviceAccountClient, err := auth.NewServiceAccountClient(serviceAccountPath)
if err != nil {
return nil, err
}
return serviceAccountClient, nil
}
tokenPath := ConfigFilePath(configDir, TokenFilename)
return auth.NewFileSourceClient(ClientId, ClientSecret, tokenPath, authCodePrompt)
}
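// Illustrative precedence (argument keys as read above): refreshToken wins and
// builds a refresh-token client, then accessToken, then serviceAccount
// (resolved inside the config dir); only when none of these are set does the
// interactive flow against token_v2.json run, prompting via authCodePrompt.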
func getConfigDir(args cli.Arguments) string {
// Use dir from environment var if present
if os.Getenv("GDRIVE_CONFIG_DIR") != "" {
return os.Getenv("GDRIVE_CONFIG_DIR")
}
return args.String("configDir")
}
func newDrive(args cli.Arguments) *drive.Drive {
oauth, err := getOauthClient(args)
if err != nil {
ExitF("Failed getting oauth client: %s", err.Error())
}
client, err := drive.New(oauth)
if err != nil {
ExitF("Failed getting drive: %s", err.Error())
}
return client
}
func authCodePrompt(url string) func() string {
return func() string {
fmt.Println("Authentication needed")
fmt.Println("Go to the following url in your browser:")
fmt.Printf("%s\n\n", url)
fmt.Print("Enter verification code: ")
var code string
if _, err := fmt.Scan(&code); err != nil {
fmt.Printf("Failed reading code: %s", err.Error())
}
return code
}
}
func progressWriter(discard bool) io.Writer {
if discard {
return ioutil.Discard
}
return os.Stderr
}
func durationInSeconds(seconds int64) time.Duration {
return time.Second * time.Duration(seconds)
}
func conflictResolution(args cli.Arguments) drive.ConflictResolution {
keepLocal := args.Bool("keepLocal")
keepRemote := args.Bool("keepRemote")
keepLargest := args.Bool("keepLargest")
if (keepLocal && keepRemote) || (keepLocal && keepLargest) || (keepRemote && keepLargest) {
ExitF("Only one conflict resolution flag can be given")
}
if keepLocal {
return drive.KeepLocal
}
if keepRemote {
return drive.KeepRemote
}
if keepLargest {
return drive.KeepLargest
}
return drive.NoResolution
}
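// Example (flag spelling is illustrative): a sync invoked with the keepLargest
// flag maps to drive.KeepLargest here; giving two of the keep* flags at once
// aborts with "Only one conflict resolution flag can be given", and giving
// none yields drive.NoResolution.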
func checkUploadArgs(args cli.Arguments) {
if args.Bool("recursive") && args.Bool("delete") {
ExitF("--delete is not allowed for recursive uploads")
}
if args.Bool("recursive") && args.Bool("share") {
ExitF("--share is not allowed for recursive uploads")
}
}
func checkDownloadArgs(args cli.Arguments) {
if args.Bool("recursive") && args.Bool("delete") {
ExitF("--delete is not allowed for recursive downloads")
}
}
|
[
"\"GDRIVE_CONFIG_DIR\"",
"\"GDRIVE_CONFIG_DIR\""
] |
[] |
[
"GDRIVE_CONFIG_DIR"
] |
[]
|
["GDRIVE_CONFIG_DIR"]
|
go
| 1 | 0 | |
tests/stores/test_advanced_stores.py
|
# coding: utf-8
"""
Tests for advanced stores
"""
import os
import shutil
import signal
import subprocess
import tempfile
import time
from unittest.mock import patch
from uuid import uuid4
import pytest
from mongogrant import Client
from mongogrant.client import check, seed
from mongogrant.config import Config
from pymongo import MongoClient
from pymongo.collection import Collection
from maggma.core import StoreError
from maggma.stores import (
AliasingStore,
MemoryStore,
MongograntStore,
MongoStore,
SandboxStore,
VaultStore,
)
from maggma.stores.advanced_stores import substitute
@pytest.fixture
def mongostore():
store = MongoStore("maggma_test", "test")
store.connect()
yield store
store._collection.drop()
@pytest.fixture("module")
def mgrant_server():
# TODO: This is whacked code that starts a mongo server. How do we fix this?
_, config_path = tempfile.mkstemp()
_, mdlogpath = tempfile.mkstemp()
mdpath = tempfile.mkdtemp()
mdport = 27020
if not os.getenv("CONTINUOUS_INTEGRATION"):
basecmd = (
f"mongod --port {mdport} --dbpath {mdpath} --quiet --logpath {mdlogpath} "
"--bind_ip_all --auth"
)
mongod_process = subprocess.Popen(basecmd, shell=True, start_new_session=True)
time.sleep(5)
client = MongoClient(port=mdport)
client.admin.command(
"createUser", "mongoadmin", pwd="mongoadminpass", roles=["root"]
)
client.close()
else:
pytest.skip("Disabling mongogrant tests on CI for now")
dbname = "test_" + uuid4().hex
db = MongoClient(f"mongodb://mongoadmin:[email protected]:{mdport}/admin")[
dbname
]
db.command("createUser", "reader", pwd="readerpass", roles=["read"])
db.command("createUser", "writer", pwd="writerpass", roles=["readWrite"])
db.client.close()
# Yields the fixture to use
yield config_path, mdport, dbname
if not (os.getenv("CONTINUOUS_INTEGRATION") and os.getenv("TRAVIS")):
os.killpg(os.getpgid(mongod_process.pid), signal.SIGTERM)
os.waitpid(mongod_process.pid, 0)
os.remove(config_path)
shutil.rmtree(mdpath)
os.remove(mdlogpath)
@pytest.fixture("module")
def mgrant_user(mgrant_server):
config_path, mdport, dbname = mgrant_server
config = Config(check=check, path=config_path, seed=seed())
client = Client(config)
client.set_auth(
host=f"localhost:{mdport}",
db=dbname,
role="read",
username="reader",
password="readerpass",
)
client.set_auth(
host=f"localhost:{mdport}",
db=dbname,
role="readWrite",
username="writer",
password="writerpass",
)
client.set_alias("testhost", f"localhost:{mdport}", which="host")
client.set_alias("testdb", dbname, which="db")
return client
def connected_user(store):
return store._collection.database.command("connectionStatus")["authInfo"][
"authenticatedUsers"
][0]["user"]
def test_mgrant_init():
with pytest.raises(StoreError):
store = MongograntStore("", "", username="")
with pytest.raises(ValueError):
store = MongograntStore("", "")
store.connect()
def test_mgrant_connect(mgrant_server, mgrant_user):
config_path, mdport, dbname = mgrant_server
assert mgrant_user is not None
store = MongograntStore(
"ro:testhost/testdb", "tasks", mgclient_config_path=config_path
)
store.connect()
assert isinstance(store._collection, Collection)
assert connected_user(store) == "reader"
store = MongograntStore(
"rw:testhost/testdb", "tasks", mgclient_config_path=config_path
)
store.connect()
assert isinstance(store._collection, Collection)
assert connected_user(store) == "writer"
def test_mgrant_differences():
with pytest.raises(ValueError):
MongograntStore.from_db_file("")
with pytest.raises(ValueError):
MongograntStore.from_collection("")
def test_mgrant_equal(mgrant_server, mgrant_user):
config_path, mdport, dbname = mgrant_server
assert mgrant_user is not None
store1 = MongograntStore(
"ro:testhost/testdb", "tasks", mgclient_config_path=config_path
)
store1.connect()
store2 = MongograntStore(
"ro:testhost/testdb", "tasks", mgclient_config_path=config_path
)
store3 = MongograntStore(
"ro:testhost/testdb", "test", mgclient_config_path=config_path
)
store2.connect()
assert store1 == store2
assert store1 != store3
def vault_store():
with patch("hvac.Client") as mock:
instance = mock.return_value
instance.auth_github.return_value = True
instance.is_authenticated.return_value = True
instance.read.return_value = {
"wrap_info": None,
"request_id": "2c72c063-2452-d1cd-19a2-91163c7395f7",
"data": {
"value": '{"db": "mg_core_prod", "host": "matgen2.lbl.gov", "username": "test", "password": "pass"}'
},
"auth": None,
"warnings": None,
"renewable": False,
"lease_duration": 2764800,
"lease_id": "",
}
v = VaultStore("test_coll", "secret/matgen/maggma")
return v
def test_vault_init():
"""
Test initing a vault store using a mock hvac client
"""
os.environ["VAULT_ADDR"] = "https://fake:8200/"
os.environ["VAULT_TOKEN"] = "dummy"
# Just test that we successfully instantiated
v = vault_store()
assert isinstance(v, MongoStore)
def test_vault_github_token():
"""
Test using VaultStore with GITHUB_TOKEN and mock hvac
"""
# Save token in env
os.environ["VAULT_ADDR"] = "https://fake:8200/"
os.environ["GITHUB_TOKEN"] = "dummy"
v = vault_store()
# Just test that we successfully instantiated
assert isinstance(v, MongoStore)
def test_vault_missing_env():
"""
Test VaultStore should raise an error if environment is not set
"""
del os.environ["VAULT_TOKEN"]
del os.environ["VAULT_ADDR"]
del os.environ["GITHUB_TOKEN"]
# Create should raise an error
with pytest.raises(RuntimeError):
vault_store()
@pytest.fixture
def alias_store():
memorystore = MemoryStore("test")
memorystore.connect()
alias_store = AliasingStore(memorystore, {"a": "b", "c.d": "e", "f": "g.h"})
return alias_store
def test_alias_count(alias_store):
d = [{"b": 1}, {"e": 2}, {"g": {"h": 3}}]
alias_store.store._collection.insert_many(d)
assert alias_store.count({"a": 1}) == 1
def test_aliasing_query(alias_store):
d = [{"b": 1}, {"e": 2}, {"g": {"h": 3}}]
alias_store.store._collection.insert_many(d)
assert "a" in list(alias_store.query(criteria={"a": {"$exists": 1}}))[0]
assert "c" in list(alias_store.query(criteria={"c.d": {"$exists": 1}}))[0]
assert "d" in list(alias_store.query(criteria={"c.d": {"$exists": 1}}))[0].get(
"c", {}
)
assert "f" in list(alias_store.query(criteria={"f": {"$exists": 1}}))[0]
def test_aliasing_update(alias_store):
alias_store.update(
[
{"task_id": "mp-3", "a": 4},
{"task_id": "mp-4", "c": {"d": 5}},
{"task_id": "mp-5", "f": 6},
]
)
assert list(alias_store.query(criteria={"task_id": "mp-3"}))[0]["a"] == 4
assert list(alias_store.query(criteria={"task_id": "mp-4"}))[0]["c"]["d"] == 5
assert list(alias_store.query(criteria={"task_id": "mp-5"}))[0]["f"] == 6
assert list(alias_store.store.query(criteria={"task_id": "mp-3"}))[0]["b"] == 4
assert list(alias_store.store.query(criteria={"task_id": "mp-4"}))[0]["e"] == 5
assert list(alias_store.store.query(criteria={"task_id": "mp-5"}))[0]["g"]["h"] == 6
def test_aliasing_remove_docs(alias_store):
alias_store.update(
[
{"task_id": "mp-3", "a": 4},
{"task_id": "mp-4", "c": {"d": 5}},
{"task_id": "mp-5", "f": 6},
]
)
assert alias_store.query_one(criteria={"task_id": "mp-3"})
assert alias_store.query_one(criteria={"task_id": "mp-4"})
assert alias_store.query_one(criteria={"task_id": "mp-5"})
alias_store.remove_docs({"a": 4})
assert alias_store.query_one(criteria={"task_id": "mp-3"}) is None
def test_aliasing_substitute(alias_store):
aliases = {"a": "b", "c.d": "e", "f": "g.h"}
d = {"b": 1}
substitute(d, aliases)
assert "a" in d
d = {"e": 1}
substitute(d, aliases)
assert "c" in d
assert "d" in d.get("c", {})
d = {"g": {"h": 4}}
substitute(d, aliases)
assert "f" in d
d = None
substitute(d, aliases)
assert d is None
def test_aliasing_distinct(alias_store):
d = [{"b": 1}, {"e": 2}, {"g": {"h": 3}}]
alias_store.store._collection.insert_many(d)
assert alias_store.distinct("a") == [1]
assert alias_store.distinct("c.d") == [2]
assert alias_store.distinct("f") == [3]
@pytest.fixture
def sandbox_store():
memstore = MemoryStore()
store = SandboxStore(memstore, sandbox="test")
store.connect()
return store
def test_sandbox_count(sandbox_store):
sandbox_store.collection.insert_one({"a": 1, "b": 2, "c": 3})
assert sandbox_store.count({"a": 1}) == 1
sandbox_store.collection.insert_one({"a": 1, "b": 3, "sbxn": ["test"]})
assert sandbox_store.count({"a": 1}) == 2
def test_sandbox_query(sandbox_store):
sandbox_store.collection.insert_one({"a": 1, "b": 2, "c": 3})
assert sandbox_store.query_one(properties=["a"])["a"] == 1
sandbox_store.collection.insert_one({"a": 2, "b": 2, "sbxn": ["test"]})
assert sandbox_store.query_one(properties=["b"], criteria={"a": 2})["b"] == 2
sandbox_store.collection.insert_one({"a": 3, "b": 2, "sbxn": ["not_test"]})
assert sandbox_store.query_one(properties=["c"], criteria={"a": 3}) is None
def test_sandbox_distinct(sandbox_store):
sandbox_store.connect()
sandbox_store.collection.insert_one({"a": 1, "b": 2, "c": 3})
assert sandbox_store.distinct("a") == [1]
sandbox_store.collection.insert_one({"a": 4, "d": 5, "e": 6, "sbxn": ["test"]})
assert sandbox_store.distinct("a")[1] == 4
sandbox_store.collection.insert_one({"a": 7, "d": 8, "e": 9, "sbxn": ["not_test"]})
assert sandbox_store.distinct("a")[1] == 4
def test_sandbox_update(sandbox_store):
sandbox_store.connect()
sandbox_store.update([{"e": 6, "d": 4}], key="e")
assert (
next(sandbox_store.query(criteria={"d": {"$exists": 1}}, properties=["d"]))["d"]
== 4
)
assert sandbox_store.collection.find_one({"e": 6})["sbxn"] == ["test"]
sandbox_store.update([{"e": 7, "sbxn": ["core"]}], key="e")
assert set(sandbox_store.query_one(criteria={"e": 7})["sbxn"]) == {"test", "core"}
def test_sandbox_remove_docs(sandbox_store):
sandbox_store.connect()
sandbox_store.update([{"e": 6, "d": 4}], key="e")
sandbox_store.update([{"e": 7, "sbxn": ["core"]}], key="e")
assert sandbox_store.query_one(criteria={"d": {"$exists": 1}}, properties=["d"])
assert sandbox_store.query_one(criteria={"e": 7})
sandbox_store.remove_docs(criteria={"d": 4})
assert (
sandbox_store.query_one(criteria={"d": {"$exists": 1}}, properties=["d"])
is None
)
assert sandbox_store.query_one(criteria={"e": 7})
@pytest.fixture
def mgrantstore(mgrant_server, mgrant_user):
config_path, mdport, dbname = mgrant_server
assert mgrant_user is not None
store = MongograntStore(
"ro:testhost/testdb", "tasks", mgclient_config_path=config_path
)
store.connect()
return store
@pytest.fixture
def vaultstore():
os.environ["VAULT_ADDR"] = "https://fake:8200/"
os.environ["VAULT_TOKEN"] = "dummy"
# Just test that we successfully instantiated
v = vault_store()
return v
def test_eq_mgrant(mgrantstore, mongostore):
assert mgrantstore == mgrantstore
assert mgrantstore != mongostore
def test_eq(vaultstore, alias_store, sandbox_store):
assert alias_store == alias_store
assert sandbox_store == sandbox_store
assert vaultstore == vaultstore
assert sandbox_store != alias_store
assert alias_store != vaultstore
assert vaultstore != sandbox_store
|
[] |
[] |
[
"VAULT_TOKEN",
"CONTINUOUS_INTEGRATION",
"TRAVIS",
"VAULT_ADDR",
"GITHUB_TOKEN"
] |
[]
|
["VAULT_TOKEN", "CONTINUOUS_INTEGRATION", "TRAVIS", "VAULT_ADDR", "GITHUB_TOKEN"]
|
python
| 5 | 0 | |
internal/dao/dao_test.go
|
package dao
import (
"context"
"flag"
"os"
"testing"
"github.com/go-kratos/kratos/pkg/conf/paladin"
"github.com/go-kratos/kratos/pkg/testing/lich"
)
var d *dao
var ctx = context.Background()
func _TestMain(m *testing.M) {
flag.Set("conf", "../../test")
flag.Set("f", "../../test/docker-compose.yaml")
flag.Parse()
disableLich := os.Getenv("DISABLE_LICH") != ""
if !disableLich {
if err := lich.Setup(); err != nil {
panic(err)
}
}
var err error
if err = paladin.Init(); err != nil {
panic(err)
}
var cf func()
if d, cf, err = newTestDao(); err != nil {
panic(err)
}
ret := m.Run()
cf()
if !disableLich {
_ = lich.Teardown()
}
os.Exit(ret)
}
func TestMain(m *testing.M) {
flag.Set("conf", "../../configs")
flag.Parse()
var err error
if err = paladin.Init(); err != nil {
panic(err)
}
var cf func()
if d, cf, err = newTestDao(); err != nil {
panic(err)
}
ret := m.Run()
cf()
os.Exit(ret)
}
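// Illustrative usage: running the suite with DISABLE_LICH=1 makes _TestMain
// skip lich.Setup()/lich.Teardown(), so the docker-compose environment from
// ../../test/docker-compose.yaml is never started; the active TestMain above
// ignores lich entirely and only loads the ../../configs paladin config.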
|
[
"\"DISABLE_LICH\""
] |
[] |
[
"DISABLE_LICH"
] |
[]
|
["DISABLE_LICH"]
|
go
| 1 | 0 | |
scripts/run_audio.py
|
import os
import wandb
from copy import deepcopy
from src.systems import audio_systems
from src.utils.utils import load_json
from src.utils.setup import process_config
import random, torch, numpy
import pytorch_lightning as pl
SYSTEM = {
'PretrainExpertInstDiscSystem': audio_systems.PretrainExpertInstDiscSystem,
'PretrainExpertSimCLRSystem': audio_systems.PretrainExpertSimCLRSystem,
'PretrainViewMakerSimCLRSystem': audio_systems.PretrainViewMakerSimCLRSystem,
'PretrainViewMakerInstDiscSystem': audio_systems.PretrainViewMakerInstDiscSystem,
# --
'TransferExpertAudioMNISTSystem': audio_systems.TransferExpertAudioMNISTSystem,
'TransferExpertGoogleSpeechCommandsSystem': audio_systems.TransferExpertGoogleSpeechCommandsSystem,
'TransferExpertFluentSpeechCommandsSystem': audio_systems.TransferExpertFluentSpeechCommandsSystem,
'TransferExpertLibriSpeechSystem': audio_systems.TransferExpertLibriSpeechSystem,
'TransferExpertVoxCeleb1System': audio_systems.TransferExpertVoxCeleb1System,
'TransferViewMakerAudioMNISTSystem': audio_systems.TransferViewMakerAudioMNISTSystem,
'TransferViewMakerGoogleSpeechCommandsSystem': audio_systems.TransferViewMakerGoogleSpeechCommandsSystem,
'TransferViewMakerFluentSpeechCommandsSystem': audio_systems.TransferViewMakerFluentSpeechCommandsSystem,
'TransferViewMakerLibriSpeechSystem': audio_systems.TransferViewMakerLibriSpeechSystem,
'TransferViewMakerVoxCeleb1System': audio_systems.TransferViewMakerVoxCeleb1System,
}
def run(args, gpu_device=None):
'''Run the Lightning system.
Args:
args
args.config_path: str, filepath to the config file
gpu_device: str or None, specifies GPU device as follows:
None: CPU (specified as null in config)
'cpu': CPU
'-1': All available GPUs
'0': GPU 0
'4': GPU 4
'0,3': GPUs 0 and 3
See: https://pytorch-lightning.readthedocs.io/en/latest/multi_gpu.html
'''
if gpu_device == 'cpu' or not gpu_device:
gpu_device = None
if args.caller_intent is not None:
# for harpervalley, we need to choose between different transfer tasks
config = process_config(args.config, exp_name_suffix=args.caller_intent)
config.data_params.caller_intent = args.caller_intent
else:
config = process_config(args.config)
# Only override if specified.
if gpu_device: config.gpu_device = gpu_device
seed_everything(config.seed)
SystemClass = SYSTEM[config.system]
system = SystemClass(config)
if config.optim_params.scheduler:
lr_callback = globals()[config.optim_params.scheduler](
initial_lr=config.optim_params.learning_rate,
max_epochs=config.num_epochs,
schedule=(
int(0.6*config.num_epochs),
int(0.8*config.num_epochs),
),
)
callbacks = [lr_callback]
else:
callbacks = None
# TODO: adjust period for saving checkpoints.
ckpt_callback = pl.callbacks.ModelCheckpoint(
os.path.join(config.exp_dir, 'checkpoints'),
save_top_k=-1,
period=1,
)
wandb.init(project='audio', entity='viewmaker', name=config.exp_name, config=config, sync_tensorboard=True)
trainer = pl.Trainer(
default_root_dir=config.exp_dir,
gpus=gpu_device,
# 'ddp' is usually faster, but we use 'dp' so the negative samples
# for the whole batch are used for the SimCLR loss
distributed_backend=config.distributed_backend or 'dp',
max_epochs=config.num_epochs,
min_epochs=config.num_epochs,
checkpoint_callback=ckpt_callback,
resume_from_checkpoint=args.ckpt or config.continue_from_checkpoint,
profiler=args.profiler,
precision=config.optim_params.precision or 32,
callbacks=callbacks,
val_check_interval=config.val_check_interval or 1.0,
limit_val_batches=config.limit_val_batches or 1.0,
num_sanity_val_steps=-1,
)
trainer.fit(system)
def seed_everything(seed):
random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
numpy.random.seed(seed)
os.environ['PYTHONHASHSEED'] = str(seed)
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('config', type=str, default='path to config file')
parser.add_argument('--caller-intent', type=str, default=None)
parser.add_argument('--gpu-device', type=str, default=None)
parser.add_argument('--profiler', action='store_true')
parser.add_argument('--ckpt', type=str, default=None)
args = parser.parse_args()
# Ensure it's a string, even if from an older config
gpu_device = str(args.gpu_device) if args.gpu_device else None
run(args, gpu_device=gpu_device)
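# Illustrative invocation (the config path is hypothetical):
#   python scripts/run_audio.py configs/audio/pretrain_simclr.json --gpu-device 0
# Passing --gpu-device -1 hands all visible GPUs to the Lightning Trainer,
# while omitting the flag (or passing 'cpu') keeps gpu_device as None and the
# run stays on CPU, matching the mapping in run()'s docstring.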
|
[] |
[] |
[
"PYTHONHASHSEED"
] |
[]
|
["PYTHONHASHSEED"]
|
python
| 1 | 0 | |
test/e2e/main_test.go
|
package e2e
import (
"fmt"
"os"
"testing"
"github.com/openshift/cluster-image-registry-operator/test/framework"
)
type bootstrapTestEnv struct {
client *framework.Clientset
}
func (te *bootstrapTestEnv) Client() *framework.Clientset { return te.client }
func (te *bootstrapTestEnv) Failed() bool { return false }
func (te *bootstrapTestEnv) Log(...interface{}) {}
func (te *bootstrapTestEnv) Logf(string, ...interface{}) {}
func (te *bootstrapTestEnv) Error(a ...interface{}) { panic(fmt.Sprint(a...)) }
func (te *bootstrapTestEnv) Errorf(f string, a ...interface{}) { panic(fmt.Sprintf(f, a...)) }
func (te *bootstrapTestEnv) Fatal(a ...interface{}) { panic(fmt.Sprint(a...)) }
func (te *bootstrapTestEnv) Fatalf(f string, a ...interface{}) { panic(fmt.Sprintf(f, a...)) }
func TestMain(m *testing.M) {
if os.Getenv("KUBERNETES_CONFIG") == "" {
kubeConfig := os.Getenv("KUBECONFIG")
if kubeConfig == "" {
kubeConfig = os.Getenv("HOME") + "/.kube/config"
}
os.Setenv("KUBERNETES_CONFIG", kubeConfig)
}
client, err := framework.NewClientset(nil)
if err != nil {
panic(err)
}
te := &bootstrapTestEnv{client: client}
framework.DisableCVOForOperator(te)
framework.RemoveImageRegistry(te)
os.Exit(m.Run())
}
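// Illustrative resolution order: with KUBERNETES_CONFIG unset, TestMain falls
// back to KUBECONFIG and, if that is also empty, to $HOME/.kube/config; for
// example, exporting KUBECONFIG=/tmp/admin.kubeconfig (a hypothetical path)
// is enough for framework.NewClientset to target that cluster.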
|
[
"\"KUBERNETES_CONFIG\"",
"\"KUBECONFIG\"",
"\"HOME\""
] |
[] |
[
"KUBERNETES_CONFIG",
"HOME",
"KUBECONFIG"
] |
[]
|
["KUBERNETES_CONFIG", "HOME", "KUBECONFIG"]
|
go
| 3 | 0 | |
azurelinuxagent/distro/coreos/osutil.py
|
#
# Copyright 2014 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Requires Python 2.4+ and Openssl 1.0+
#
import os
import re
import pwd
import shutil
import socket
import array
import struct
import fcntl
import time
import base64
import azurelinuxagent.logger as logger
import azurelinuxagent.utils.fileutil as fileutil
import azurelinuxagent.utils.shellutil as shellutil
import azurelinuxagent.utils.textutil as textutil
from azurelinuxagent.distro.default.osutil import DefaultOSUtil
class CoreOSUtil(DefaultOSUtil):
def __init__(self):
super(CoreOSUtil, self).__init__()
self.agent_conf_file_path = '/usr/share/oem/waagent.conf'
self.waagent_path = '/usr/share/oem/bin/waagent'
self.python_path = '/usr/share/oem/python/bin'
if 'PATH' in os.environ:
path = "{0}:{1}".format(os.environ['PATH'], self.python_path)
else:
path = self.python_path
os.environ['PATH'] = path
if 'PYTHONPATH' in os.environ:
py_path = os.environ['PYTHONPATH']
py_path = "{0}:{1}".format(py_path, self.waagent_path)
else:
py_path = self.waagent_path
os.environ['PYTHONPATH'] = py_path
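# Illustrative effect of the constructor above: with PATH="/usr/bin" and no
# PYTHONPATH in the environment, instantiating CoreOSUtil() leaves
# PATH="/usr/bin:/usr/share/oem/python/bin" and
# PYTHONPATH="/usr/share/oem/bin/waagent", so the OEM-bundled interpreter and
# agent code resolve first.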
def is_sys_user(self, username):
# The user 'core' is not a system user.
if username == 'core':
return False
return super(CoreOSUtil, self).is_sys_user(username)
def is_dhcp_enabled(self):
return True
def start_network(self):
return shellutil.run("systemctl start systemd-networkd", chk_err=False)
def restart_if(self, iface):
shellutil.run("systemctl restart systemd-networkd")
def restart_ssh_service(self):
return shellutil.run("systemctl restart sshd", chk_err=False)
def stop_dhcp_service(self):
return shellutil.run("systemctl stop systemd-networkd", chk_err=False)
def start_dhcp_service(self):
return shellutil.run("systemctl start systemd-networkd", chk_err=False)
def start_agent_service(self):
return shellutil.run("systemctl start wagent", chk_err=False)
def stop_agent_service(self):
return shellutil.run("systemctl stop wagent", chk_err=False)
def get_dhcp_pid(self):
ret = shellutil.run_get_output("pidof systemd-networkd")
return ret[1] if ret[0] == 0 else None
def set_ssh_client_alive_interval(self):
# In CoreOS, /etc/sshd_config is mounted read-only. Skip this setting.
pass
def conf_sshd(self, disable_password):
# In CoreOS, /etc/sshd_config is mounted read-only. Skip this setting.
pass
|
[] |
[] |
[
"PATH",
"PYTHONPATH"
] |
[]
|
["PATH", "PYTHONPATH"]
|
python
| 2 | 0 | |
list-functions/handler.go
|
package function
import (
"encoding/json"
"io/ioutil"
"log"
"net/http"
"net/url"
"os"
"strings"
"time"
"github.com/openfaas/openfaas-cloud/sdk"
)
// Handle takes the functions which are built
// by buildshiprun and exposes the function object
// to be consumed by the dashboard so the function
// can be displayed
func Handle(req []byte) string {
user := string(req)
if len(user) == 0 {
if query, exists := os.LookupEnv("Http_Query"); exists {
vals, _ := url.ParseQuery(query)
userQuery := vals.Get("user")
if len(userQuery) > 0 {
user = userQuery
}
}
}
if len(user) == 0 {
return "User is required as POST or querystring i.e. ?user=alexellis."
}
c := http.Client{
Timeout: time.Second * 3,
}
gatewayURL := os.Getenv("gateway_url")
httpReq, _ := http.NewRequest(http.MethodGet, gatewayURL+"system/functions", nil)
addAuthErr := sdk.AddBasicAuth(httpReq)
if addAuthErr != nil {
log.Printf("Basic auth error %s", addAuthErr)
}
response, err := c.Do(httpReq)
filtered := []function{}
if err != nil {
log.Fatal(err)
}
defer response.Body.Close()
bodyBytes, bErr := ioutil.ReadAll(response.Body)
if bErr != nil {
log.Fatal(bErr)
}
if response.StatusCode != http.StatusOK {
log.Fatalf("unable to query functions, status: %d, message: %s", response.StatusCode, string(bodyBytes))
}
functions := []function{}
mErr := json.Unmarshal(bodyBytes, &functions)
if mErr != nil {
log.Fatal(mErr)
}
for _, fn := range functions {
for k, v := range fn.Labels {
if k == (sdk.FunctionLabelPrefix+"git-owner") && strings.EqualFold(v, user) {
// Hide internal-repo details
fn.Image = fn.Image[strings.Index(fn.Image, "/")+1:]
filtered = append(filtered, fn)
}
}
}
bytesOut, _ := json.Marshal(filtered)
return string(bytesOut)
}
type function struct {
Name string `json:"name"`
Image string `json:"image"`
InvocationCount float64 `json:"invocationCount"`
Replicas uint64 `json:"replicas"`
Labels map[string]string `json:"labels"`
Annotations map[string]string `json:"annotations"`
}
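// Illustrative call (host and user are examples): a GET to
// .../function/list-functions?user=alexellis, or a POST whose raw body is
// "alexellis", returns the JSON-encoded functions whose git-owner label
// (under sdk.FunctionLabelPrefix) matches that user, with the leading
// registry segment stripped from each Image.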
|
[
"\"gateway_url\""
] |
[] |
[
"gateway_url"
] |
[]
|
["gateway_url"]
|
go
| 1 | 0 | |
tools/src/main/java/org/usergrid/tools/ToolBase.java
|
/*******************************************************************************
* Copyright 2012 Apigee Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
******************************************************************************/
package org.usergrid.tools;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
import static org.usergrid.utils.JsonUtils.mapToFormattedJsonString;
import java.util.Properties;
import me.prettyprint.hector.testutils.EmbeddedServerHelper;
import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.CommandLineParser;
import org.apache.commons.cli.GnuParser;
import org.apache.commons.cli.HelpFormatter;
import org.apache.commons.cli.Option;
import org.apache.commons.cli.OptionBuilder;
import org.apache.commons.cli.Options;
import org.apache.commons.cli.ParseException;
import org.apache.commons.lang.ClassUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.beans.factory.config.AutowireCapableBeanFactory;
import org.springframework.context.ApplicationContext;
import org.springframework.context.support.ClassPathXmlApplicationContext;
import org.usergrid.management.ManagementService;
import org.usergrid.persistence.EntityManagerFactory;
import org.usergrid.persistence.cassandra.CassandraService;
import org.usergrid.persistence.cassandra.EntityManagerFactoryImpl;
import org.usergrid.persistence.cassandra.Setup;
import org.usergrid.services.ServiceManagerFactory;
public abstract class ToolBase {
public static final int MAX_ENTITY_FETCH = 100;
/** Verbose option: -v */
static final String VERBOSE = "v";
boolean isVerboseEnabled = false;
static final Logger logger = LoggerFactory
.getLogger(ToolBase.class);
/**
*
*/
protected static final String PATH_REPLACEMENT = "USERGIRD-PATH-BACKSLASH";
protected EmbeddedServerHelper embedded = null;
protected EntityManagerFactory emf;
protected ServiceManagerFactory smf;
protected ManagementService managementService;
protected Properties properties;
protected CassandraService cass;
public void startTool(String[] args) {
CommandLineParser parser = new GnuParser();
CommandLine line = null;
try {
line = parser.parse(createOptions(), args);
} catch (ParseException exp) {
printCliHelp("Parsing failed. Reason: " + exp.getMessage());
}
if (line == null) {
return;
}
if (line.hasOption("host")) {
System.setProperty("cassandra.url",
line.getOptionValue("host"));
}
try {
runTool(line);
} catch (Exception e) {
e.printStackTrace();
}
System.exit(0);
}
public void printCliHelp(String message) {
System.out.println(message);
HelpFormatter formatter = new HelpFormatter();
formatter.printHelp("java -jar usergrid-tools-0.0.1-SNAPSHOT.jar "
+ getToolName(), createOptions());
System.exit(-1);
}
public String getToolName() {
return ClassUtils.getShortClassName(this.getClass());
}
@SuppressWarnings("static-access")
public Options createOptions() {
Option hostOption = OptionBuilder.withArgName("host").hasArg()
.withDescription("Cassandra host").create("host");
Option remoteOption = OptionBuilder.withDescription(
"Use remote Cassandra instance").create("remote");
Option verbose = OptionBuilder
.withDescription(
"Print on the console an echo of the content written to the file")
.create(VERBOSE);
Options options = new Options();
options.addOption(hostOption);
options.addOption(remoteOption);
options.addOption(verbose);
return options;
}
public void startEmbedded() throws Exception {
// assertNotNull(client);
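// MAVEN_OPTS is read from the environment purely for the diagnostic log line below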
String maven_opts = System.getenv("MAVEN_OPTS");
logger.info("Maven options: " + maven_opts);
logger.info("Starting Cassandra");
embedded = new EmbeddedServerHelper();
embedded.setup();
}
public void startSpring() {
// copy("/testApplicationContext.xml", TMP);
String[] locations = { "toolsApplicationContext.xml" };
ApplicationContext ac = new ClassPathXmlApplicationContext(locations);
AutowireCapableBeanFactory acbf = ac.getAutowireCapableBeanFactory();
acbf.autowireBeanProperties(this,
AutowireCapableBeanFactory.AUTOWIRE_BY_NAME, false);
acbf.initializeBean(this, "testClient");
assertNotNull(emf);
assertTrue(
"EntityManagerFactory is instance of EntityManagerFactoryImpl",
emf instanceof EntityManagerFactoryImpl);
}
public void setupCassandra() throws Exception {
Setup setup = ((EntityManagerFactoryImpl) emf).getSetup();
logger.info("Setting up Usergrid schema");
setup.setup();
logger.info("Usergrid schema setup");
logger.info("Setting up Usergrid management services");
managementService.setup();
logger.info("Usergrid management services setup");
}
public void teardownEmbedded() {
logger.info("Stopping Cassandra");
EmbeddedServerHelper.teardown();
}
void setVerbose(CommandLine line) {
if (line.hasOption(VERBOSE)) {
isVerboseEnabled = true;
}
}
/**
* Log the content in the default logger(info)
*
* @param content
*/
void echo(String content) {
if (isVerboseEnabled) {
logger.info(content);
}
}
/**
* Print the object in JSon format.
*
* @param obj
*/
void echo(Object obj) {
echo(mapToFormattedJsonString(obj));
}
@Autowired
public void setEntityManagerFactory(EntityManagerFactory emf) {
this.emf = emf;
}
@Autowired
public void setServiceManagerFactory(ServiceManagerFactory smf) {
this.smf = smf;
logger.info("ManagementResource.setServiceManagerFactory");
}
@Autowired
public void setManagementService(ManagementService managementService) {
this.managementService = managementService;
}
@Autowired
public void setProperties(Properties properties) {
this.properties = properties;
}
@Autowired
public void setCassandraService(CassandraService cass){
this.cass = cass;
}
public abstract void runTool(CommandLine line) throws Exception;
}
|
[
"\"MAVEN_OPTS\""
] |
[] |
[
"MAVEN_OPTS"
] |
[]
|
["MAVEN_OPTS"]
|
java
| 1 | 0 | |
src/manage.py
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "simsc.settings")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
internal/editor/edit_linux.go
|
// +build linux
package editor
import (
"os"
"os/exec"
"github.com/urfave/cli/v2"
)
// Path returns the name/path of the preferred editor
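// Lookup order: the --editor flag, then $EDITOR, then an "editor" binary on PATH, and finally "vi" as a fallback.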
func Path(c *cli.Context) string {
if c != nil {
if ed := c.String("editor"); ed != "" {
return ed
}
}
if ed := os.Getenv("EDITOR"); ed != "" {
return ed
}
if p, err := exec.LookPath("editor"); err == nil {
return p
}
// if neither EDITOR is set nor "editor" available we'll just assume that vi
// is installed. If this fails the user will have to set $EDITOR
return "vi"
}
|
[
"\"EDITOR\""
] |
[] |
[
"EDITOR"
] |
[]
|
["EDITOR"]
|
go
| 1 | 0 | |
jvm-env/02_iot_data_consumer/src/main/java/io/fission/kafka/IotConsumer.java
|
package io.fission.kafka;
import java.lang.Thread;
import java.lang.Integer;
import java.util.HashMap;
import java.util.logging.Logger;
import org.springframework.http.HttpStatus;
import org.springframework.http.RequestEntity;
import org.springframework.http.ResponseEntity;
import com.fasterxml.jackson.databind.ObjectMapper;
import io.fission.Context;
import io.fission.Function;
import redis.clients.jedis.Jedis;
import redis.clients.jedis.JedisPool;
import redis.clients.jedis.JedisPoolConfig;
public class IotConsumer implements Function {
private static Logger logger = Logger.getGlobal();
JedisPool pool = new JedisPool(new JedisPoolConfig(), System.getenv("REDIS_ADDR"));
final ObjectMapper mapper = new ObjectMapper();
public ResponseEntity call(RequestEntity req, Context context) {
HashMap data = (HashMap) req.getBody();
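// CONSUMER_SLEEP_DELAY must be set to an integer number of milliseconds; Integer.parseInt throws if it is unset or non-numeric.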
int sleepDelay = Integer.parseInt(System.getenv("CONSUMER_SLEEP_DELAY"));
try {
Thread.sleep(sleepDelay);
} catch(Exception exception) {
logger.info("Exception in thread sleep" + exception);
}
logger.info("Data=" + data.toString());
IoTData iotData = mapper.convertValue(data, IoTData.class);
Jedis jedis = null;
try {
jedis = pool.getResource();
// - Add RouteID <> FuelLevel, limited to 10K records
jedis.lpush((iotData.getRouteId()+"-FUEL").toUpperCase(), Double.toString(iotData.getFuelLevel()));
jedis.ltrim((iotData.getRouteId()+"-FUEL").toUpperCase(), 0, 10000);
// - Add RouteID <> Speed, limited to 10K records
jedis.lpush((iotData.getRouteId()+"-SPEED").toUpperCase(), Double.toString(iotData.getSpeed()));
jedis.ltrim((iotData.getRouteId()+"-SPEED").toUpperCase(), 0, 10000);
// - Add Increment Vehicle type by 1
jedis.hincrBy((iotData.getVehicleType().replace(" ", "-")).toUpperCase(), "COUNT", 1);
jedis.hincrBy("RECORD_ACK_BY_CONSUMER", "COUNT", 1);
// - Add Data of Speed across times.
jedis.hset((iotData.getRouteId()+"-DATA").toUpperCase(), iotData.getTimestamp().toString(), Double.toString(iotData.getSpeed()));
} finally {
// You have to close the jedis object. If you don't, it is not
// released back to the pool and you can't get a new resource
// from the pool.
if (jedis != null) {
jedis.close();
}
}
return ResponseEntity.status(HttpStatus.OK).build();
}
}
|
[
"\"REDIS_ADDR\"",
"\"CONSUMER_SLEEP_DELAY\""
] |
[] |
[
"CONSUMER_SLEEP_DELAY",
"REDIS_ADDR"
] |
[]
|
["CONSUMER_SLEEP_DELAY", "REDIS_ADDR"]
|
java
| 2 | 0 | |
cmd/rootlessctl/main.go
|
package main
import (
"errors"
"fmt"
"os"
"path/filepath"
"github.com/sirupsen/logrus"
"github.com/urfave/cli/v2"
"github.com/rootless-containers/rootlesskit/pkg/api/client"
"github.com/rootless-containers/rootlesskit/pkg/version"
)
func main() {
debug := false
app := cli.NewApp()
app.Name = "rootlessctl"
app.Version = version.Version
app.Usage = "RootlessKit API client"
app.Flags = []cli.Flag{
&cli.BoolFlag{
Name: "debug",
Usage: "debug mode",
Destination: &debug,
},
&cli.StringFlag{
Name: "socket",
Usage: "Path to api.sock (under the \"rootlesskit --state-dir\" directory), defaults to $ROOTLESSKIT_STATE_DIR/api.sock",
},
}
app.Commands = []*cli.Command{
&listPortsCommand,
&addPortsCommand,
&removePortsCommand,
&infoCommand,
}
app.Before = func(clicontext *cli.Context) error {
if debug {
logrus.SetLevel(logrus.DebugLevel)
}
return nil
}
if err := app.Run(os.Args); err != nil {
if debug {
fmt.Fprintf(os.Stderr, "error: %+v\n", err)
} else {
fmt.Fprintf(os.Stderr, "error: %v\n", err)
}
os.Exit(1)
}
}
func newClient(clicontext *cli.Context) (client.Client, error) {
socketPath := clicontext.String("socket")
if socketPath == "" {
stateDir := os.Getenv("ROOTLESSKIT_STATE_DIR")
if stateDir == "" {
return nil, errors.New("please specify --socket or set $ROOTLESSKIT_STATE_DIR")
}
socketPath = filepath.Join(stateDir, "api.sock")
}
return client.New(socketPath)
}
|
[
"\"ROOTLESSKIT_STATE_DIR\""
] |
[] |
[
"ROOTLESSKIT_STATE_DIR"
] |
[]
|
["ROOTLESSKIT_STATE_DIR"]
|
go
| 1 | 0 | |
lldb/test/Shell/helper/toolchain.py
|
import os
import itertools
import platform
import subprocess
import sys
import lit.util
from lit.llvm import llvm_config
from lit.llvm.subst import FindTool
from lit.llvm.subst import ToolSubst
def _get_lldb_init_path(config):
return os.path.join(config.test_exec_root, 'lit-lldb-init')
def _disallow(config, execName):
warning = '''
echo '*** Do not use \'{0}\' in tests; use \'%''{0}\'. ***' &&
exit 1 && echo
'''
config.substitutions.append((' {0} '.format(execName),
warning.format(execName)))
def use_lldb_substitutions(config):
# Set up substitutions for primary tools. These tools must come from config.lldb_tools_dir
# which is basically the build output directory. We do not want to find these in path or
# anywhere else, since they are specifically the programs which are actually being tested.
dsname = 'debugserver' if platform.system() in ['Darwin'] else 'lldb-server'
dsargs = [] if platform.system() in ['Darwin'] else ['gdbserver']
build_script = os.path.dirname(__file__)
build_script = os.path.join(build_script, 'build.py')
build_script_args = [build_script,
'--compiler=any', # Default to best compiler
'--arch=' + str(config.lldb_bitness)]
if config.lldb_lit_tools_dir:
build_script_args.append('--tools-dir={0}'.format(config.lldb_lit_tools_dir))
if config.lldb_tools_dir:
build_script_args.append('--tools-dir={0}'.format(config.lldb_tools_dir))
if config.llvm_libs_dir:
build_script_args.append('--libs-dir={0}'.format(config.llvm_libs_dir))
lldb_init = _get_lldb_init_path(config)
primary_tools = [
ToolSubst('%lldb',
command=FindTool('lldb'),
extra_args=['--no-lldbinit', '-S', lldb_init],
unresolved='fatal'),
ToolSubst('%lldb-init',
command=FindTool('lldb'),
extra_args=['-S', lldb_init],
unresolved='fatal'),
ToolSubst('%lldb-noinit',
command=FindTool('lldb'),
extra_args=['--no-lldbinit'],
unresolved='fatal'),
ToolSubst('%lldb-server',
command=FindTool("lldb-server"),
extra_args=[],
unresolved='ignore'),
ToolSubst('%debugserver',
command=FindTool(dsname),
extra_args=dsargs,
unresolved='ignore'),
ToolSubst('%platformserver',
command=FindTool('lldb-server'),
extra_args=['platform'],
unresolved='ignore'),
'lldb-test',
'lldb-instr',
'lldb-vscode',
ToolSubst('%build',
command="'" + sys.executable + "'",
extra_args=build_script_args)
]
_disallow(config, 'lldb')
_disallow(config, 'lldb-server')
_disallow(config, 'debugserver')
_disallow(config, 'platformserver')
llvm_config.add_tool_substitutions(primary_tools, [config.lldb_tools_dir])
def _use_msvc_substitutions(config):
# If running from a Visual Studio Command prompt (e.g. vcvars), this will
# detect the include and lib paths, and find cl.exe and link.exe and create
# substitutions for each of them that explicitly specify /I and /L paths
cl = lit.util.which('cl')
link = lit.util.which('link')
if not cl or not link:
return
cl = '"' + cl + '"'
link = '"' + link + '"'
includes = os.getenv('INCLUDE', '').split(';')
libs = os.getenv('LIB', '').split(';')
config.available_features.add('msvc')
compiler_flags = ['"/I{}"'.format(x) for x in includes if os.path.exists(x)]
linker_flags = ['"/LIBPATH:{}"'.format(x) for x in libs if os.path.exists(x)]
tools = [
ToolSubst('%msvc_cl', command=cl, extra_args=compiler_flags),
ToolSubst('%msvc_link', command=link, extra_args=linker_flags)]
llvm_config.add_tool_substitutions(tools)
return
def use_support_substitutions(config):
# Set up substitutions for support tools. These tools can be overridden at the CMake
# level (by specifying -DLLDB_LIT_TOOLS_DIR), installed, or as a last resort, we can use
# the just-built version.
host_flags = ['--target=' + config.host_triple]
if platform.system() in ['Darwin']:
try:
out = subprocess.check_output(['xcrun', '--show-sdk-path']).strip()
res = 0
except OSError:
res = -1
if res == 0 and out:
sdk_path = lit.util.to_string(out)
llvm_config.lit_config.note('using SDKROOT: %r' % sdk_path)
host_flags += ['-isysroot', sdk_path]
elif sys.platform != 'win32':
host_flags += ['-pthread']
config.target_shared_library_suffix = '.dylib' if platform.system() in ['Darwin'] else '.so'
config.substitutions.append(('%target-shared-library-suffix', config.target_shared_library_suffix))
# Swift support
swift_args = ['-module-cache-path',
os.path.join(os.path.dirname(config.lldb_libs_dir),
'lldb-test-build.noindex',
'module-cache-clang')]
swift_driver_args = []
if platform.system() in ['Darwin']:
swift_args += ['-sdk', sdk_path]
tools = [
ToolSubst(
'%target-swiftc', command=config.swiftc,
extra_args=swift_args + swift_driver_args),
ToolSubst(
'%target-swift-frontend', command=config.swiftc[:-1],
extra_args=(['-frontend'] + swift_args))
]
llvm_config.add_tool_substitutions(tools)
swift_bin_dir = os.path.dirname(config.swiftc)
swift_Benchmark_Onone = os.path.join(swift_bin_dir,
'Benchmark_Onone-{0}'.format(config.target_triple))
if os.path.exists(swift_Benchmark_Onone):
config.substitutions.append(('%swift_Benchmark_Onone',
swift_Benchmark_Onone))
config.available_features.add('swift_Benchmark_Onone')
if sys.platform.startswith('netbsd'):
# needed e.g. to use freshly built libc++
host_flags += ['-L' + config.llvm_libs_dir,
'-Wl,-rpath,' + config.llvm_libs_dir]
# The clang module cache is used for building inferiors.
host_flags += ['-fmodules-cache-path={}'.format(config.clang_module_cache)]
host_flags = ' '.join(host_flags)
config.substitutions.append(('%clang_host', '%clang ' + host_flags))
config.substitutions.append(('%clangxx_host', '%clangxx ' + host_flags))
config.substitutions.append(('%clang_cl_host', '%clang_cl --target='+config.host_triple))
additional_tool_dirs=[]
if config.lldb_lit_tools_dir:
additional_tool_dirs.append(config.lldb_lit_tools_dir)
llvm_config.use_clang(additional_flags=['--target=specify-a-target-or-use-a-_host-substitution'],
additional_tool_dirs=additional_tool_dirs,
required=True, use_installed=True)
if sys.platform == 'win32':
_use_msvc_substitutions(config)
have_lld = llvm_config.use_lld(additional_tool_dirs=additional_tool_dirs,
required=False, use_installed=True)
if have_lld:
config.available_features.add('lld')
support_tools = ['yaml2obj', 'obj2yaml', 'llvm-dwp', 'llvm-pdbutil',
'llvm-mc', 'llvm-readobj', 'llvm-objdump',
'llvm-objcopy', 'lli']
additional_tool_dirs += [config.lldb_tools_dir, config.llvm_tools_dir]
llvm_config.add_tool_substitutions(support_tools, additional_tool_dirs)
_disallow(config, 'clang')
def use_lldb_repro_substitutions(config, mode):
lldb_init = _get_lldb_init_path(config)
substitutions = [
ToolSubst(
'%lldb',
command=FindTool('lldb-repro'),
extra_args=[mode, '--no-lldbinit', '-S', lldb_init]),
ToolSubst(
'%lldb-init',
command=FindTool('lldb-repro'),
extra_args=[mode, '-S', lldb_init]),
]
llvm_config.add_tool_substitutions(substitutions, [config.lldb_tools_dir])
|
[] |
[] |
[
"LIB",
"INCLUDE"
] |
[]
|
["LIB", "INCLUDE"]
|
python
| 2 | 0 | |
core/server.go
|
package core
//Library for central processing and server functions
import (
"errors"
"fmt"
"net"
"os"
"github.com/joho/godotenv"
"gorm.io/driver/sqlite"
"gorm.io/gorm"
"gorm.io/gorm/logger"
"gorm.io/gorm/schema"
)
var (
Server = TServer{
Mode: 0,
Host: "127.0.0.1",
Port: ":8000",
//
DBName: "tmp/test.db",
DBConfig: &gorm.Config{
NamingStrategy: schema.NamingStrategy{
SingularTable: true, // use singular table name, table for `User` would be `user` with this option enabled
},
Logger: logger.Default.LogMode(logger.Silent)},
//
Path: "",
TestfileCatalog: "tmp/catalog.json",
TestfileOrder: "tmp/order.json",
TestfileOrga: "tmp/orga.json",
TestfileResource: "tmp/resource.json",
TestfileCalendar: "tmp/calendar.json",
//
BingURLLocation: "https://dev.virtualearth.net/REST/v1/Locations/%s/%s/%s/%s?" +
"includeNeighborhood=1&include=ciso2&maxResults=%d&key=%s",
BingURLTimezone: "https://dev.virtualearth.net/REST/v1/TimeZone/?query=%s&key=%s",
BingApiKey: "", //put api key in .env file
//
ForcastPeriod: 14,
}
)
type TServer struct {
Mode int
Host string
Port string
DB *gorm.DB
DBName string
DBConfig *gorm.Config
Path string
TestfileCatalog string
TestfileOrder string
TestfileOrga string
TestfileResource string
TestfileCalendar string
BingURLLocation string
BingURLTimezone string
BingApiKey string
ForcastPeriod uint
}
//GetExternalIP determines the external IP address
func GetExternalIP() (string, error) {
ifaces, err := net.Interfaces()
if err != nil {
return "", err
}
for _, iface := range ifaces {
if iface.Flags&net.FlagUp == 0 {
continue // interface down
}
if iface.Flags&net.FlagLoopback != 0 {
continue // loopback interface
}
addrs, err := iface.Addrs()
if err != nil {
return "", err
}
for _, addr := range addrs {
var ip net.IP
switch v := addr.(type) {
case *net.IPNet:
ip = v.IP
case *net.IPAddr:
ip = v.IP
}
if ip == nil || ip.IsLoopback() {
continue
}
ip = ip.To4()
if ip == nil {
continue // not an ipv4 address
}
return ip.String(), nil
}
}
return "", errors.New("Are you connected to the network?")
}
//ServerInit Initialize server, read .env variables
// Initialize DB
// iMode int -> 0: run or 1: test data
// iPath string -> path for data files
func ServerInit(iMode int, iPath string) (me *TServer, err error) {
me = &Server
me.Mode = iMode
me.Path = iPath
//load and handle .env variables
err = godotenv.Load(iPath + ".env")
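//optional .env keys: "port" (listen port without the leading colon) and "bing_api_key"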
if os.Getenv("port") != "" {
me.Port = ":" + os.Getenv("port")
}
//determine the external IP address
if ip, err := GetExternalIP(); err == nil {
me.Host = ip
fmt.Println("IP address set:", ip)
}
//api-key for geo-coding
if os.Getenv("bing_api_key") != "" {
me.BingApiKey = os.Getenv("bing_api_key")
}
//init DB
me.DB, err = gorm.Open(sqlite.Open(iPath+me.DBName), me.DBConfig)
InitCatalogDB(me.Mode)
InitLocationDB(me.Mode)
InitRequirementDB(me.Mode)
InitCalendarDB(me.Mode)
InitOrgaDB(me.Mode)
InitResourceDB(me.Mode)
InitOrderDB(me.Mode)
InitAssignmentDB(me.Mode)
if iMode == 1 {
InitAllResourcesFromNow(me.ForcastPeriod, true)
}
return
}
|
[
"\"port\"",
"\"port\"",
"\"bing_api_key\"",
"\"bing_api_key\""
] |
[] |
[
"port",
"bing_api_key"
] |
[]
|
["port", "bing_api_key"]
|
go
| 2 | 0 | |
msb2pilot/src/msb2pilot/util/common_test.go
|
/**
* Copyright (c) 2018 ZTE Corporation.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Eclipse Public License v1.0
* and the Apache License 2.0 which both accompany this distribution,
* and are available at http://www.eclipse.org/legal/epl-v10.html
* and http://www.apache.org/licenses/LICENSE-2.0
*
* Contributors:
* ZTE - initial Project
*/
package util
import (
"os"
"strings"
"testing"
)
func TestGetCfgPath(t *testing.T) {
got := GetCfgPath()
if !strings.Contains(got, "conf") {
t.Errorf("GetCfgPath() => got %v, should contains `ocnf`", got)
}
}
func TestGetGoPath(t *testing.T) {
oldPaths := os.Getenv("GOPATH")
cases := []struct {
in string
want []string
}{
{ // window
in: `path1;path2;path3`,
want: []string{
`path1`,
`path2`,
`path3`,
},
},
{ // linux
in: `path1:path2:path3`,
want: []string{
`path1`,
`path2`,
`path3`,
},
},
{ // single Path
in: `path1`,
want: []string{
`path1`,
},
},
{ // single Path
in: `;`,
want: []string{
``, ``,
},
},
}
for _, cas := range cases {
os.Setenv("GOPATH", cas.in)
got := GetGoPath()
if len(cas.want) != len(got) {
t.Errorf("GetGoPath() => different size, got %d, want %d, %v, %v", len(got), len(cas.want), got, cas.want)
}
for i, item := range cas.want {
if item != got[i] {
t.Errorf("GetGoPath() => got %v, want %v", got, cas.want)
break
}
}
}
// unset test
os.Unsetenv("GOPATH")
got := GetGoPath()
if len(got) != 0 {
t.Errorf("GetGoPath() => unset env test got len %d, want 0", len(got))
}
os.Setenv("GOPATH", oldPaths)
}
func TestFileExists(t *testing.T) {
existFile := `common_test.go`
notExistFile := `common_test.go_11`
exist := FileExists(existFile)
if !exist {
t.Errorf("FileExists(%s) => got false, want true", existFile)
}
exist = FileExists(notExistFile)
if exist {
t.Errorf("FileExists(%s) => got true, want false", notExistFile)
}
}
|
[
"\"GOPATH\""
] |
[] |
[
"GOPATH"
] |
[]
|
["GOPATH"]
|
go
| 1 | 0 | |
selfdrive/car/interfaces.py
|
import os
import time
from cereal import car
from common.kalman.simple_kalman import KF1D
from common.realtime import DT_CTRL
from selfdrive.car import gen_empty_fingerprint
from selfdrive.config import Conversions as CV
from selfdrive.controls.lib.events import Events
from selfdrive.controls.lib.vehicle_model import VehicleModel
from selfdrive.controls.lib.drive_helpers import V_CRUISE_MAX
GearShifter = car.CarState.GearShifter
EventName = car.CarEvent.EventName
MAX_CTRL_SPEED = (V_CRUISE_MAX + 4) * CV.KPH_TO_MS  # (144 + 4) kph ≈ 92 mph
# generic car and radar interfaces
class CarInterfaceBase():
def __init__(self, CP, CarController, CarState):
self.CP = CP
self.VM = VehicleModel(CP)
self.frame = 0
self.low_speed_alert = False
if CarState is not None:
self.CS = CarState(CP)
self.cp = self.CS.get_can_parser(CP)
self.cp_cam = self.CS.get_cam_can_parser(CP)
self.cp_body = self.CS.get_body_can_parser(CP)
self.CC = None
if CarController is not None:
self.CC = CarController(self.cp.dbc_name, CP, self.VM)
@staticmethod
def calc_accel_override(a_ego, a_target, v_ego, v_target):
return 1.
@staticmethod
def compute_gb(accel, speed):
raise NotImplementedError
@staticmethod
def get_params(candidate, fingerprint=gen_empty_fingerprint(), car_fw=None):
raise NotImplementedError
# returns a set of default params to avoid repetition in car specific params
@staticmethod
def get_std_params(candidate, fingerprint):
ret = car.CarParams.new_message()
ret.carFingerprint = candidate
ret.isPandaBlack = True # TODO: deprecate this field
# standard ALC params
ret.steerControlType = car.CarParams.SteerControlType.torque
ret.steerMaxBP = [0.]
ret.steerMaxV = [1.]
ret.minSteerSpeed = 0.
# stock ACC by default
ret.enableCruise = True
ret.minEnableSpeed = -1. # enable is done by stock ACC, so ignore this
ret.steerRatioRear = 0. # no rear steering, at least on the listed cars above
ret.gasMaxBP = [0.]
ret.gasMaxV = [.5] # half max brake
ret.brakeMaxBP = [0.]
ret.brakeMaxV = [1.]
ret.openpilotLongitudinalControl = False
ret.startAccel = 0.0
ret.stoppingControl = False
ret.longitudinalTuning.deadzoneBP = [0.]
ret.longitudinalTuning.deadzoneV = [0.]
ret.longitudinalTuning.kpBP = [0.]
ret.longitudinalTuning.kpV = [1.]
ret.longitudinalTuning.kiBP = [0.]
ret.longitudinalTuning.kiV = [1.]
return ret
# returns a car.CarState, pass in car.CarControl
def update(self, c, can_strings):
raise NotImplementedError
# return sendcan, pass in a car.CarControl
def apply(self, c):
raise NotImplementedError
def create_common_events(self, cs_out, extra_gears=[], gas_resume_speed=-1, pcm_enable=True): # pylint: disable=dangerous-default-value
events = Events()
if cs_out.doorOpen:
events.add(EventName.doorOpen)
if cs_out.seatbeltUnlatched:
events.add(EventName.seatbeltNotLatched)
if cs_out.gearShifter != GearShifter.drive and cs_out.gearShifter not in extra_gears:
events.add(EventName.wrongGear)
if cs_out.gearShifter == GearShifter.reverse:
events.add(EventName.reverseGear)
if not cs_out.cruiseState.available:
events.add(EventName.wrongCarMode)
if cs_out.espDisabled:
events.add(EventName.espDisabled)
if cs_out.gasPressed:
events.add(EventName.gasPressed)
if cs_out.stockFcw:
events.add(EventName.stockFcw)
if cs_out.stockAeb:
events.add(EventName.stockAeb)
if cs_out.vEgo > MAX_CTRL_SPEED:
events.add(EventName.speedTooHigh)
if cs_out.cruiseState.nonAdaptive:
events.add(EventName.wrongCruiseMode)
if cs_out.steerError:
events.add(EventName.steerUnavailable)
elif cs_out.steerWarning:
events.add(EventName.steerTempUnavailable)
# Disable on rising edge of gas or brake. Also disable on brake when speed > 0.
# Optionally allow to press gas at zero speed to resume.
# e.g. Chrysler does not spam the resume button yet, so resuming with gas is handy. FIXME!
if (cs_out.gasPressed and (not self.CS.out.gasPressed) and cs_out.vEgo > gas_resume_speed) or \
(cs_out.brakePressed and (not self.CS.out.brakePressed or not cs_out.standstill)):
events.add(EventName.pedalPressed)
# we engage when pcm is active (rising edge)
if pcm_enable:
if cs_out.cruiseState.enabled and not self.CS.out.cruiseState.enabled:
events.add(EventName.pcmEnable)
elif not cs_out.cruiseState.enabled:
events.add(EventName.pcmDisable)
return events
class RadarInterfaceBase():
def __init__(self, CP):
self.pts = {}
self.delay = 0
self.radar_ts = CP.radarTimeStep
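# if NO_RADAR_SLEEP is set in the environment, update() skips the per-iteration sleep below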
self.no_radar_sleep = 'NO_RADAR_SLEEP' in os.environ
def update(self, can_strings):
ret = car.RadarData.new_message()
if not self.no_radar_sleep:
time.sleep(self.radar_ts) # radard runs on RI updates
return ret
class CarStateBase:
def __init__(self, CP):
self.CP = CP
self.car_fingerprint = CP.carFingerprint
self.out = car.CarState.new_message()
self.cruise_buttons = 0
self.left_blinker_cnt = 0
self.right_blinker_cnt = 0
# Q = np.matrix([[10.0, 0.0], [0.0, 100.0]])
# R = 1e3
self.v_ego_kf = KF1D(x0=[[0.0], [0.0]],
A=[[1.0, DT_CTRL], [0.0, 1.0]],
C=[1.0, 0.0],
K=[[0.12287673], [0.29666309]])
def update_speed_kf(self, v_ego_raw):
if abs(v_ego_raw - self.v_ego_kf.x[0][0]) > 2.0: # Prevent large accelerations when car starts at non zero speed
self.v_ego_kf.x = [[v_ego_raw], [0.0]]
v_ego_x = self.v_ego_kf.update(v_ego_raw)
return float(v_ego_x[0]), float(v_ego_x[1])
def update_blinker(self, blinker_time: int, left_blinker_lamp: bool, right_blinker_lamp: bool):
self.left_blinker_cnt = blinker_time if left_blinker_lamp else max(self.left_blinker_cnt - 1, 0)
self.right_blinker_cnt = blinker_time if right_blinker_lamp else max(self.right_blinker_cnt - 1, 0)
return self.left_blinker_cnt > 0, self.right_blinker_cnt > 0
@staticmethod
def parse_gear_shifter(gear):
return {'P': GearShifter.park, 'R': GearShifter.reverse, 'N': GearShifter.neutral,
'E': GearShifter.eco, 'T': GearShifter.manumatic, 'D': GearShifter.drive,
'S': GearShifter.sport, 'L': GearShifter.low, 'B': GearShifter.brake}.get(gear, GearShifter.unknown)
@staticmethod
def get_cam_can_parser(CP):
return None
@staticmethod
def get_body_can_parser(CP):
return None
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
scripts/create_remote_scripts.py
|
#!/usr/bin/env python2
# Created by Guillaume Leurquin, [email protected]
"""
create_remote_scripts.py crypto_config.yaml aws_config.json
Requires GEN_PATH environment variable to be set,
which points to the hyperledger fabric certificate
structure created by cryptogen.py
Creates scripts to remotely connect to the network,
create and join all channels, and also update the chaincodes remotely
"""
import os
import sys
import json
import yaml
GEN_PATH = os.environ["GEN_PATH"]
DEBUG = False
def fail(msg):
"""Prints the error message and exits"""
sys.stderr.write('\033[91m' + msg + '\033[0m\n')
exit(1)
def call(script, *args):
"""Calls the given script using the args"""
cmd = script + " " + " ".join(args)
if DEBUG:
print cmd
if os.system(cmd) != 0:
fail("\nERROR: An error occured while executing " + cmd + ". See above for details.")
if len(sys.argv) != 3:
fail("Usage: create_remote_scripts crypto_config aws_config ")
YAML_CONFIG = sys.argv[1]
AWS_CONFIG = sys.argv[2]
CREATE_AND_JOIN_CHANNELS_REMOTE_SCRIPT = GEN_PATH + '/scripts/create_and_join_channels_remote.sh'
CHAINCODE_REMOTE_SCRIPT = GEN_PATH + '/scripts/update_remote_chaincodes.sh'
SCRIPT_PREAMBLE = """#!/bin/bash
# This file is auto-generated
set -eu -o pipefail
echo "Modifying /etc/hosts..."
INSTALL_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
$INSTALL_DIR/set_hosts_public.sh
"""
def create_remote_channel_script(CONF, AWS, channels_remote_script):
channels_remote_script.write(SCRIPT_PREAMBLE)
for org in CONF['Orgs']:
if 'peers' in org and org['peers'] is not None:
for peer in org['peers']:
if 'Tools' in peer:
channels_remote_script.write("cmd=\"docker exec -it tools.{0} bash -c\"\n".format(org['Domain']))
template = "ssh -oStrictHostKeyChecking=no -i {0} -t {1}@tools.{2} $cmd '\"/etc/hyperledger/configtx/create_and_join_channel.sh {3}\"'\n"
channels_remote_script.write(template.format(
AWS['private_key_path'],
AWS['ssh_username'],
org['Domain'],
peer['Tools']
))
REMOTE_CHAINCODE_SCRIPT_PREAMBLE = """#!/bin/bash
# This file is auto-generated
set -eu -o pipefail
echo "Make sure the channels have been created before running this script"
echo "Make sure that set_public_hosts.sh has been run before running this script"
"""
def create_remote_chaincode_script(CONF, AWS, chaincode_remote_script):
chaincode_remote_script.write(SCRIPT_PREAMBLE)
for org in CONF['Orgs']:
if 'peers' in org and org['peers'] is not None:
for peer in org['peers']:
if 'Tools' in peer:
chaincode_remote_script.write("cmd=\"docker exec -it tools.{0} bash -c\"\n".format(org['Domain']))
template = "ssh -oStrictHostKeyChecking=no -i {0} -t {1}@tools.{2} $cmd '\"/etc/hyperledger/chaincode_tools/update_chaincodes.py --repository {3} --chaincodeBasePath {4} {5}\"'\n"
chaincode_remote_script.write(template.format(
AWS['private_key_path'],
AWS['ssh_username'],
org['Domain'],
AWS['chaincode_repository'],
AWS['chaincode_base_path'],
"--build" if AWS['chaincode_build'] else ""
))
return
raise Exception('No tools found in the configuration file')
call('mkdir -p', GEN_PATH + "/scripts")
with open(YAML_CONFIG, 'r') as stream:
with open(AWS_CONFIG, 'r') as aws_stream:
with open(CREATE_AND_JOIN_CHANNELS_REMOTE_SCRIPT, 'w') as remote_channels_script:
with open(CHAINCODE_REMOTE_SCRIPT, 'w') as remote_chaincode_script:
try:
CONF = yaml.load(stream)
AWS = json.load(aws_stream)
create_remote_channel_script(CONF, AWS, remote_channels_script)
create_remote_chaincode_script(CONF, AWS, remote_chaincode_script)
except yaml.YAMLError as exc:
print exc
call("chmod +x", CREATE_AND_JOIN_CHANNELS_REMOTE_SCRIPT)
call("chmod +x", CHAINCODE_REMOTE_SCRIPT)
|
[] |
[] |
[
"GEN_PATH"
] |
[]
|
["GEN_PATH"]
|
python
| 1 | 0 | |
cmd/commands/version/version.go
|
package version
import (
"bufio"
"bytes"
"encoding/json"
"flag"
"fmt"
"io"
"os"
"os/exec"
path "path/filepath"
"regexp"
"runtime"
"strings"
"github.com/beego/bee/cmd/commands"
beeLogger "github.com/beego/bee/logger"
"github.com/beego/bee/logger/colors"
"github.com/beego/bee/utils"
"gopkg.in/yaml.v2"
)
const verboseVersionBanner string = `%s%s______
| ___ \
| |_/ / ___ ___
| ___ \ / _ \ / _ \
| |_/ /| __/| __/
\____/ \___| \___| v{{ .BeeVersion }}%s
%s%s
├── Beego : {{ .BeegoVersion }}
├── GoVersion : {{ .GoVersion }}
├── GOOS : {{ .GOOS }}
├── GOARCH : {{ .GOARCH }}
├── NumCPU : {{ .NumCPU }}
├── GOPATH : {{ .GOPATH }}
├── GOROOT : {{ .GOROOT }}
├── Compiler : {{ .Compiler }}
└── Date : {{ Now "Monday, 2 Jan 2006" }}%s
`
const shortVersionBanner = `______
| ___ \
| |_/ / ___ ___
| ___ \ / _ \ / _ \
| |_/ /| __/| __/
\____/ \___| \___| v{{ .BeeVersion }}
`
var CmdVersion = &commands.Command{
UsageLine: "version",
Short: "Prints the current Bee version",
Long: `
Prints the current Bee, Beego and Go version alongside the platform information.
`,
Run: versionCmd,
}
var outputFormat string
const version = "1.12.1"
func init() {
fs := flag.NewFlagSet("version", flag.ContinueOnError)
fs.StringVar(&outputFormat, "o", "", "Set the output format. Either json or yaml.")
CmdVersion.Flag = *fs
commands.AvailableCommands = append(commands.AvailableCommands, CmdVersion)
}
func versionCmd(cmd *commands.Command, args []string) int {
cmd.Flag.Parse(args)
stdout := cmd.Out()
if outputFormat != "" {
runtimeInfo := RuntimeInfo{
GetGoVersion(),
runtime.GOOS,
runtime.GOARCH,
runtime.NumCPU(),
os.Getenv("GOPATH"),
runtime.GOROOT(),
runtime.Compiler,
version,
GetBeegoVersion(),
}
switch outputFormat {
case "json":
{
b, err := json.MarshalIndent(runtimeInfo, "", " ")
if err != nil {
beeLogger.Log.Error(err.Error())
}
fmt.Println(string(b))
return 0
}
case "yaml":
{
b, err := yaml.Marshal(&runtimeInfo)
if err != nil {
beeLogger.Log.Error(err.Error())
}
fmt.Println(string(b))
return 0
}
}
}
coloredBanner := fmt.Sprintf(verboseVersionBanner, "\x1b[35m", "\x1b[1m",
"\x1b[0m", "\x1b[32m", "\x1b[1m", "\x1b[0m")
InitBanner(stdout, bytes.NewBufferString(coloredBanner))
return 0
}
// ShowShortVersionBanner prints the short version banner.
func ShowShortVersionBanner() {
output := colors.NewColorWriter(os.Stdout)
InitBanner(output, bytes.NewBufferString(colors.MagentaBold(shortVersionBanner)))
}
func GetBeegoVersion() string {
re, err := regexp.Compile(`VERSION = "([0-9.]+)"`)
if err != nil {
return ""
}
wgopath := utils.GetGOPATHs()
if len(wgopath) == 0 {
beeLogger.Log.Error("You need to set GOPATH environment variable")
return ""
}
for _, wg := range wgopath {
wg, _ = path.EvalSymlinks(path.Join(wg, "src", "github.com", "astaxie", "beego"))
filename := path.Join(wg, "beego.go")
_, err := os.Stat(filename)
if err != nil {
if os.IsNotExist(err) {
continue
}
beeLogger.Log.Error("Error while getting stats of 'beego.go'")
}
fd, err := os.Open(filename)
if err != nil {
beeLogger.Log.Error("Error while reading 'beego.go'")
continue
}
reader := bufio.NewReader(fd)
for {
byteLine, _, er := reader.ReadLine()
if er != nil && er != io.EOF {
return ""
}
if er == io.EOF {
break
}
line := string(byteLine)
s := re.FindStringSubmatch(line)
if len(s) >= 2 {
return s[1]
}
}
}
return "Beego is not installed. Please do consider installing it first: https://github.com/astaxie/beego"
}
func GetGoVersion() string {
var (
cmdOut []byte
err error
)
if cmdOut, err = exec.Command("go", "version").Output(); err != nil {
beeLogger.Log.Fatalf("There was an error running 'go version' command: %s", err)
}
return strings.Split(string(cmdOut), " ")[2]
}
|
[
"\"GOPATH\""
] |
[] |
[
"GOPATH"
] |
[]
|
["GOPATH"]
|
go
| 1 | 0 | |
component/demo_redis/redis_demo1.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2019-03-19 20:20
# @Author : erwin
import redis
import json
class RedisClient(object):
config_key = 'redis'
def __init__(self):
self.host = "localhost"
self.port = "6379"
self.db = "0"
self.redis_client = redis.StrictRedis(host=self.host, port=self.port, db=self.db)
def clear_all(self):
"""
:return: clear all data
"""
self.redis_client.flushdb()
def delete_key(self, keystr):
"""
:param keystr:
:return: delete keys of the given type (prefix)
"""
key_list = []
for key in self.redis_client.scan_iter(match=keystr + '*', count=10000):
key_list.append(key)
for key in key_list:
self.redis_client.delete(key)
def set_cache_data(self, contents, expire=3600 * 24):
"""
:param contents:
:param expire:
:return: bulk-insert data
"""
for content in contents:
key = content['key']
content.pop('key')
self.redis_client.setex(key, expire, json.dumps(content))
def set_single_data(self, key, value):
"""
:param key:
:param value:
:return: insert a single record
"""
self.redis_client.set(key, value)
self.redis_client.save()
def get_cache_bluk_data(self, keys):
"""
:param keys:
:return: query; here keys is a list
"""
pipe = self.redis_client.pipeline()
pipe.mget(keys)
res_ls = []
for (k, v) in zip(keys, pipe.execute()):
res_ls.extend([json.loads(item) for item in v])
return res_ls
def get_cache_json_data(self, key):
"""
:param key:
:return: query; here key is a string
"""
keys = self.redis_client.keys(key + "*")
pipe = self.redis_client.pipeline()
pipe.mget(keys)
res_ls = []
for (k, v) in zip(keys, pipe.execute()):
res_ls.extend([json.loads(item) for item in v])
return res_ls
def get_cache_data(self, key):
"""
:param key:
:return: query; here key is a string
"""
keys = self.redis_client.keys(key + "*")
if len(keys) == 0:
return [], []
pipe = self.redis_client.pipeline()
pipe.mget(keys)
value_ls = []
for v in pipe.execute():
value_ls.extend(v)
res_ls = []
for k, v in zip(keys, value_ls):
res_ls.append((k, v))
return res_ls
if __name__ == '__main__':
print(RedisClient().get_cache_bluk_data(
["error500_component1_stype1_c1_etype1_2019-03-19_100_2019-01-19 07:10:00",
"error500_component1_stype1_c1_etype1_2019-03-19_100_2019-01-19 07:15:00",
"error500_component1_stype1_c1_etype1_2019-03-19_100_2019-01-19 07:20:00"]
))
print(RedisClient().get_cache_data("error500_component1_stype1_c1_etype1_2019-03-19_100_2019-01-19 07:1"))
print(RedisClient().get_cache_data("aa"))
print(RedisClient().get_cache_bluk_data("aa"))
RedisClient().set_single_data("aa", 1)
res_ls = RedisClient().get_cache_data("[email protected]")
# keys, value_ls = RedisClient().get_cache_data("[email protected]")
for i in res_ls:
print(i)
|
[] |
[] |
[] |
[]
|
[]
|
python
| null | null | null |
services/filesstore/filesstore_test.go
|
// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved.
// See LICENSE.txt for license information.
package filesstore
import (
"bytes"
"fmt"
"io/ioutil"
"os"
"testing"
"github.com/stretchr/testify/require"
"github.com/stretchr/testify/suite"
"github.com/mattermost/mattermost-server/v5/mlog"
"github.com/mattermost/mattermost-server/v5/model"
"github.com/mattermost/mattermost-server/v5/utils"
)
type FileBackendTestSuite struct {
suite.Suite
settings model.FileSettings
backend FileBackend
}
func TestLocalFileBackendTestSuite(t *testing.T) {
// Setup a global logger to catch tests logging outside of app context
// The global logger will be stomped by apps initializing but that's fine for testing. Ideally this won't happen.
mlog.InitGlobalLogger(mlog.NewLogger(&mlog.LoggerConfiguration{
EnableConsole: true,
ConsoleJson: true,
ConsoleLevel: "error",
EnableFile: false,
}))
dir, err := ioutil.TempDir("", "")
require.NoError(t, err)
defer os.RemoveAll(dir)
suite.Run(t, &FileBackendTestSuite{
settings: model.FileSettings{
DriverName: model.NewString(model.IMAGE_DRIVER_LOCAL),
Directory: &dir,
},
})
}
func TestS3FileBackendTestSuite(t *testing.T) {
runBackendTest(t, false)
}
func TestS3FileBackendTestSuiteWithEncryption(t *testing.T) {
runBackendTest(t, true)
}
func runBackendTest(t *testing.T, encrypt bool) {
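// CI_MINIO_HOST and CI_MINIO_PORT override the default localhost:9000 MinIO endpoint used for the S3 backend tests.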
s3Host := os.Getenv("CI_MINIO_HOST")
if s3Host == "" {
s3Host = "localhost"
}
s3Port := os.Getenv("CI_MINIO_PORT")
if s3Port == "" {
s3Port = "9000"
}
s3Endpoint := fmt.Sprintf("%s:%s", s3Host, s3Port)
suite.Run(t, &FileBackendTestSuite{
settings: model.FileSettings{
DriverName: model.NewString(model.IMAGE_DRIVER_S3),
AmazonS3AccessKeyId: model.NewString(model.MINIO_ACCESS_KEY),
AmazonS3SecretAccessKey: model.NewString(model.MINIO_SECRET_KEY),
AmazonS3Bucket: model.NewString(model.MINIO_BUCKET),
AmazonS3Region: model.NewString(""),
AmazonS3Endpoint: model.NewString(s3Endpoint),
AmazonS3PathPrefix: model.NewString(""),
AmazonS3SSL: model.NewBool(false),
AmazonS3SSE: model.NewBool(encrypt),
},
})
}
func (s *FileBackendTestSuite) SetupTest() {
utils.TranslationsPreInit()
backend, err := NewFileBackend(&s.settings, true)
require.Nil(s.T(), err)
s.backend = backend
}
func (s *FileBackendTestSuite) TestConnection() {
s.Nil(s.backend.TestConnection())
}
func (s *FileBackendTestSuite) TestReadWriteFile() {
b := []byte("test")
path := "tests/" + model.NewId()
written, err := s.backend.WriteFile(bytes.NewReader(b), path)
s.Nil(err)
s.EqualValues(len(b), written, "expected given number of bytes to have been written")
defer s.backend.RemoveFile(path)
read, err := s.backend.ReadFile(path)
s.Nil(err)
readString := string(read)
s.EqualValues(readString, "test")
}
func (s *FileBackendTestSuite) TestReadWriteFileImage() {
b := []byte("testimage")
path := "tests/" + model.NewId() + ".png"
written, err := s.backend.WriteFile(bytes.NewReader(b), path)
s.Nil(err)
s.EqualValues(len(b), written, "expected given number of bytes to have been written")
defer s.backend.RemoveFile(path)
read, err := s.backend.ReadFile(path)
s.Nil(err)
readString := string(read)
s.EqualValues(readString, "testimage")
}
func (s *FileBackendTestSuite) TestFileExists() {
b := []byte("testimage")
path := "tests/" + model.NewId() + ".png"
_, err := s.backend.WriteFile(bytes.NewReader(b), path)
s.Nil(err)
defer s.backend.RemoveFile(path)
res, err := s.backend.FileExists(path)
s.Nil(err)
s.True(res)
res, err = s.backend.FileExists("tests/idontexist.png")
s.Nil(err)
s.False(res)
}
func (s *FileBackendTestSuite) TestCopyFile() {
b := []byte("test")
path1 := "tests/" + model.NewId()
path2 := "tests/" + model.NewId()
written, err := s.backend.WriteFile(bytes.NewReader(b), path1)
s.Nil(err)
s.EqualValues(len(b), written, "expected given number of bytes to have been written")
defer s.backend.RemoveFile(path1)
err = s.backend.CopyFile(path1, path2)
s.Nil(err)
defer s.backend.RemoveFile(path2)
_, err = s.backend.ReadFile(path1)
s.Nil(err)
_, err = s.backend.ReadFile(path2)
s.Nil(err)
}
func (s *FileBackendTestSuite) TestCopyFileToDirectoryThatDoesntExist() {
b := []byte("test")
path1 := "tests/" + model.NewId()
path2 := "tests/newdirectory/" + model.NewId()
written, err := s.backend.WriteFile(bytes.NewReader(b), path1)
s.Nil(err)
s.EqualValues(len(b), written, "expected given number of bytes to have been written")
defer s.backend.RemoveFile(path1)
err = s.backend.CopyFile(path1, path2)
s.Nil(err)
defer s.backend.RemoveFile(path2)
_, err = s.backend.ReadFile(path1)
s.Nil(err)
_, err = s.backend.ReadFile(path2)
s.Nil(err)
}
func (s *FileBackendTestSuite) TestMoveFile() {
b := []byte("test")
path1 := "tests/" + model.NewId()
path2 := "tests/" + model.NewId()
written, err := s.backend.WriteFile(bytes.NewReader(b), path1)
s.Nil(err)
s.EqualValues(len(b), written, "expected given number of bytes to have been written")
defer s.backend.RemoveFile(path1)
s.Nil(s.backend.MoveFile(path1, path2))
defer s.backend.RemoveFile(path2)
_, err = s.backend.ReadFile(path1)
s.Error(err)
_, err = s.backend.ReadFile(path2)
s.Nil(err)
}
func (s *FileBackendTestSuite) TestRemoveFile() {
b := []byte("test")
path := "tests/" + model.NewId()
written, err := s.backend.WriteFile(bytes.NewReader(b), path)
s.Nil(err)
s.EqualValues(len(b), written, "expected given number of bytes to have been written")
s.Nil(s.backend.RemoveFile(path))
_, err = s.backend.ReadFile(path)
s.Error(err)
written, err = s.backend.WriteFile(bytes.NewReader(b), "tests2/foo")
s.Nil(err)
s.EqualValues(len(b), written, "expected given number of bytes to have been written")
written, err = s.backend.WriteFile(bytes.NewReader(b), "tests2/bar")
s.Nil(err)
s.EqualValues(len(b), written, "expected given number of bytes to have been written")
written, err = s.backend.WriteFile(bytes.NewReader(b), "tests2/asdf")
s.Nil(err)
s.EqualValues(len(b), written, "expected given number of bytes to have been written")
s.Nil(s.backend.RemoveDirectory("tests2"))
}
func (s *FileBackendTestSuite) TestListDirectory() {
b := []byte("test")
path1 := "19700101/" + model.NewId()
path2 := "19800101/" + model.NewId()
paths, err := s.backend.ListDirectory("19700101")
s.Nil(err)
s.Len(*paths, 0)
written, err := s.backend.WriteFile(bytes.NewReader(b), path1)
s.Nil(err)
s.EqualValues(len(b), written, "expected given number of bytes to have been written")
written, err = s.backend.WriteFile(bytes.NewReader(b), path2)
s.Nil(err)
s.EqualValues(len(b), written, "expected given number of bytes to have been written")
paths, err = s.backend.ListDirectory("19700101")
s.Nil(err)
s.Len(*paths, 1)
s.Equal(path1, (*paths)[0])
paths, err = s.backend.ListDirectory("19700101/")
s.Nil(err)
s.Len(*paths, 1)
s.Equal(path1, (*paths)[0])
paths, err = s.backend.ListDirectory("")
s.Nil(err)
found1 := false
found2 := false
for _, path := range *paths {
if path == "19700101" {
found1 = true
} else if path == "19800101" {
found2 = true
}
}
s.True(found1)
s.True(found2)
s.backend.RemoveFile(path1)
s.backend.RemoveFile(path2)
}
func (s *FileBackendTestSuite) TestRemoveDirectory() {
b := []byte("test")
written, err := s.backend.WriteFile(bytes.NewReader(b), "tests2/foo")
s.Nil(err)
s.EqualValues(len(b), written, "expected given number of bytes to have been written")
written, err = s.backend.WriteFile(bytes.NewReader(b), "tests2/bar")
s.Nil(err)
s.EqualValues(len(b), written, "expected given number of bytes to have been written")
written, err = s.backend.WriteFile(bytes.NewReader(b), "tests2/aaa")
s.Nil(err)
s.EqualValues(len(b), written, "expected given number of bytes to have been written")
s.Nil(s.backend.RemoveDirectory("tests2"))
_, err = s.backend.ReadFile("tests2/foo")
s.Error(err)
_, err = s.backend.ReadFile("tests2/bar")
s.Error(err)
_, err = s.backend.ReadFile("tests2/asdf")
s.Error(err)
}
|
[
"\"CI_MINIO_HOST\"",
"\"CI_MINIO_PORT\""
] |
[] |
[
"CI_MINIO_PORT",
"CI_MINIO_HOST"
] |
[]
|
["CI_MINIO_PORT", "CI_MINIO_HOST"]
|
go
| 2 | 0 | |
benchbuild/extensions/run.py
|
import logging
import os
import yaml
from plumbum import local
from benchbuild.extensions import base
from benchbuild.utils import db, run
from benchbuild.utils.settings import get_number_of_jobs
LOG = logging.getLogger(__name__)
class RuntimeExtension(base.Extension):
"""
Default extension to execute and track a binary.
This can be used for runtime experiments to have a controlled,
tracked execution of a wrapped binary.
"""
def __init__(self, project, experiment, *extensions, config=None):
self.project = project
self.experiment = experiment
super().__init__(*extensions, config=config)
def __call__(self, binary_command, *args, **kwargs):
self.project.name = kwargs.get("project_name", self.project.name)
cmd = binary_command[args]
with run.track_execution(
cmd, self.project, self.experiment, **kwargs
) as _run:
run_info = _run()
if self.config:
run_info.add_payload("config", self.config)
LOG.info(
yaml.dump(
self.config,
width=40,
indent=4,
default_flow_style=False
)
)
self.config['baseline'] = \
os.getenv("BB_IS_BASELINE", "False")
db.persist_config(
run_info.db_run, run_info.session, self.config
)
res = self.call_next(binary_command, *args, **kwargs)
res.append(run_info)
return res
def __str__(self):
return "Run wrapped binary"
class WithTimeout(base.Extension):
"""
Guard a binary with a timeout.
This wraps a any binary with a call to `timeout` and sets
the limit to a given value on extension construction.
"""
def __init__(self, *extensions, limit="10m", **kwargs):
super().__init__(*extensions, **kwargs)
self.limit = limit
def __call__(self, binary_command, *args, **kwargs):
from benchbuild.utils.cmd import timeout
return self.call_next(
timeout[self.limit, binary_command], *args, **kwargs
)
class SetThreadLimit(base.Extension):
"""Sets the OpenMP thread limit, based on your settings.
This extension uses the 'jobs' settings and controls the environment
variable OMP_NUM_THREADS.
"""
def __call__(self, binary_command, *args, **kwargs):
from benchbuild.settings import CFG
config = self.config
if config is not None and 'jobs' in config.keys():
jobs = get_number_of_jobs(config)
else:
LOG.warning("Parameter 'config' was unusable, using defaults")
jobs = get_number_of_jobs(CFG)
ret = None
with local.env(OMP_NUM_THREADS=str(jobs)):
ret = self.call_next(binary_command, *args, **kwargs)
return ret
def __str__(self):
return "Limit number of OpenMP threads"
class Rerun(base.Extension):
pass
|
[] |
[] |
[
"BB_IS_BASELINE"
] |
[]
|
["BB_IS_BASELINE"]
|
python
| 1 | 0 | |
src/scripts/schnetpack_md17.py
|
#!/usr/bin/env python
import argparse
import logging
import os
from shutil import copyfile, rmtree
import numpy as np
import torch
import torch.nn as nn
from ase.data import atomic_numbers
from torch.optim import Adam
from torch.utils.data.sampler import RandomSampler
import schnetpack as spk
from schnetpack.datasets import MD17
from schnetpack.utils import compute_params, to_json, read_from_json
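# LOGLEVEL (e.g. DEBUG, INFO) controls logging verbosity; it defaults to INFO below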
logging.basicConfig(level=os.environ.get("LOGLEVEL", "INFO"))
def get_parser():
""" Setup parser for command line arguments """
main_parser = argparse.ArgumentParser()
## command-specific
cmd_parser = argparse.ArgumentParser(add_help=False)
cmd_parser.add_argument('--cuda', help='Set flag to use GPU(s)', action='store_true')
cmd_parser.add_argument('--parallel',
help='Run data-parallel on all available GPUs (specify with environment variable'
+ ' CUDA_VISIBLE_DEVICES)', action='store_true')
cmd_parser.add_argument('--batch_size', type=int,
help='Mini-batch size for training and prediction (default: %(default)s)',
default=100)
## training
train_parser = argparse.ArgumentParser(add_help=False, parents=[cmd_parser])
train_parser.add_argument('datapath', help='Path / destination of MD17 dataset directory')
train_parser.add_argument('molecule', help='Selected molecule trajectory of MD17 collection',
choices=MD17.existing_datasets)
train_parser.add_argument('modelpath', help='Destination for models and logs')
train_parser.add_argument('--seed', type=int, default=None, help='Set random seed for torch and numpy.')
train_parser.add_argument('--overwrite', help='Remove previous model directory.', action='store_true')
# data split
train_parser.add_argument('--split_path', help='Path / destination of npz with data splits',
default=None)
train_parser.add_argument('--split', help='Give sizes of train and validation splits and use remaining for testing',
type=int, nargs=2, default=[None, None])
train_parser.add_argument('--max_epochs', type=int, help='Maximum number of training epochs (default: %(default)s)',
default=5000)
train_parser.add_argument('--lr', type=float, help='Initial learning rate (default: %(default)s)',
default=1e-4)
train_parser.add_argument('--lr_patience', type=int,
help='Epochs without improvement before reducing the learning rate (default: %(default)s)',
default=25)
train_parser.add_argument('--lr_decay', type=float, help='Learning rate decay (default: %(default)s)',
default=0.5)
train_parser.add_argument('--lr_min', type=float, help='Minimal learning rate (default: %(default)s)',
default=1e-6)
train_parser.add_argument('--rho', type=float,
help='Energy-force trade-off. For rho=0, use forces only. (default: %(default)s)',
default=0.1)
train_parser.add_argument('--logger', help='Choose logger for training process (default: %(default)s)',
choices=['csv', 'tensorboard'], default='csv')
train_parser.add_argument('--log_every_n_epochs', type=int,
help='Log metrics every given number of epochs (default: %(default)s)',
default=1)
## evaluation
eval_parser = argparse.ArgumentParser(add_help=False, parents=[cmd_parser])
eval_parser.add_argument('datapath', help='Path / destination of MD17 dataset directory')
eval_parser.add_argument('molecule', help='Molecule trajectory',
choices=MD17.existing_datasets)
eval_parser.add_argument('modelpath', help='Path of stored model')
eval_parser.add_argument('--split', help='Evaluate on trained model on given split',
choices=['train', 'validation', 'test'], default=['test'], nargs='+')
# model-specific parsers
model_parser = argparse.ArgumentParser(add_help=False)
####### SchNet #######
schnet_parser = argparse.ArgumentParser(add_help=False, parents=[model_parser])
schnet_parser.add_argument('--features', type=int, help='Size of atom-wise representation (default: %(default)s)',
default=256)
schnet_parser.add_argument('--interactions', type=int, help='Number of interaction blocks (default: %(default)s)',
default=6)
schnet_parser.add_argument('--cutoff', type=float, default=5.,
help='Cutoff radius of local environment (default: %(default)s)')
schnet_parser.add_argument('--num_gaussians', type=int, default=25,
help='Number of Gaussians to expand distances (default: %(default)s)')
####### wACSF ########
wacsf_parser = argparse.ArgumentParser(add_help=False, parents=[model_parser])
# wACSF parameters
wacsf_parser.add_argument('--radial', type=int, default=22,
help='Number of radial symmetry functions (default: %(default)s)')
wacsf_parser.add_argument('--angular', type=int, default=5,
help='Number of angular symmetry functions (default: %(default)s)')
wacsf_parser.add_argument('--zetas', type=int, nargs='+', default=[1],
help='List of zeta exponents used for angle resolution (default: %(default)s)')
wacsf_parser.add_argument('--standardize', action='store_true',
help='Standardize wACSF before atomistic network.')
wacsf_parser.add_argument('--cutoff', type=float, default=5.0,
help='Cutoff radius of local environment (default: %(default)s)')
# Atomistic network parameters
wacsf_parser.add_argument('--n_nodes', type=int, default=100,
help='Number of nodes in atomic networks (default: %(default)s)')
wacsf_parser.add_argument('--n_layers', type=int, default=2,
help='Number of layers in atomic networks (default: %(default)s)')
# Advances wACSF settings
wacsf_parser.add_argument('--centered', action='store_true', help='Use centered Gaussians for radial functions')
wacsf_parser.add_argument('--crossterms', action='store_true', help='Use crossterms in angular functions')
wacsf_parser.add_argument('--behler', action='store_true', help='Switch to conventional ACSF')
wacsf_parser.add_argument('--elements', default=['H', 'C', 'O'], nargs='+',
help='List of elements to be used for symmetry functions (default: %(default)s).')
## setup subparser structure
cmd_subparsers = main_parser.add_subparsers(dest='mode', help='Command-specific arguments')
cmd_subparsers.required = True
subparser_train = cmd_subparsers.add_parser('train', help='Training help')
subparser_eval = cmd_subparsers.add_parser('eval', help='Eval help')
subparser_export = cmd_subparsers.add_parser('export', help='Export help')
subparser_export.add_argument('modelpath', help='Path of stored model')
subparser_export.add_argument('destpath', help='Destination path for exported model')
train_subparsers = subparser_train.add_subparsers(dest='model', help='Model-specific arguments')
train_subparsers.required = True
train_subparsers.add_parser('schnet', help='SchNet help', parents=[train_parser, schnet_parser])
train_subparsers.add_parser('wacsf', help='wACSF help', parents=[train_parser, wacsf_parser])
eval_subparsers = subparser_eval.add_subparsers(dest='model', help='Model-specific arguments')
eval_subparsers.required = True
eval_subparsers.add_parser('schnet', help='SchNet help', parents=[eval_parser, schnet_parser])
eval_subparsers.add_parser('wacsf', help='wACSF help', parents=[eval_parser, wacsf_parser])
return main_parser
def train(args, model, train_loader, val_loader, device):
# setup hook and logging
hooks = [
spk.train.MaxEpochHook(args.max_epochs)
]
# setup optimizer for training
# to_opt = model.parameters()
# Bugfix: only optimize parameters with requires_grad, otherwise the model will not train
to_opt = filter(lambda p: p.requires_grad, model.parameters())
optimizer = Adam(to_opt, lr=args.lr)
schedule = spk.train.ReduceLROnPlateauHook(optimizer, patience=args.lr_patience, factor=args.lr_decay,
min_lr=args.lr_min,
window_length=1, stop_after_min=True)
hooks.append(schedule)
# index into model output: [energy, forces]
metrics = [spk.metrics.MeanAbsoluteError(MD17.energies, "y"),
spk.metrics.RootMeanSquaredError(MD17.energies, "y"),
spk.metrics.MeanAbsoluteError(MD17.forces, "dydx"),
spk.metrics.RootMeanSquaredError(MD17.forces, "dydx")]
if args.logger == 'csv':
logger = spk.train.CSVHook(os.path.join(args.modelpath, 'log'),
metrics, every_n_epochs=args.log_every_n_epochs)
hooks.append(logger)
elif args.logger == 'tensorboard':
logger = spk.train.TensorboardHook(os.path.join(args.modelpath, 'log'),
metrics, every_n_epochs=args.log_every_n_epochs)
hooks.append(logger)
# setup loss function
def loss(batch, result):
ediff = batch[MD17.energies] - result["y"]
ediff = ediff ** 2
fdiff = batch[MD17.forces] - result["dydx"]
fdiff = fdiff ** 2
err_sq = args.rho * torch.mean(ediff.view(-1)) + (1 - args.rho) * torch.mean(fdiff.view(-1))
return err_sq
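    # For example (illustrative numbers only): with rho=0.1 the combined objective is
    #   0.1 * MSE(energy) + 0.9 * MSE(forces),
    # i.e. the force term is weighted nine times more strongly than the energy term.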
trainer = spk.train.Trainer(args.modelpath, model, loss, optimizer,
train_loader, val_loader, hooks=hooks)
trainer.train(device)
def evaluate(args, model, train_loader, val_loader, test_loader, device):
header = ['Subset', 'Energy MAE', 'Energy RMSE',
              'Force MAE', 'Force RMSE', 'Force Length MAE', 'Force Length RMSE', 'Force Angle MAE', 'Force Angle RMSE']
metrics = [
spk.metrics.MeanAbsoluteError(MD17.energies, "y"),
spk.metrics.RootMeanSquaredError(MD17.energies, "y"),
spk.metrics.MeanAbsoluteError(MD17.forces, "dydx"),
spk.metrics.RootMeanSquaredError(MD17.forces, "dydx"),
spk.metrics.LengthMAE(MD17.forces, "dydx"),
spk.metrics.LengthRMSE(MD17.forces, "dydx"),
spk.metrics.AngleMAE(MD17.forces, "dydx"),
spk.metrics.AngleRMSE(MD17.forces, "dydx")
]
results = []
if 'train' in args.split:
results.append(['training'] + ['%.5f' % i for i in evaluate_dataset(metrics, model, train_loader, device)])
if 'validation' in args.split:
results.append(['validation'] + ['%.5f' % i for i in evaluate_dataset(metrics, model, val_loader, device)])
if 'test' in args.split:
results.append(['test'] + ['%.5f' % i for i in evaluate_dataset(metrics, model, test_loader, device)])
header = ','.join(header)
results = np.array(results)
np.savetxt(os.path.join(args.modelpath, 'evaluation.csv'), results, header=header, fmt='%s', delimiter=',')
def evaluate_dataset(metrics, model, loader, device):
for metric in metrics:
metric.reset()
for batch in loader:
batch = {
k: v.to(device)
for k, v in batch.items()
}
result = model(batch)
for metric in metrics:
metric.add_batch(batch, result)
results = [
metric.aggregate() for metric in metrics
]
return results
def get_model(args, atomref=None, mean=None, stddev=None, train_loader=None, parallelize=False, mode='train'):
if args.model == 'schnet':
representation = spk.representation.SchNet(args.features, args.features, args.interactions,
args.cutoff, args.num_gaussians)
atomwise_output = spk.atomistic.Energy(args.features, mean=mean, stddev=stddev, atomref=atomref,
return_force=True, create_graph=True)
model = spk.atomistic.AtomisticModel(representation, atomwise_output)
elif args.model == 'wacsf':
sfmode = ('weighted', 'Behler')[args.behler]
# Convert element strings to atomic charges
elements = frozenset((atomic_numbers[i] for i in sorted(args.elements)))
representation = spk.representation.BehlerSFBlock(args.radial, args.angular, zetas=set(args.zetas),
cutoff_radius=args.cutoff,
centered=args.centered, crossterms=args.crossterms,
elements=elements,
mode=sfmode)
logging.info("Using {:d} {:s}-type SF".format(representation.n_symfuncs, sfmode))
# Standardize representation if requested
if args.standardize and mode == 'train':
if train_loader is None:
raise ValueError("Specification of a trainig_loader is required to standardize wACSF")
else:
logging.info("Computing and standardizing symmetry function statistics")
else:
train_loader = None
representation = spk.representation.StandardizeSF(representation, train_loader, cuda=args.cuda)
# Build HDNN model
atomwise_output = spk.atomistic.ElementalEnergy(representation.n_symfuncs, n_hidden=args.n_nodes,
n_layers=args.n_layers, mean=mean, stddev=stddev,
atomref=atomref, return_force=True, create_graph=True,
elements=elements)
model = spk.atomistic.AtomisticModel(representation, atomwise_output)
else:
raise ValueError('Unknown model class:', args.model)
if parallelize:
model = nn.DataParallel(model)
logging.info("The model you built has: %d parameters" % compute_params(model))
return model
if __name__ == '__main__':
parser = get_parser()
args = parser.parse_args()
device = torch.device("cuda" if args.cuda else "cpu")
argparse_dict = vars(args)
jsonpath = os.path.join(args.modelpath, 'args.json')
if args.mode == 'train':
if args.overwrite and os.path.exists(args.modelpath):
logging.info('existing model will be overwritten...')
rmtree(args.modelpath)
if not os.path.exists(args.modelpath):
os.makedirs(args.modelpath)
to_json(jsonpath, argparse_dict)
spk.utils.set_random_seed(args.seed)
train_args = args
else:
train_args = read_from_json(jsonpath)
    # will download MD17 if necessary; collect_triples is required for wACSF angular functions
logging.info('MD17 will be loaded...')
md17 = MD17(args.datapath, args.molecule, download=True, parse_all=False, collect_triples=args.model == 'wacsf')
# splits the dataset in test, val, train sets
split_path = os.path.join(args.modelpath, 'split.npz')
if args.mode == 'train':
if args.split_path is not None:
copyfile(args.split_path, split_path)
logging.info('create splits...')
data_train, data_val, data_test = md17.create_splits(*train_args.split, split_file=split_path)
logging.info('load data...')
train_loader = spk.data.AtomsLoader(data_train, batch_size=args.batch_size, sampler=RandomSampler(data_train),
num_workers=4, pin_memory=True)
val_loader = spk.data.AtomsLoader(data_val, batch_size=args.batch_size, num_workers=2, pin_memory=True)
if args.mode == 'train':
logging.info('calculate statistics...')
mean, stddev = train_loader.get_statistics(MD17.energies, True)
else:
mean, stddev = None, None
# Construct the model.
model = get_model(train_args, mean=mean, stddev=stddev, train_loader=train_loader, parallelize=args.parallel,
mode=args.mode).to(device)
if args.mode == 'eval':
if args.parallel:
model.module.load_state_dict(
torch.load(os.path.join(args.modelpath, 'best_model')))
else:
model.load_state_dict(
torch.load(os.path.join(args.modelpath, 'best_model')))
if args.mode == 'train':
logging.info("training...")
train(args, model, train_loader, val_loader, device)
logging.info("...training done!")
elif args.mode == 'eval':
logging.info("evaluating...")
test_loader = spk.data.AtomsLoader(data_test, batch_size=args.batch_size,
num_workers=2, pin_memory=True)
evaluate(args, model, train_loader, val_loader, test_loader, device)
logging.info("... done!")
else:
print('Unknown mode:', args.mode)
|
[] |
[] |
[
"LOGLEVEL"
] |
[]
|
["LOGLEVEL"]
|
python
| 1 | 0 | |
tests/unit/template_medusa_config_test.go
|
package unit_test
import (
"path/filepath"
"github.com/gruntwork-io/terratest/modules/helm"
. "github.com/onsi/ginkgo"
. "github.com/onsi/ginkgo/extensions/table"
. "github.com/onsi/gomega"
)
var _ = Describe("Verify medusa config template", func() {
var (
helmChartPath string
)
BeforeEach(func() {
path, err := filepath.Abs(ChartsPath)
Expect(err).To(BeNil())
helmChartPath = path
})
renderTemplate := func(options *helm.Options) bool {
_, renderErr := helm.RenderTemplateE(
GinkgoT(), options, helmChartPath, HelmReleaseName,
[]string{"templates/medusa/medusa-config.yaml"})
return renderErr == nil
}
Context("generating medusa storage properties", func() {
DescribeTable("render template",
func(storageType string, expected bool) {
options := &helm.Options{
KubectlOptions: defaultKubeCtlOptions,
SetValues: map[string]string{
"medusa.enabled": "true",
"medusa.storage": storageType,
"medusa.bucketName": "testbucket",
"medusa.storageSecret": "secretkey",
"medusa.podStorage.size": "30Gi",
"medusa.podStorage.storageClass": "nfs",
},
}
Expect(renderTemplate(options)).To(Equal(expected))
},
Entry("supported s3", "s3", true),
Entry("supported s3 compatible", "s3_compatible", true),
Entry("supported google_storage", "google_storage", true),
Entry("supported azure_blobs", "azure_blobs", true),
Entry("supported local", "local", true),
Entry("unsupported ibm_storage (use s3_compatible instead)", "ibm_storage", false),
Entry("unsupported value", "random", false),
)
})
})
|
[] |
[] |
[] |
[]
|
[]
|
go
| null | null | null |
third_party/github.com/sirupsen/logrus/glog_formatter.go
|
// Copyright 2021 The searKing Author. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package logrus
import (
"bytes"
"fmt"
"os"
"runtime"
"sort"
"strconv"
"strings"
"sync"
"time"
"unicode/utf8"
"github.com/searKing/golang/go/runtime/goroutine"
strings_ "github.com/searKing/golang/go/strings"
time_ "github.com/searKing/golang/go/time"
"github.com/sirupsen/logrus"
)
const (
red = 31
yellow = 33
blue = 36
gray = 37
)
var (
timeNow = time.Now // Stubbed out for testing.
baseTimestamp time.Time
getPid = os.Getpid // Stubbed out for testing.
)
func init() {
baseTimestamp = timeNow()
}
// GlogFormatter formats logs into text
// https://medium.com/technical-tips/google-log-glog-output-format-7eb31b3f0ce5
// [IWEF]yyyymmdd hh:mm:ss.uuuuuu threadid file:line] msg
// IWEF — Log Levels, I for INFO, W for WARNING, E for ERROR and `F` for FATAL.
// yyyymmdd — Year, Month and Date.
// hh:mm:ss.uuuuuu — Hours, Minutes, Seconds and Microseconds.
// threadid — PID/TID of the process/thread.
// file:line — File name and line number.
// msg — Actual user-specified log message.
type GlogFormatter struct {
// Set to true to bypass checking for a TTY before outputting colors.
ForceColors bool
// Force disabling colors.
DisableColors bool
// Force quoting of all values
ForceQuote bool
// DisableQuote disables quoting for all values.
// DisableQuote will have a lower priority than ForceQuote.
// If both of them are set to true, quote will be forced on all values.
DisableQuote bool
// ForceGoroutineId enables goroutine id instead of pid.
ForceGoroutineId bool
// Override coloring based on CLICOLOR and CLICOLOR_FORCE. - https://bixense.com/clicolors/
EnvironmentOverrideColors bool
// Disable timestamp logging. useful when output is redirected to logging
// system that already adds timestamps.
DisableTimestamp bool
// Enable the time passed since beginning of execution instead of
// logging the full timestamp when a TTY is attached.
SinceStartTimestamp bool
// TimestampFormat to use for display when a full timestamp is printed
TimestampFormat string
// The fields are sorted by default for a consistent output. For applications
// that log extremely frequently and don't use the JSON formatter this may not
// be desired.
DisableSorting bool
// The keys sorting function, when uninitialized it uses sort.Strings.
SortingFunc func([]string)
// replace level.String()
LevelStringFunc func(level logrus.Level) string
// Set the truncation of the level text to n characters.
// >0, truncate the level text to n characters at most.
// =0, truncate the level text to 1 characters at most.
// <0, don't truncate
LevelTruncationLimit int
// Disables the glog style :[IWEF]yyyymmdd hh:mm:ss.uuuuuu threadid file:line] msg msg...
// replace with :[IWEF] [yyyymmdd] [hh:mm:ss.uuuuuu] [threadid] [file:line] msg msg...
HumanReadable bool
	// PadLevelText adds padding to the level text so that all the levels output at the same length
// PadLevelText is a superset of the DisableLevelTruncation option
PadLevelText bool
// WithFuncName append Caller's func name
WithFuncName bool
// QuoteEmptyFields will wrap empty fields in quotes if true
QuoteEmptyFields bool
// Whether the logger's out is to a terminal
isTerminal bool
// FieldMap allows users to customize the names of keys for default fields.
// As an example:
// formatter := &GlogFormatter{
// FieldMap: FieldMap{
// FieldKeyTime: "@timestamp",
// FieldKeyLevel: "@level",
// FieldKeyMsg: "@message"}}
FieldMap FieldMap
// CallerPrettyfier can be set by the user to modify the content
// of the function and file keys in the data when ReportCaller is
// activated. If any of the returned value is the empty string the
// corresponding key will be removed from fields.
CallerPrettyfier func(*runtime.Frame) (function string, file string)
terminalInitOnce sync.Once
// The max length of the level text, generated dynamically on init
levelTextMaxLength int
pid int
}
func NewGlogFormatter() *GlogFormatter {
return &GlogFormatter{}
}
func NewGlogEnhancedFormatter() *GlogFormatter {
return &GlogFormatter{
DisableQuote: true,
LevelTruncationLimit: 5,
PadLevelText: true,
HumanReadable: true,
WithFuncName: true,
QuoteEmptyFields: true,
DisableSorting: true,
LevelStringFunc: func(level logrus.Level) string {
if level == logrus.WarnLevel {
return "WARN"
}
return strings.ToUpper(level.String())
},
}
}
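// Example wiring (illustrative sketch, not part of the original file): the
// formatter plugs into a standard logrus.Logger; the exact output depends on
// terminal detection, color settings and the options chosen above.
//
//	logger := logrus.New()
//	logger.SetReportCaller(true)
//	logger.SetFormatter(NewGlogEnhancedFormatter())
//	logger.WithField("user", "alice").Info("service started")
//	// roughly: [INFO ] [20210308 23:47:32.089828] [4242] [example.go:12](main) service started, user=alice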
func (f *GlogFormatter) init(entry *logrus.Entry) {
if entry.Logger != nil {
f.isTerminal = checkIfTerminal(entry.Logger.Out)
}
// Get the max length of the level text
for _, level := range logrus.AllLevels {
levelTextLength := utf8.RuneCount([]byte(f.levelString(level)))
if levelTextLength > f.levelTextMaxLength {
f.levelTextMaxLength = levelTextLength
}
}
f.pid = getPid()
}
func (f *GlogFormatter) levelString(level logrus.Level) string {
if f.LevelStringFunc != nil {
return f.LevelStringFunc(level)
}
return strings.ToUpper(level.String())
}
func (f *GlogFormatter) isColored() bool {
isColored := f.ForceColors || (f.isTerminal && (runtime.GOOS != "windows"))
if f.EnvironmentOverrideColors {
switch force, ok := os.LookupEnv("CLICOLOR_FORCE"); {
case ok && force != "0":
isColored = true
case ok && force == "0", os.Getenv("CLICOLOR") == "0":
isColored = false
}
}
return isColored && !f.DisableColors
}
// Format renders a single log entry
func (f *GlogFormatter) Format(entry *logrus.Entry) ([]byte, error) {
data := make(logrus.Fields)
for k, v := range entry.Data {
data[k] = v
}
prefixFieldClashes(data, f.FieldMap, entry.HasCaller())
keys := make([]string, 0, len(data))
for k := range data {
if k == logrus.ErrorKey {
continue
}
keys = append(keys, k)
}
fixedKeys := make([]string, 0, 4+len(data))
if entry.Message != "" {
fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyMsg))
}
if _, has := data[logrus.ErrorKey]; has {
fixedKeys = append(fixedKeys, f.FieldMap.resolve(fieldKey(logrus.ErrorKey)))
}
if !f.DisableSorting {
if f.SortingFunc == nil {
sort.Strings(keys)
} else {
f.SortingFunc(keys)
}
fixedKeys = append(fixedKeys, keys...)
} else {
fixedKeys = append(fixedKeys, keys...)
}
var b *bytes.Buffer
if entry.Buffer != nil {
b = entry.Buffer
} else {
b = &bytes.Buffer{}
}
f.terminalInitOnce.Do(func() { f.init(entry) })
levelColor, levelText := f.level(entry.Level)
b.Write(f.header(entry, 0, levelColor, levelText))
for _, key := range fixedKeys {
var value interface{}
switch {
case key == f.FieldMap.resolve(FieldKeyMsg):
// Remove a single newline if it already exists in the message to keep
// the behavior of logrus glog_formatter the same as the stdlib log package
if levelColor > 0 {
value = strings.TrimSuffix(entry.Message, "\n")
} else {
value = entry.Message
}
if levelColor > 0 {
fmt.Fprintf(b, "\x1b[0m")
f.appendMessage(b, value)
} else {
f.appendMessage(b, value)
}
continue
case key == f.FieldMap.resolve(fieldKey(logrus.ErrorKey)):
value = data[logrus.ErrorKey]
default:
value = data[key]
}
if levelColor > 0 {
fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m=", levelColor, key)
f.appendValue(b, value)
} else {
f.appendKeyValue(b, key, value)
}
}
if levelColor <= 0 {
b.WriteByte('\n')
}
return b.Bytes(), nil
}
func (f *GlogFormatter) needsQuoting(text string, message bool) bool {
if f.ForceQuote {
return true
}
if f.QuoteEmptyFields && len(text) == 0 {
return true
}
if f.DisableQuote {
return false
}
if message {
return false
}
for _, ch := range text {
if !((ch >= 'a' && ch <= 'z') ||
(ch >= 'A' && ch <= 'Z') ||
(ch >= '0' && ch <= '9') ||
ch == '-' || ch == '.' || ch == '_' || ch == '/' || ch == '@' || ch == '^' || ch == '+') {
return true
}
}
return false
}
func (f *GlogFormatter) appendKeyValue(b *bytes.Buffer, key string, value interface{}) {
if b.Len() > 0 {
b.WriteString(", ")
}
b.WriteString(key)
b.WriteByte('=')
f.appendValue(b, value)
}
func (f *GlogFormatter) appendValue(b *bytes.Buffer, value interface{}) {
stringVal, ok := value.(string)
if !ok {
stringVal = fmt.Sprint(value)
}
if !f.needsQuoting(stringVal, false) {
b.WriteString(stringVal)
} else {
b.WriteString(fmt.Sprintf("%q", stringVal))
}
}
func (f *GlogFormatter) appendMessage(b *bytes.Buffer, value interface{}) {
stringVal, ok := value.(string)
if !ok {
stringVal = fmt.Sprint(value)
}
if !f.needsQuoting(stringVal, true) {
b.WriteString(stringVal)
} else {
b.WriteString(fmt.Sprintf("%q", stringVal))
}
}
/*
header formats a log header as defined by the C++ implementation.
It returns a buffer containing the formatted header and the user's file and line number.
The depth specifies how many stack frames above lives the source line to be identified in the log message.
Log lines have this form:
[IWEF]yyyymmdd hh:mm:ss.uuuuuu threadid file:line(func)] msg
where the fields are defined as follows:
L A single character, representing the log level (eg 'I' for INFO)
mm The month (zero padded; ie May is '05')
dd The day (zero padded)
hh:mm:ss.uuuuuu Time in hours, minutes and fractional seconds
threadid The space-padded thread ID as returned by GetTID()
file The file name
line The line number
msg The user-supplied message
*/
func (f *GlogFormatter) header(entry *logrus.Entry, depth int, levelColor int, levelText string) []byte {
var function string
var fileline string
if !entry.HasCaller() {
_, file, line, ok := runtime.Caller(3 + depth)
if !ok {
file = "???"
line = 1
} else {
slash := strings.LastIndex(file, "/")
if slash >= 0 {
file = file[slash+1:]
}
}
fileline = fmt.Sprintf("%s:%d", file, line)
} else {
var file = "???"
if f.CallerPrettyfier != nil {
function, file = f.CallerPrettyfier(entry.Caller)
} else {
function = entry.Caller.Function
file = entry.Caller.File
line := entry.Caller.Line
if line < 0 {
line = 0 // not a real line number, but acceptable to someDigits
}
slash := strings.LastIndex(function, ".")
if slash >= 0 {
function = function[slash+1:]
}
slash = strings.LastIndex(file, "/")
if slash >= 0 {
file = file[slash+1:]
}
fileline = fmt.Sprintf("%s:%d", file, line)
}
}
return f.formatHeader(entry, levelColor, levelText, fileline, function)
}
func (f *GlogFormatter) level(level logrus.Level) (levelColor int, levelText string) {
if level > logrus.TraceLevel {
level = logrus.InfoLevel // for safety.
}
if f.isColored() {
switch level {
case logrus.DebugLevel, logrus.TraceLevel:
levelColor = gray
case logrus.WarnLevel:
levelColor = yellow
case logrus.ErrorLevel, logrus.FatalLevel, logrus.PanicLevel:
levelColor = red
default:
levelColor = blue
}
}
levelText = f.levelString(level)
{
limit := f.LevelTruncationLimit
if limit > f.levelTextMaxLength {
limit = f.levelTextMaxLength
}
if limit == 0 {
limit = 1
}
if limit < 0 {
limit = f.levelTextMaxLength
}
if limit > 0 && limit < len(levelText) {
levelText = levelText[0:limit]
}
if f.PadLevelText {
// Generates the format string used in the next line, for example "%-6s" or "%-7s".
// Based on the max level text length.
formatString := "%-" + strconv.Itoa(limit) + "s"
// Formats the level text by appending spaces up to the max length, for example:
// - "INFO "
// - "WARNING"
levelText = fmt.Sprintf(formatString, levelText)
}
}
return levelColor, levelText
}
// formatHeader formats a log header using the provided file name and line number.
func (f *GlogFormatter) formatHeader(entry *logrus.Entry, levelColor int, levelText, fileline string, function string) []byte {
var buf bytes.Buffer
// Avoid Fprintf, for speed. The format is so simple that we can do it quickly by hand.
// It's worth about 3X. Fprintf is hard.
if levelColor > 0 {
switch {
case f.DisableTimestamp:
buf.WriteString(fmt.Sprintf("\x1b[%dm%s\x1b[0m", levelColor, levelText))
case f.SinceStartTimestamp:
buf.WriteString(fmt.Sprintf("\x1b[%dm%s\x1b[0m[%04d]", levelColor, levelText, int(entry.Time.Sub(baseTimestamp)/time.Second)))
default:
buf.WriteString(fmt.Sprintf("\x1b[%dm%s\x1b[0m[%s]", levelColor, levelText,
entry.Time.Format(strings_.ValueOrDefault(f.TimestampFormat, time_.GLogDate))))
}
} else {
// Log line format: [IWEF]yyyymmdd hh:mm:ss.uuuuuu threadid file:line] msg
// I20200308 23:47:32.089828 400441 config.cc:27] Loading user configuration: /home/aesophor/.config/wmderland/config
switch {
case f.DisableTimestamp:
if f.HumanReadable {
buf.WriteString(fmt.Sprintf("[%s]", levelText))
} else {
buf.WriteString(fmt.Sprintf("%s", levelText))
}
case f.SinceStartTimestamp:
if f.HumanReadable {
buf.WriteString(fmt.Sprintf("[%s] [%04d]", levelText, int(entry.Time.Sub(baseTimestamp)/time.Second)))
} else {
buf.WriteString(fmt.Sprintf("%s%04d", levelText, int(entry.Time.Sub(baseTimestamp)/time.Second)))
}
default:
layout := strings_.ValueOrDefault(f.TimestampFormat, time_.GLogDate)
var formatString string
if f.HumanReadable {
formatString = "[%s] [%s]"
} else {
formatString = "%s%s"
}
buf.WriteString(fmt.Sprintf(formatString, levelText, entry.Time.Format(layout)))
}
}
if f.ForceGoroutineId {
if f.HumanReadable {
buf.WriteString(fmt.Sprintf(" [%-3d]", goroutine.ID()))
} else {
buf.WriteString(fmt.Sprintf(" %-3d", goroutine.ID()))
}
} else {
if f.HumanReadable {
buf.WriteString(fmt.Sprintf(" [%d]", f.pid))
} else {
buf.WriteString(fmt.Sprintf(" %d", f.pid))
}
}
if f.WithFuncName && function != "" {
if f.HumanReadable {
buf.WriteString(fmt.Sprintf(" [%s](%s)", fileline, function))
} else {
buf.WriteString(fmt.Sprintf(" %s(%s)]", fileline, function))
}
} else {
if f.HumanReadable {
buf.WriteString(fmt.Sprintf(" [%s]", fileline))
} else {
buf.WriteString(fmt.Sprintf(" %s]", fileline))
}
}
buf.WriteString(" ")
return buf.Bytes()
}
|
[
"\"CLICOLOR\""
] |
[] |
[
"CLICOLOR"
] |
[]
|
["CLICOLOR"]
|
go
| 1 | 0 | |
train.py
|
from __future__ import print_function
import sys
if len(sys.argv) != 4:
print('Usage:')
print('python train.py datacfg cfgfile weightfile')
exit()
import time
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torch.backends.cudnn as cudnn
from torchvision import datasets, transforms
from torch.autograd import Variable
import dataset
import random
import math
import os
from utils import *
from cfg import parse_cfg
from region_loss import RegionLoss
from darknet import Darknet
from models.tiny_yolo import TinyYoloNet
# Training settings
datacfg = sys.argv[1]
cfgfile = sys.argv[2]
weightfile = sys.argv[3]
data_options = read_data_cfg(datacfg)
net_options = parse_cfg(cfgfile)[0]
trainlist = data_options['train']
testlist = data_options['valid']
backupdir = data_options['backup']
nsamples = file_lines(trainlist)
gpus = data_options['gpus'] # e.g. 0,1,2,3
ngpus = len(gpus.split(','))
num_workers = int(data_options['num_workers'])
batch_size = int(net_options['batch'])
max_batches = int(net_options['max_batches'])
learning_rate = float(net_options['learning_rate'])
momentum = float(net_options['momentum'])
decay = float(net_options['decay'])
steps = [float(step) for step in net_options['steps'].split(',')]
scales = [float(scale) for scale in net_options['scales'].split(',')]
#Train parameters
max_epochs = max_batches*batch_size/nsamples+1
use_cuda = True
seed = int(time.time())
eps = 1e-5
save_interval = 10 # epoches
dot_interval = 70 # batches
# Test parameters
conf_thresh = 0.25
nms_thresh = 0.4
iou_thresh = 0.5
if not os.path.exists(backupdir):
os.mkdir(backupdir)
###############
torch.manual_seed(seed)
if use_cuda:
os.environ['CUDA_VISIBLE_DEVICES'] = gpus
torch.cuda.manual_seed(seed)
model = Darknet(cfgfile)
region_loss = model.loss
model.load_weights(weightfile)
layers = list(model.children())[0]
for item in list(layers)[:-2]:
item.requires_grad=False
model.print_network()
region_loss.seen = model.seen
processed_batches = model.seen/batch_size
init_width = model.width
init_height = model.height
init_epoch = 0 #model.seen/nsamples
kwargs = {'num_workers': num_workers, 'pin_memory': True} if use_cuda else {}
test_loader = torch.utils.data.DataLoader(
dataset.listDataset(testlist, shape=(init_width, init_height),
shuffle=False,
transform=transforms.Compose([
transforms.ToTensor(),
]), train=False),
batch_size=batch_size, shuffle=False, **kwargs)
if use_cuda:
if ngpus > 1:
model = torch.nn.DataParallel(model).cuda()
else:
model = model.cuda()
params_dict = dict(model.named_parameters())
params = []
for key, value in params_dict.items():
if key.find('.bn') >= 0 or key.find('.bias') >= 0:
params += [{'params': [value], 'weight_decay': 0.0}]
else:
params += [{'params': [value], 'weight_decay': decay*batch_size}]
optimizer = optim.SGD(model.parameters(), lr=learning_rate, momentum=momentum, dampening=0, weight_decay=decay*batch_size)
def adjust_learning_rate(optimizer, batch):
"""Sets the learning rate to the initial LR decayed by 10 every 30 epochs"""
lr = learning_rate
for i in range(len(steps)):
scale = scales[i] if i < len(scales) else 1
if batch >= steps[i]:
lr = lr * scale
if batch == steps[i]:
break
else:
break
for param_group in optimizer.param_groups:
param_group['lr'] = lr/batch_size
return lr
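# Illustrative behaviour of adjust_learning_rate (placeholder values, not from a
# real cfg): with steps=[100, 20000] and scales=[10, 0.1], batches below 100 keep
# the base lr, batches in [100, 20000) use lr*10, and batches >= 20000 use
# lr*10*0.1; the value applied per parameter group is then lr/batch_size.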
def train(epoch):
global processed_batches
t0 = time.time()
if ngpus > 1:
cur_model = model.module
else:
cur_model = model
train_loader = torch.utils.data.DataLoader(
dataset.listDataset(trainlist, shape=(init_width, init_height),
shuffle=True,
transform=transforms.Compose([
transforms.ToTensor(),
]),
train=True,
seen=cur_model.seen,
batch_size=batch_size,
num_workers=num_workers),
batch_size=batch_size, shuffle=False, **kwargs)
lr = learning_rate #adjust_learning_rate(optimizer, processed_batches)
logging('epoch %d, processed %d samples, lr %f' % (epoch, epoch * len(train_loader.dataset), lr))
model.train()
t1 = time.time()
avg_time = torch.zeros(9)
for batch_idx, (data, target) in enumerate(train_loader):
t2 = time.time()
# adjust_learning_rate(optimizer, processed_batches)
processed_batches = processed_batches + 1
#if (batch_idx+1) % dot_interval == 0:
# sys.stdout.write('.')
if use_cuda:
data = data.cuda()
#target= target.cuda()
t3 = time.time()
data, target = Variable(data), Variable(target)
t4 = time.time()
optimizer.zero_grad()
t5 = time.time()
output = model(data)
t6 = time.time()
region_loss.seen = region_loss.seen + data.data.size(0)
loss = region_loss(output, target)
t7 = time.time()
loss.backward()
t8 = time.time()
optimizer.step()
t9 = time.time()
if False and batch_idx > 1:
avg_time[0] = avg_time[0] + (t2-t1)
avg_time[1] = avg_time[1] + (t3-t2)
avg_time[2] = avg_time[2] + (t4-t3)
avg_time[3] = avg_time[3] + (t5-t4)
avg_time[4] = avg_time[4] + (t6-t5)
avg_time[5] = avg_time[5] + (t7-t6)
avg_time[6] = avg_time[6] + (t8-t7)
avg_time[7] = avg_time[7] + (t9-t8)
avg_time[8] = avg_time[8] + (t9-t1)
            print('-------------------------------')
print(' load data : %f' % (avg_time[0]/(batch_idx)))
print(' cpu to cuda : %f' % (avg_time[1]/(batch_idx)))
print('cuda to variable : %f' % (avg_time[2]/(batch_idx)))
print(' zero_grad : %f' % (avg_time[3]/(batch_idx)))
print(' forward feature : %f' % (avg_time[4]/(batch_idx)))
print(' forward loss : %f' % (avg_time[5]/(batch_idx)))
print(' backward : %f' % (avg_time[6]/(batch_idx)))
print(' step : %f' % (avg_time[7]/(batch_idx)))
print(' total : %f' % (avg_time[8]/(batch_idx)))
t1 = time.time()
print('')
t1 = time.time()
logging('training with %f samples/s' % (len(train_loader.dataset)/(t1-t0)))
if (epoch+1) % save_interval == 0:
logging('save weights to %s/%06d.weights' % (backupdir, epoch+1))
cur_model.seen = (epoch + 1) * len(train_loader.dataset)
cur_model.save_weights('%s/%06d.weights' % (backupdir, epoch+1))
def test(epoch):
def truths_length(truths):
for i in range(50):
            if truths[i][1] == 0:
                return i
        return 50
model.eval()
if ngpus > 1:
cur_model = model.module
else:
cur_model = model
num_classes = cur_model.num_classes
anchors = cur_model.anchors
num_anchors = cur_model.num_anchors
total = 0.0
proposals = 0.0
correct = 0.0
for batch_idx, (data, target) in enumerate(test_loader):
if use_cuda:
data = data.cuda()
data = Variable(data, volatile=True)
output = model(data).data
all_boxes = get_region_boxes(output, conf_thresh, num_classes, anchors, num_anchors)
for i in range(output.size(0)):
boxes = all_boxes[i]
boxes = nms(boxes, nms_thresh)
truths = target[i].view(-1, 5)
num_gts = truths_length(truths)
total = total + num_gts
for i in range(len(boxes)):
if boxes[i][4] > conf_thresh:
proposals = proposals+1
for i in range(num_gts):
box_gt = [truths[i][1], truths[i][2], truths[i][3], truths[i][4], 1.0, 1.0, truths[i][0]]
best_iou = 0
best_j = -1
for j in range(len(boxes)):
iou = bbox_iou(box_gt, boxes[j], x1y1x2y2=False)
if iou > best_iou:
best_j = j
best_iou = iou
if best_iou > iou_thresh and boxes[best_j][6] == box_gt[6]:
correct = correct+1
precision = 1.0*correct/(proposals+eps)
recall = 1.0*correct/(total+eps)
fscore = 2.0*precision*recall/(precision+recall+eps)
logging("precision: %f, recall: %f, fscore: %f" % (precision, recall, fscore))
evaluate = False
if evaluate:
logging('evaluating ...')
test(0)
else:
for epoch in range(int(init_epoch), int(max_epochs)):
train(epoch)
test(epoch)
|
[] |
[] |
[
"CUDA_VISIBLE_DEVICES"
] |
[]
|
["CUDA_VISIBLE_DEVICES"]
|
python
| 1 | 0 | |
contracthandler/Datatypes.go
|
package contracthandler
import (
"bytes"
"encoding/hex"
"github.com/synechron-finlabs/quorum-maker-nodemanager/util"
"regexp"
"strings"
)
var mdt map[*regexp.Regexp]DataType
func init() {
mdt = make(map[*regexp.Regexp]DataType)
mdt[regexp.MustCompile(`^u?int(([1-9]|[1-5][0-9])|(6[0-4]))$`)] = Uint{}
mdt[regexp.MustCompile(`^bool$`)] = Bool{}
mdt[regexp.MustCompile(`^u?int(([1-9]|[1-5][0-9])|(6[0-4]))\[[0-9]+\]$`)] = UintFA{}
mdt[regexp.MustCompile(`^bytes$`)] = Bytes{}
mdt[regexp.MustCompile(`^u?int(([1-9]|[1-5][0-9])|(6[0-4]))\[\]$`)] = UintDA{}
mdt[regexp.MustCompile(`^string$`)] = String{}
mdt[regexp.MustCompile(`^bytes([1-9]|1[0-9]|2[0-9]|3[0-2])\[\]$`)] = Bytes32DA{}
mdt[regexp.MustCompile(`^bytes([1-9]|1[0-9]|2[0-9]|3[0-2])\[[0-9]+\]$`)] = Bytes32FA{}
mdt[regexp.MustCompile(`^bytes([1-9]|1[0-9]|2[0-9]|3[0-2])$`)] = BytesFixed{}
mdt[regexp.MustCompile(`^u?int(6[5-9]|[7-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-6])$`)] = UintLarge{}
mdt[regexp.MustCompile(`^u?int(6[5-9]|[7-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-6])\[[0-9]+\]$`)] = UintLargeFA{}
mdt[regexp.MustCompile(`^u?int(6[5-9]|[7-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-6])\[\]$`)] = UintLargeDA{}
mdt[regexp.MustCompile(`^address$`)] = Address{}
mdt[regexp.MustCompile(`^address\[[0-9]+\]$`)] = AddressFA{}
mdt[regexp.MustCompile(`^address\[\]$`)] = AddressDA{}
}
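// Illustrative examples of how Solidity type strings map onto the handlers
// registered above (not part of the original file):
//
//	"uint64"   -> Uint         "uint256"    -> UintLarge
//	"uint8[4]" -> UintFA       "uint256[]"  -> UintLargeDA
//	"bytes32"  -> BytesFixed   "bytes32[2]" -> Bytes32FA
//	"address"  -> Address      "address[]"  -> AddressDA
//	"bool"     -> Bool         "string"     -> String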
/*
* supports below formats
* functionName(datatype1, datatype2,...)
* datatype1, datatype2,...
 * datatype1, datatype2,... (with an extra comma at the end)
*
*/
func IsSupported(sig string) bool {
rex := regexp.MustCompile(`.*\((.*)\)|(.*)`)
var datatypes string
if match := rex.FindStringSubmatch(sig); match[1] != "" {
datatypes = match[1]
}else{
datatypes = strings.TrimSuffix(match[2], ",")
}
if datatypes == "" {
return true
}
for _, param := range strings.Split(datatypes, ",") {
var found bool
for k := range mdt {
if k.MatchString(param) {
found = true
break
}
}
if !found {
return false
}
}
return true
}
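// Illustrative calls (not part of the original file); the expected results
// follow from the patterns registered in init above:
//
//	IsSupported("transfer(address,uint256)") // true
//	IsSupported("uint64,bool,string")        // true
//	IsSupported("uint64,bool,string,")       // true  (trailing comma tolerated)
//	IsSupported("register(mapping)")         // false (no handler for 'mapping')
//	IsSupported("ping()")                    // true  (no parameters)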
type DataType interface {
IsDynamic() bool
Length() int
Encode() []string
New(interface{}, string) DataType
Decode([]string, int) (int, interface{})
}
type BaseDataType struct {
value interface{}
signature string
}
type Uint struct {
BaseDataType
}
func (t Uint) New(i interface{}, sig string) DataType {
return Uint{BaseDataType{i, sig}}
}
func (t Uint) IsDynamic() bool {
return false
}
func (t Uint) Length() int {
return 1
}
func (t Uint) Decode(data []string, index int) (int, interface{}) {
return 1, util.StringToInt(data[index])
}
func (t Uint) Encode() []string {
i := t.value.(int)
return []string{util.IntToString(i)}
}
type Bool struct {
Uint
}
func (t Bool) New(i interface{}, sig string) DataType {
if i != nil && i.(bool) {
return Bool{Uint{BaseDataType{1, sig}}}
}
return Bool{Uint{BaseDataType{0, sig}}}
}
func (t Bool) Decode(data []string, index int) (int, interface{}) {
_, t.value = t.Uint.Decode(data, index)
return 1, t.value == 1
}
type UintDA struct {
BaseDataType
}
func (t UintDA) New(i interface{}, sig string) DataType {
return UintDA{BaseDataType{i, sig}}
}
func (t UintDA) IsDynamic() bool {
return true
}
func (t UintDA) Length() int {
i := t.value.([]int)
return len(i) + 1
}
func (t UintDA) Decode(data []string, index int) (int, interface{}) {
offset := util.StringToInt(data[index])
length := util.StringToInt(data[offset/32])
var a = make([]int, length)
for i, j := offset/32+1, 0; i < offset/32+1+length; i++ {
a[j] = util.StringToInt(data[i])
j++
}
return 1, a
}
func (t UintDA) Encode() []string {
i := t.value.([]int)
r := make([]string, len(i)+1)
r[0] = util.IntToString(len(i))
for j := 1; j <= len(i); j++ {
r[j] = util.IntToString(i[j-1])
}
return r
}
type UintFA struct {
UintDA
}
func (t UintFA) New(i interface{}, sig string) DataType {
return UintFA{UintDA{BaseDataType{i, sig}}}
}
func (t UintFA) IsDynamic() bool {
return false
}
func (t UintFA) Length() int {
i := t.value.([]int)
return len(i)
}
func (t UintFA) Encode() []string {
i := t.value.([]int)
var output []string
for _, v := range i {
output = append(output, util.IntToString(v))
}
return output
}
func (t UintFA) Decode(data []string, index int) (int, interface{}) {
length := util.StringToInt(util.Between(t.signature, "[", "]"))
var a = make([]int, length)
for i, j := index, 0; j < length; i++ {
a[j] = util.StringToInt(data[i])
j++
}
return length, a
}
type UintLarge struct {
BaseDataType
}
func (t UintLarge) New(i interface{}, sig string) DataType {
return UintLarge{BaseDataType{i, sig}}
}
func (t UintLarge) IsDynamic() bool {
return false
}
func (t UintLarge) Length() int {
return 1
}
func (t UintLarge) Decode(data []string, index int) (int, interface{}) {
return 1, util.DecodeLargeInt(data[index])
}
func (t UintLarge) Encode() []string {
i := t.value.(string)
return []string{util.EncodeLargeInt(i)}
}
type UintLargeDA struct {
BaseDataType
}
func (t UintLargeDA) New(i interface{}, sig string) DataType {
return UintLargeDA{BaseDataType{i, sig}}
}
func (t UintLargeDA) IsDynamic() bool {
return true
}
func (t UintLargeDA) Length() int {
i := t.value.([]string)
return len(i) + 1
}
func (t UintLargeDA) Decode(data []string, index int) (int, interface{}) {
offset := util.StringToInt(data[index])
length := util.StringToInt(data[offset/32])
var a = make([]string, length)
for i, j := offset/32+1, 0; i < offset/32+1+length; i++ {
a[j] = util.DecodeLargeInt(data[i])
j++
}
return 1, a
}
func (t UintLargeDA) Encode() []string {
i := t.value.([]string)
r := make([]string, len(i)+1)
r[0] = util.IntToString(len(i))
for j := 1; j <= len(i); j++ {
r[j] = util.EncodeLargeInt(i[j-1])
}
return r
}
type UintLargeFA struct {
UintLargeDA
}
func (t UintLargeFA) New(i interface{}, sig string) DataType {
return UintLargeFA{UintLargeDA{BaseDataType{i, sig}}}
}
func (t UintLargeFA) IsDynamic() bool {
return false
}
func (t UintLargeFA) Length() int {
i := t.value.([]string)
return len(i)
}
func (t UintLargeFA) Encode() []string {
i := t.value.([]string)
var output []string
for _, v := range i {
output = append(output, util.EncodeLargeInt(v))
}
return output
}
func (t UintLargeFA) Decode(data []string, index int) (int, interface{}) {
length := util.StringToInt(util.Between(t.signature, "[", "]"))
var a = make([]string, length)
for i, j := index, 0; j < length; i++ {
a[j] = util.DecodeLargeInt(data[i])
j++
}
return length, a
}
type Address struct {
UintLarge
}
func (t Address) New(i interface{}, sig string) DataType {
return Address{UintLarge{BaseDataType{i, sig}}}
}
func (t Address) Decode(data []string, index int) (int, interface{}) {
return 1, "0x" + strings.TrimLeft(data[index], "0")
}
type AddressDA struct {
UintLargeDA
}
func (t AddressDA) New(i interface{}, sig string) DataType {
return AddressDA{UintLargeDA{BaseDataType{i, sig}}}
}
func (t AddressDA) Decode(data []string, index int) (int, interface{}) {
offset := util.StringToInt(data[index])
length := util.StringToInt(data[offset/32])
var a = make([]string, length)
for i, j := offset/32+1, 0; i < offset/32+1+length; i++ {
a[j] = "0x" + strings.TrimLeft(data[i], "0")
j++
}
return 1, a
}
type AddressFA struct {
UintLargeFA
}
func (t AddressFA) New(i interface{}, sig string) DataType {
return AddressFA{UintLargeFA{UintLargeDA{BaseDataType{i, sig}}}}
}
func (t AddressFA) Decode(data []string, index int) (int, interface{}) {
length := util.StringToInt(util.Between(t.signature, "[", "]"))
var a = make([]string, length)
for i, j := index, 0; j < length; i++ {
a[j] = "0x" + strings.TrimLeft(data[i], "0")
j++
}
return length, a
}
type Bytes struct {
BaseDataType
}
func (t Bytes) New(i interface{}, sig string) DataType {
return Bytes{BaseDataType{i, sig}}
}
func (t Bytes) IsDynamic() bool {
return true
}
func (t Bytes) Length() int {
i := t.value.([]byte)
if len(i)%32 == 0 {
return len(i)/32 + 1
}
return len(i)/32 + 2
}
func (t Bytes) Decode(data []string, index int) (int, interface{}) {
offset := util.StringToInt(data[index])
length := util.StringToInt(data[offset/32])
var buffer bytes.Buffer
for i, c := offset/32+1, 0; c < length; i++ {
buffer.WriteString(data[i])
c += 32
}
t.value, _ = hex.DecodeString(buffer.String()[:length*2])
return 1, t.value
}
func (t Bytes) Encode() []string {
s := t.value.([]byte)
var d []string
d = append(d, util.IntToString(len(s)))
limit := 0
if len(s)%32 == 0 {
limit = len(s) / 32
} else {
limit = len(s)/32 + 1
}
for i := 0; i < limit; i++ {
j := i * 32
k := j + 32
var b []byte
if k > len(s) {
b = make([]byte, 32)
copy(b, s[j:])
} else {
b = s[j:k]
}
d = append(d, hex.EncodeToString(b))
}
return d
}
type String struct {
Bytes
}
func (t String) New(i interface{}, sig string) DataType {
if i == nil {
i = ""
}
return String{Bytes{BaseDataType{[]byte(i.(string)), sig}}}
}
func (t String) Decode(data []string, index int) (int, interface{}) {
_, t.value = t.Bytes.Decode(data, index)
return 1, string(t.value.([]byte))
}
type Bytes32DA struct {
BaseDataType
}
func (t Bytes32DA) New(i interface{}, sig string) DataType {
return Bytes32DA{BaseDataType{i, sig}}
}
func (t Bytes32DA) IsDynamic() bool {
return true
}
func (t Bytes32DA) Length() int {
i := t.value.([]int)
return len(i) + 1
}
func (t Bytes32DA) Decode(data []string, index int) (int, interface{}) {
offset := util.StringToInt(data[index])
length := util.StringToInt(data[offset/32])
var a = make([][]byte, length)
for i, j := offset/32+1, 0; j < length; i++ {
a[j], _ = hex.DecodeString(strings.Replace(data[i], "00", "",-1))
j++
}
t.value = a
return 1, a
}
func (t Bytes32DA) Encode() []string {
i := t.value.([][]byte)
r := make([]string, len(i)+1)
r[0] = util.IntToString(len(i))
for j := 1; j <= len(i); j++ {
b := make([]byte, 32)
copy(b, i[j-1])
r[j] = hex.EncodeToString(b)
}
return r
}
type Bytes32FA struct {
BaseDataType
}
func (t Bytes32FA) New(i interface{}, sig string) DataType {
return Bytes32FA{BaseDataType{i, sig}}
}
func (t Bytes32FA) IsDynamic() bool {
return false
}
func (t Bytes32FA) Length() int {
i := t.value.([][]byte)
return len(i)
}
func (t Bytes32FA) Decode(data []string, index int) (int, interface{}) {
length := util.StringToInt(util.Between(t.signature, "[", "]"))
var a = make([][]byte, length)
for i, j := index, 0; j < length; i++ {
a[j], _ = hex.DecodeString(strings.Replace(data[i], "00", "",-1))
j++
}
t.value = a
return length, a
}
func (t Bytes32FA) Encode() []string {
i := t.value.([][]byte)
r := make([]string, len(i))
for j := 0; j < len(i); j++ {
b := make([]byte, 32)
copy(b, i[j])
r[j] = hex.EncodeToString(b)
}
return r
}
type BytesFixed struct {
Uint
}
func (t BytesFixed) New(i interface{}, sig string) DataType {
return BytesFixed{Uint{BaseDataType{i, sig}}}
}
func (t BytesFixed) Decode(data []string, index int) (int, interface{}) {
t.value, _ = hex.DecodeString(strings.Replace(data[index], "00", "",-1))
return 1, t.value
}
func (t BytesFixed) Encode() []string {
i := t.value.([]byte)
b := make([]byte, 32)
copy(b, i)
return []string{hex.EncodeToString(b)}
}
|
[] |
[] |
[] |
[]
|
[]
|
go
| null | null | null |
NESS/Spitzer/SHA_Query_Mosaic.py
|
# Identify location
import socket
location = socket.gethostname()
if location == 'Monolith':
dropbox = 'E:\\Users\\Chris\\Dropbox\\'
if location == 'Hobbitslayer':
dropbox = 'C:\\Users\\spx7cjc\\Dropbox\\'
if location == 'saruman':
dropbox = '/home/herdata/spx7cjc/Dropbox/'
# Import smorgasbord
import os
import sys
import multiprocessing as mp
import numpy as np
import astropy.io.fits
import astropy.io.votable
from astroquery import sha
import montage_wrapper.commands
import shutil
import signal
import gc
#warnings.filterwarnings("ignore")
from glob import glob
import subprocess
import wget
import pdb
import time
import ChrisFuncs
import ChrisFuncs.Coadd
# Add SWarp directory to path
os.environ['PATH'] = os.environ['PATH'] + ':/home/user/spx7cjc/swarp/bin'
# Define a timeout handler
def Handler(signum, frame):
raise Exception("Timout!")
# Define function to wget and extract Spitzer files
def Spitzer_wget(tile_url, tile_filename):
print('Acquiring '+tile_url)
if os.path.exists(tile_filename):
os.remove(tile_filename)
success = False
while success==False:
try:
wget.download(tile_url, out=tile_filename)
print('Successful acquisition of '+tile_url)
success = True
except:
            print('Failure! Retrying acquisition of '+tile_url)
time.sleep(0.1)
success = False
os.system('unzip '+tile_filename)
# Define function to replace null pixels in SWarp outputs with NaNs
def Spitzer_SWarp_NaN(target):
in_fitsdata = astropy.io.fits.open(target)
in_image = in_fitsdata[0].data
in_header = in_fitsdata[0].header
in_fitsdata.close()
out_image = in_image.copy()
out_image[ np.where( out_image<-1E20 ) ] = np.NaN
out_hdu = astropy.io.fits.PrimaryHDU(data=out_image, header=in_header)
out_hdulist = astropy.io.fits.HDUList([out_hdu])
out_hdulist.writeto(target, clobber=True)
# Commence main task
if __name__ == "__main__":
# Decide what intrument to work for
instrument = 'IRAC'
# Define paths
in_dir = '/home/sarumandata2/spx7cjc/NESS/Ancillary_Data/Spitzer/Temporary_Files/'
out_dir = '/home/sarumandata2/spx7cjc/NESS/Ancillary_Data/Spitzer/Mosaics_'+instrument+'/'
# Read in source catalogue
ness_cat = np.genfromtxt(dropbox+'Work/Tables/NESS/NESS_Sample.csv', delimiter=',', names=True, dtype=None)
name_list = ness_cat['name']
# Read in list of already-Montaged sources
already_processed_path = '/home/sarumandata2/spx7cjc/NESS/Ancillary_Data/Spitzer/Spitzer_'+instrument+'_Already_Processed_List.dat'
if not os.path.exists(already_processed_path):
open(already_processed_path,'a')
already_processed = np.genfromtxt(already_processed_path, dtype=('S50')).tolist()
# Identify targets not yet processed
remaining_list = []
for i in range(0, name_list.shape[0]):
already_done = 0
name = name_list[i]
if name not in already_processed:
remaining_list.append(i)
name_list = ness_cat['name']
ra_list = ness_cat['ra']
dec_list = ness_cat['dec']
# State band information
if instrument=='IRAC':
bands_dict = {'3.6um':{'instrument':'IRAC','band_long':'3.6','channel':'ch1','pix_size':0.6},
'4.5um':{'instrument':'IRAC','band_long':'4.5','channel':'ch2','pix_size':0.6},
'5.8um':{'instrument':'IRAC','band_long':'5.8','channel':'ch3','pix_size':0.6},
'8.0um':{'instrument':'IRAC','band_long':'8.0','channel':'ch4','pix_size':0.6}}
elif instrument=='MIPS':
bands_dict = {'24um':{'instrument':'MIPS','band_long':'24','channel':'ch1','pix_size':2.45},
'70um':{'instrument':'MIPS','band_long':'70','channel':'ch2','pix_size':4.0},
'160um':{'instrument':'MIPS','band_long':'160','channel':'ch3','pix_size':8.0}}
# Register signal function handler, for dealing with timeouts
signal.signal(signal.SIGALRM, Handler)
# Record time taken
time_list = [ time.time() ]
# Loop over each source
for i in np.random.permutation(range(0, ness_cat.shape[0])):
name = name_list[i].replace(' ','_')
ra = ra_list[i]
dec = dec_list[i]
time_start = time.time()
width = 0.25#
print('Processing source '+name)
# Check if source is in list of already-montaged sources
if name in already_processed:
print(name+' already processed')
continue
# Check which, if any, bands already have data
print('Checking existing finalised cutouts for matches to current source')
bands_dict_req = {}
for band in bands_dict.keys():
if name+'_Spitzer_'+bands_dict[band]['band_long']+'.fits.gz' not in os.listdir('/home/sarumandata2/spx7cjc/NESS/Ancillary_Data/Spitzer/Cutouts/'):
bands_dict_req[band] = bands_dict[band]
        # Create tile processing directories (deleting any prior), and set appropriate Python (ie, Montage) working directory
gal_dir = in_dir+str(name)+'/'
if os.path.exists(gal_dir):
shutil.rmtree(gal_dir)
os.makedirs(gal_dir)
os.makedirs(gal_dir+'Errors')
os.makedirs(gal_dir+'Raw')
os.chdir(gal_dir+'Raw')
# Perform query, with error handling
print('Querying Spitzer server')
query_success = False
query_fail_count = 0
while query_success==False:
if query_fail_count>=10:
break
try:
                print('NOTE: Astroquery currently not working with Spitzer; getting query results using Spitzer API instead')
"""
query_obj = Spitzer.query(ra=ra, dec=dec, size=width)
"""
query_url = 'http://sha.ipac.caltech.edu/applications/Spitzer/SHA/servlet/DataService?RA='+str(ra)+'&DEC='+str(dec)+'&SIZE='+str(width)+'&VERB=3&DATASET=ivo%3A%2F%2Firsa.csv%2Fspitzer.level2'
query_filename = gal_dir+'Spitzer_Query.csv'
if os.path.exists(query_filename):
os.remove(query_filename)
wget.download(query_url, out=query_filename.replace('.csv','.txt'))
os.system('stilts tcopy ifmt=ipac ofmt=csv '+query_filename.replace('.csv','.txt')+' '+query_filename)
os.remove(query_filename.replace('.csv','.txt'))
query_success = True
except:
print('Spitzer query failed; reattempting')
query_fail_count += 1
if not os.path.exists(query_filename):
query_success=False
if query_success==False:
print('No Spitzer data for '+name)
already_processed_file = open(already_processed_path, 'a')
already_processed_file.write(name+'\n')
already_processed_file.close()
time_list.append( time.time() )
shutil.rmtree(gal_dir)
gc.collect()
continue
# Save query result (removing pre-existing query file, if present)
"""
query_filename = gal_dir+'Spitzer_Query.csv'
query_obj.write(query_filename, format='csv')
"""
# Establish if any data was found; if not, skip
query_in = np.genfromtxt(query_filename, delimiter=',', names=True, dtype=None)
if query_in.size==0:
print('No Spitzer data for '+name)
already_processed_file = open(already_processed_path, 'a')
already_processed_file.write(name+'\n')
already_processed_file.close()
time_list.append( time.time() )
shutil.rmtree(gal_dir)
gc.collect()
continue
# Record which urls correspond to data in the desired bands (dealing with awkwardness for if there is only 1 entry, or silly massive files)
Spitzer_urls = []
Spitzer_bands = []
if query_in.size==1:
if query_in['accessWithAnc1Url']!='NONE' and query_in['filesize']<1E9:
for band in bands_dict_req.keys():
if query_in['wavelength']==bands_dict_req[band]['instrument']+' '+band:
Spitzer_urls.append(query_in['accessWithAnc1Url'])
Spitzer_bands.append(band)
else:
for j in range(0, query_in.size):
if query_in[j]['accessWithAnc1Url']!='NONE' and query_in[j]['filesize']<1E9:
for band in bands_dict_req.keys():
if query_in[j]['wavelength']==bands_dict_req[band]['instrument']+' '+band:
Spitzer_urls.append(query_in[j]['accessWithAnc1Url'])
Spitzer_bands.append(band)
# In parallel, download and extract files
os.chdir(gal_dir+'Raw')
dl_pool = mp.Pool(processes=20)
for j in range(0, len(Spitzer_urls)):
tile_url = Spitzer_urls[j]
tile_filename = gal_dir+'Raw/'+name+'_'+Spitzer_bands[j]+'_'+str(j)+'.zip'
dl_pool.apply_async( Spitzer_wget, args=(tile_url, tile_filename,) )#Spitzer_wget(tile_url, tile_filename)
dl_pool.close()
dl_pool.join()
[ os.remove(dl_zip) for dl_zip in os.listdir(gal_dir+'Raw/') if '.zip' in dl_zip ]
# Copy files to relevant folders
for band in bands_dict_req.keys():
if os.path.exists(gal_dir+band+'/'):
shutil.rmtree(gal_dir+band+'/')
if os.path.exists(gal_dir+'Errors/'+band+'/'):
shutil.rmtree(gal_dir+'Errors/'+band+'/')
os.makedirs(gal_dir+band+'/')
os.makedirs(gal_dir+'Errors/'+band+'/')
channel = bands_dict_req[band]['channel']
for dl_folder in os.listdir(gal_dir+'Raw/'):
if os.path.exists(gal_dir+'Raw/'+dl_folder+'/'+channel+'/pbcd'):
for dl_file in os.listdir(gal_dir+'Raw/'+dl_folder+'/'+channel+'/pbcd'):
if '_maic.fits' in dl_file:
shutil.copy2(gal_dir+'Raw/'+dl_folder+'/'+channel+'/pbcd/'+dl_file, gal_dir+band)
if '_munc.fits' in dl_file:
shutil.copy2(gal_dir+'Raw/'+dl_folder+'/'+channel+'/pbcd/'+dl_file, gal_dir+band)
shutil.rmtree(gal_dir+'Raw')
# Check that the retrieved files provide actual coverage of the point in question
coverage_bands = []
for band in bands_dict_req.keys():
montage_wrapper.commands.mImgtbl(gal_dir+band, gal_dir+band+'/'+band+'_Image_Metadata_Table.dat', corners=True)
if os.stat(gal_dir+band+'/'+band+'_Image_Metadata_Table.dat').st_size==0:
continue
montage_wrapper.commands_extra.mCoverageCheck(gal_dir+band+'/'+band+'_Image_Metadata_Table.dat', gal_dir+band+'/'+band+'_Overlap_Check.dat', mode='point', ra=ra, dec=dec)
if sum(1 for line in open(gal_dir+band+'/'+band+'_Overlap_Check.dat'))>3:
coverage_bands.append(band)
if len(coverage_bands)==0:
print('No Spitzer data for '+name)
already_processed_file = open(already_processed_path, 'a')
already_processed_file.write(name+'\n')
already_processed_file.close()
time_list.append( time.time() )
shutil.rmtree(gal_dir)
gc.collect()
continue
# Loop over each band for coaddition
for band in coverage_bands:
print('Commencing Montaging and SWarping of '+name+'_Spitzer_'+band)
os.chdir(gal_dir+band)
os.mkdir(gal_dir+band+'/Diffs_Temp')
os.mkdir(gal_dir+band+'/Backsub_Temp')
os.mkdir(gal_dir+band+'/SWarp_Temp')
# Create Montage FITS header
location_string = str(ra)+' '+str(dec)
pix_size = bands_dict_req[band]['pix_size']
montage_wrapper.commands.mHdr(location_string, width, gal_dir+band+'/'+str(name)+'_HDR', pix_size=pix_size)
            # Use Montage wrapper to reproject all fits files to common projection, skipping if none actually overlap
            print('Performing reprojections for '+name+'_Spitzer_'+band+' maps')
location_string = str(ra)+' '+str(dec)
target_files = []
proj_fail = 0
[ target_files.append(target_file) for target_file in os.listdir(gal_dir+band) if '.fits' in target_file ]
for target_file in target_files:
try:
montage_wrapper.wrappers.reproject(os.path.join(gal_dir+band,target_file), os.path.join(gal_dir+band,target_file), header=gal_dir+band+'/'+str(name)+'_HDR', exact_size=True)
except:
os.remove(os.path.join(gal_dir+band,target_file))
proj_fail += 1
if proj_fail==len(target_files):
print('No Spitzer coverage for '+name+' at '+band)
continue
# Loop over error maps and copy
for listfile in os.listdir(gal_dir+band):
if '_munc.fits' in listfile:
shutil.copy2(gal_dir+band+'/'+listfile, gal_dir+'Errors/'+band)
# Convert error maps to weight maps
unc_fitsdata = astropy.io.fits.open(gal_dir+band+'/'+listfile)
unc_image = unc_fitsdata[0].data
unc_header = unc_fitsdata[0].header
unc_fitsdata.close()
unc_image = unc_image**-1.0
unc_hdu = astropy.io.fits.PrimaryHDU(data=unc_image, header=unc_header)
unc_hdulist = astropy.io.fits.HDUList([unc_hdu])
unc_hdulist.writeto(gal_dir+band+'/SWarp_Temp/'+listfile.replace('_munc.fits','_maic.wgt.fits'), clobber=True)
# Delete old uncertainty map
os.remove(gal_dir+band+'/'+listfile)
# If only one image file, proceed straight to co-adding; otherwise, commence background-matching
mosaic_count = 0
for listfile in os.listdir(gal_dir+band):
if '.fits' in listfile:
mosaic_count += 1
if mosaic_count==1:
for listfile in os.listdir(gal_dir+band):
if '.fits' in listfile:
shutil.move(listfile, gal_dir+band+'/SWarp_Temp')
if mosaic_count>1:
# Use Montage wrapper to determine appropriate corrections for background matching
print('Determining background corrections for '+name+'_Spitzer_'+band+' maps')
montage_wrapper.commands.mImgtbl(gal_dir+band, gal_dir+band+'/'+band+'_Image_Metadata_Table.dat', corners=True)
montage_wrapper.commands.mOverlaps(gal_dir+band+'/'+band+'_Image_Metadata_Table.dat', gal_dir+band+'/'+band+'_Image_Diffs_Table.dat')
montage_wrapper.commands.mDiffExec(gal_dir+band+'/'+band+'_Image_Diffs_Table.dat', gal_dir+band+'/'+str(name)+'_HDR', gal_dir+band+'/Diffs_Temp', no_area=True)
montage_wrapper.commands.mFitExec(gal_dir+band+'/'+band+'_Image_Diffs_Table.dat', gal_dir+band+'/'+band+'_Image_Fitting_Table.dat', gal_dir+band+'/Diffs_Temp')
montage_wrapper.commands.mBgModel(gal_dir+band+'/'+band+'_Image_Metadata_Table.dat', gal_dir+band+'/'+band+'_Image_Fitting_Table.dat', gal_dir+band+'/'+band+'_Image_Corrections_Table.dat', level_only=True, n_iter=16384)
# Apply background corrections using Montage subprocess, with timeout handling
print('Applying background corrections to '+name+'_Spitzer_'+band+' maps')
mBgExec_fail_count = 0
mBgExec_success = False
mBgExec_uberfail = False
while mBgExec_success==False:
# Attempt background-matching
mBgExec_sp = subprocess.Popen( ['/home/soft/montage/bin/mBgExec', '-n', gal_dir+band+'/'+band+'_Image_Metadata_Table.dat', gal_dir+band+'/'+band+'_Image_Corrections_Table.dat', gal_dir+band+'/SWarp_Temp' ], preexec_fn=os.setsid, stdout=subprocess.PIPE )
mBgExec_fail = False
seconds = 0
minutes_max = 45
while mBgExec_fail==False:
time.sleep(1)
mBgExec_stdout = mBgExec_sp.stdout.readline()
if mBgExec_sp.poll()==None:
seconds += 1
if 'Table has no data records' in mBgExec_stdout:
mBgExec_fail = True
mBgExec_fail_count += 1
break
if seconds>=(60*minutes_max):
mBgExec_fail = True
mBgExec_fail_count += 1
break
if mBgExec_sp.poll()!=None:
mBgExec_success = True
break
# Handle timeouts and other failures
if mBgExec_fail_count>0:
print('Background matching with Montage has failed '+str(mBgExec_fail_count)+' time(s); reattempting')
if mBgExec_fail==True and mBgExec_success==False and mBgExec_fail_count>=3:
mBgExec_uberfail = True
print('Background matching with Montage has failed 3 times; proceeding directly to co-additon')
try:
os.killpg( os.getpgid(mBgExec_sp.pid), 15 )
except:
                            print('Background matching subprocess appears to have imploded; no task to kill')
for listfile in os.listdir(gal_dir+band):
if '_maic.fits' in listfile:
shutil.move(listfile, gal_dir+band+'/SWarp_Temp')
break
# Sort out daft filename differences between image maps and error maps
for gal_file in os.listdir(gal_dir+band+'/SWarp_Temp'):
os.rename(gal_dir+band+'/SWarp_Temp/'+gal_file, gal_dir+band+'/SWarp_Temp/'+gal_file.replace('_'+gal_file.split('_')[-2:][0], '') )
# Perform least-squares plane fitting to match MIPS image levels
if instrument=='MIPS':
ChrisFuncs.Coadd.LevelFITS(gal_dir+band+'/SWarp_Temp', 'maic.fits', convfile_dir=False)
# Use SWarp to co-add images weighted by their error maps
print('Co-adding '+name+'_Spitzer_'+band+' maps')
os.chdir(gal_dir+band+'/SWarp_Temp')
os.system('swarp *_maic.fits -IMAGEOUT_NAME '+name+'_Spitzer_'+band+'_SWarp.fits -WEIGHT_SUFFIX .wgt.fits -COMBINE_TYPE WEIGHTED -COMBINE_BUFSIZE 2048 -GAIN_KEYWORD DIESPIZERDIE -RESCALE_WEIGHTS N -SUBTRACT_BACK N -RESAMPLE N -VMEM_MAX 4095 -MEM_MAX 4096 -WEIGHT_TYPE MAP_WEIGHT -NTHREADS 4 -VERBOSE_TYPE QUIET')
Spitzer_SWarp_NaN(name+'_Spitzer_'+band+'_SWarp.fits')
# Re-project finalised image map using Montage
montage_wrapper.wrappers.reproject(gal_dir+band+'/SWarp_Temp/'+name+'_Spitzer_'+band+'_SWarp.fits', out_dir+name+'_Spitzer_'+bands_dict_req[band]['band_long']+'.fits', header=gal_dir+band+'/'+str(name)+'_HDR', exact_size=True)
# Compress finalised image map
os.chdir(out_dir)
if os.path.exists(out_dir+name+'_Spitzer_'+bands_dict_req[band]['band_long']+'.fits.gz'):
os.remove(out_dir+name+'_Spitzer_'+bands_dict_req[band]['band_long']+'.fits.gz')
os.system('gzip '+name+'_Spitzer_'+bands_dict_req[band]['band_long']+'.fits')
print('Completed Montaging and SWarping '+name+'_Spitzer_'+band+' image map')
# Turn error maps into exposure time maps
for listfile in os.listdir(gal_dir+'Errors/'+band):
if '_munc.fits' in listfile:
unc_fitsdata = astropy.io.fits.open(gal_dir+'Errors/'+band+'/'+listfile)
unc_image = unc_fitsdata[0].data
unc_header = unc_fitsdata[0].header
unc_fitsdata.close()
unc_image = unc_image**-2.0
unc_hdu = astropy.io.fits.PrimaryHDU(data=unc_image, header=unc_header)
unc_hdulist = astropy.io.fits.HDUList([unc_hdu])
unc_hdulist.writeto(gal_dir+'Errors/'+band+'/'+listfile.replace('_munc.fits','_mexp.fits'), clobber=True)
# Use Montage to add exposure time images
print('Co-adding '+name+'_Spitzer_'+band+' error maps')
target_files = []
[ target_files.append(dir_file) for dir_file in os.listdir(gal_dir+'Errors/'+band) if 'mexp.fits' in dir_file ]
for i in range(0, len(target_files)):
exp_fitsdata = astropy.io.fits.open(gal_dir+'Errors/'+band+'/'+target_files[i])
exp_image = exp_fitsdata[0].data
exp_header = exp_fitsdata[0].header
exp_fitsdata.close()
if i==0:
add_image = np.zeros([ exp_image.shape[0], exp_image.shape[1] ])
add_header = exp_header.copy()
exp_good = np.where( np.isnan(exp_image)==False )
add_image[exp_good] += exp_image[exp_good]
add_hdu = astropy.io.fits.PrimaryHDU(data=add_image, header=add_header)
add_hdulist = astropy.io.fits.HDUList([add_hdu])
add_hdulist.writeto(gal_dir+'Errors/'+band+'/'+name+'_Spitzer_'+band+'_Exp_Add.fits', clobber=True)
# Re-project final exposure map using Montage
montage_wrapper.wrappers.reproject(gal_dir+'Errors/'+band+'/'+name+'_Spitzer_'+band+'_Exp_Add.fits', gal_dir+'Errors/'+band+'/'+name+'_Spitzer_'+band+'_Exp.fits', header=gal_dir+band+'/'+str(name)+'_HDR', exact_size=True)
# Convert final exposure time map into error map
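# (Inverse of the earlier transform: sigma = t_exp**-0.5; negative pixels are masked first, and zero-exposure pixels become inf and are then masked as NaN.)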
unc_fitsdata = astropy.io.fits.open(gal_dir+'Errors/'+band+'/'+name+'_Spitzer_'+band+'_Exp.fits')
unc_image = unc_fitsdata[0].data
unc_header = unc_fitsdata[0].header
unc_fitsdata.close()
unc_image[ np.where(unc_image<0) ] = np.NaN
unc_image = unc_image**-0.5
unc_image[ np.where(unc_image==np.inf) ] = np.NaN
unc_hdu = astropy.io.fits.PrimaryHDU(data=unc_image, header=unc_header)
unc_hdulist = astropy.io.fits.HDUList([unc_hdu])
unc_hdulist.writeto(out_dir+name+'_Spitzer_'+bands_dict_req[band]['band_long']+'_Error.fits', clobber=True)
# Compress finalised exposure time map
os.chdir(out_dir)
if os.path.exists(out_dir+name+'_Spitzer_'+bands_dict_req[band]['band_long']+'_Error.fits.gz'):
os.remove(out_dir+name+'_Spitzer_'+bands_dict_req[band]['band_long']+'_Error.fits.gz')
os.system('gzip '+name+'_Spitzer_'+bands_dict_req[band]['band_long']+'_Error.fits')
print('Completed Montaging '+name+'_Spitzer_'+band+' error map')
# Record that processing of source has been completed
already_processed_file = open(already_processed_path, 'a')
already_processed_file.write(name+'\n')
already_processed_file.close()
# Clean memory, and return timings
shutil.rmtree(gal_dir)
time_list.append( time.time() )
time_est = ChrisFuncs.TimeEst(time_list, len(name_list))
time_file = open( os.path.join('/'.join(in_dir.split('/')[:-2]),'Estimated_Completion_Time.txt'), 'w')
time_file.write(time_est)
time_file.close()
print('Estimated completion time: '+time_est)
# Jubilate
print('All done!')
|
[] |
[] |
[
"PATH"
] |
[]
|
["PATH"]
|
python
| 1 | 0 | |
tests/hikari/internal/test_ux.py
|
# -*- coding: utf-8 -*-
# Copyright (c) 2020 Nekokatt
# Copyright (c) 2021-present davfsa
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import builtins
import contextlib
import importlib
import logging
import os
import platform
import string
import sys
import time
import colorlog
import mock
import pytest
from hikari import _about
from hikari.impl import config
from hikari.internal import net
from hikari.internal import ux
from tests.hikari import hikari_test_helpers
class TestInitLogging:
def test_when_handlers_already_set_up(self):
stack = contextlib.ExitStack()
stack.enter_context(mock.patch.object(logging, "root", handlers=[None]))
logging_dict_config = stack.enter_context(mock.patch.object(logging.config, "dictConfig"))
logging_basic_config = stack.enter_context(mock.patch.object(logging, "basicConfig"))
colorlog_basic_config = stack.enter_context(mock.patch.object(colorlog, "basicConfig"))
with stack:
ux.init_logging("LOGGING_LEVEL", True, False)
logging_dict_config.assert_not_called()
logging_basic_config.assert_not_called()
colorlog_basic_config.assert_not_called()
def test_when_handlers_specify_not_to_set_up(self):
stack = contextlib.ExitStack()
stack.enter_context(mock.patch.object(logging, "root", handlers=[]))
logging_dict_config = stack.enter_context(mock.patch.object(logging.config, "dictConfig"))
logging_basic_config = stack.enter_context(mock.patch.object(logging, "basicConfig"))
colorlog_basic_config = stack.enter_context(mock.patch.object(colorlog, "basicConfig"))
with stack:
ux.init_logging(None, True, False)
logging_dict_config.assert_not_called()
logging_basic_config.assert_not_called()
colorlog_basic_config.assert_not_called()
def test_when_flavour_is_a_dict_and_doesnt_define_handlers(self):
stack = contextlib.ExitStack()
stack.enter_context(mock.patch.object(logging, "root", handlers=[]))
stack.enter_context(mock.patch.object(ux, "supports_color", return_value=False))
logging_dict_config = stack.enter_context(mock.patch.object(logging.config, "dictConfig"))
logging_basic_config = stack.enter_context(mock.patch.object(logging, "basicConfig"))
colorlog_basic_config = stack.enter_context(mock.patch.object(colorlog, "basicConfig"))
with stack:
ux.init_logging({"hikari": {"level": "INFO"}}, True, False)
logging_dict_config.assert_called_once_with({"hikari": {"level": "INFO"}})
logging_basic_config.assert_called_once_with(
level=None,
format="%(levelname)-1.1s %(asctime)23.23s %(name)s: %(message)s",
stream=sys.stderr,
)
colorlog_basic_config.assert_not_called()
def test_when_flavour_is_a_dict_and_defines_handlers(self):
stack = contextlib.ExitStack()
stack.enter_context(mock.patch.object(logging, "root", handlers=[]))
logging_dict_config = stack.enter_context(mock.patch.object(logging.config, "dictConfig"))
logging_basic_config = stack.enter_context(mock.patch.object(logging, "basicConfig"))
colorlog_basic_config = stack.enter_context(mock.patch.object(colorlog, "basicConfig"))
with stack:
ux.init_logging({"hikari": {"level": "INFO"}, "handlers": {"some_handler": {}}}, True, False)
logging_dict_config.assert_called_once_with({"hikari": {"level": "INFO"}, "handlers": {"some_handler": {}}})
logging_basic_config.assert_not_called()
colorlog_basic_config.assert_not_called()
def test_when_supports_color(self):
stack = contextlib.ExitStack()
stack.enter_context(mock.patch.object(logging, "root", handlers=[]))
logging_dict_config = stack.enter_context(mock.patch.object(logging.config, "dictConfig"))
logging_basic_config = stack.enter_context(mock.patch.object(logging, "basicConfig"))
colorlog_basic_config = stack.enter_context(mock.patch.object(colorlog, "basicConfig"))
supports_color = stack.enter_context(mock.patch.object(ux, "supports_color", return_value=True))
with stack:
ux.init_logging("LOGGING_LEVEL", True, False)
logging_dict_config.assert_not_called()
logging_basic_config.assert_not_called()
colorlog_basic_config.assert_called_once_with(
level="LOGGING_LEVEL",
format="%(log_color)s%(bold)s%(levelname)-1.1s%(thin)s %(asctime)23.23s %(bold)s%(name)s: "
"%(thin)s%(message)s%(reset)s",
stream=sys.stderr,
)
supports_color.assert_called_once_with(True, False)
def test_when_doesnt_support_color(self):
stack = contextlib.ExitStack()
stack.enter_context(mock.patch.object(logging, "root", handlers=[]))
logging_dict_config = stack.enter_context(mock.patch.object(logging.config, "dictConfig"))
logging_basic_config = stack.enter_context(mock.patch.object(logging, "basicConfig"))
colorlog_basic_config = stack.enter_context(mock.patch.object(colorlog, "basicConfig"))
supports_color = stack.enter_context(mock.patch.object(ux, "supports_color", return_value=False))
with stack:
ux.init_logging("LOGGING_LEVEL", True, False)
logging_dict_config.assert_not_called()
logging_basic_config.assert_called_once_with(
level="LOGGING_LEVEL",
format="%(levelname)-1.1s %(asctime)23.23s %(name)s: %(message)s",
stream=sys.stderr,
)
colorlog_basic_config.assert_not_called()
supports_color.assert_called_once_with(True, False)
class TestPrintBanner:
def test_when_package_is_none(self):
with mock.patch.object(sys.stdout, "write") as write:
ux.print_banner(None, True, False)
write.assert_not_called()
@pytest.fixture()
def mock_args(self):
stack = contextlib.ExitStack()
stack.enter_context(mock.patch.object(platform, "release", return_value="1.0.0"))
stack.enter_context(mock.patch.object(platform, "system", return_value="Potato"))
stack.enter_context(mock.patch.object(platform, "machine", return_value="Machine"))
stack.enter_context(mock.patch.object(platform, "python_implementation", return_value="CPython"))
stack.enter_context(mock.patch.object(platform, "python_version", return_value="4.0.0"))
stack.enter_context(mock.patch.object(_about, "__version__", new="2.2.2"))
stack.enter_context(mock.patch.object(_about, "__git_sha1__", new="12345678901234567890"))
stack.enter_context(mock.patch.object(_about, "__copyright__", new="© 2020 Nekokatt"))
stack.enter_context(mock.patch.object(_about, "__license__", new="MIT"))
stack.enter_context(mock.patch.object(_about, "__file__", new="~/hikari"))
stack.enter_context(mock.patch.object(_about, "__docs__", new="https://nekokatt.github.io/hikari/docs"))
stack.enter_context(mock.patch.object(_about, "__discord_invite__", new="https://discord.gg/Jx4cNGG"))
stack.enter_context(mock.patch.object(_about, "__url__", new="https://nekokatt.github.io/hikari"))
with stack:
yield None
def test_when_supports_color(self, mock_args):
stack = contextlib.ExitStack()
stack.enter_context(
mock.patch.object(colorlog.escape_codes, "escape_codes", new={"red": 0, "green": 1, "blue": 2})
)
stack.enter_context(mock.patch.object(time, "sleep"))
supports_color = stack.enter_context(mock.patch.object(ux, "supports_color", return_value=True))
read_text = stack.enter_context(mock.patch.object(importlib.resources, "read_text"))
template = stack.enter_context(mock.patch.object(string, "Template"))
builtins_open = stack.enter_context(mock.patch.object(builtins, "open"))
abspath = stack.enter_context(mock.patch.object(os.path, "abspath", return_value="some path"))
dirname = stack.enter_context(mock.patch.object(os.path, "dirname"))
with stack:
ux.print_banner("hikari", True, False)
args = {
# Hikari stuff.
"hikari_version": "2.2.2",
"hikari_git_sha1": "12345678",
"hikari_copyright": "© 2020 Nekokatt",
"hikari_license": "MIT",
"hikari_install_location": "some path",
"hikari_documentation_url": "https://nekokatt.github.io/hikari/docs",
"hikari_discord_invite": "https://discord.gg/Jx4cNGG",
"hikari_source_url": "https://nekokatt.github.io/hikari",
"python_implementation": "CPython",
"python_version": "4.0.0",
"system_description": "Machine Potato 1.0.0",
"red": 0,
"green": 1,
"blue": 2,
}
template.assert_called_once_with(read_text())
template().safe_substitute.assert_called_once_with(args)
builtins_open.assert_called_once_with(1, "w", encoding="utf-8")
builtins_open.return_value.__enter__.return_value.write.assert_called_once_with(template().safe_substitute())
dirname.assert_called_once_with("~/hikari")
abspath.assert_called_once_with(dirname())
supports_color.assert_called_once_with(True, False)
def test_when_doesnt_supports_color(self, mock_args):
stack = contextlib.ExitStack()
stack.enter_context(
mock.patch.object(colorlog.escape_codes, "escape_codes", new={"red": 0, "green": 1, "blue": 2})
)
stack.enter_context(mock.patch.object(time, "sleep"))
supports_color = stack.enter_context(mock.patch.object(ux, "supports_color", return_value=False))
read_text = stack.enter_context(mock.patch.object(importlib.resources, "read_text"))
template = stack.enter_context(mock.patch.object(string, "Template"))
abspath = stack.enter_context(mock.patch.object(os.path, "abspath", return_value="some path"))
dirname = stack.enter_context(mock.patch.object(os.path, "dirname"))
builtins_open = stack.enter_context(mock.patch.object(builtins, "open"))
with stack:
ux.print_banner("hikari", True, False)
args = {
# Hikari stuff.
"hikari_version": "2.2.2",
"hikari_git_sha1": "12345678",
"hikari_copyright": "© 2020 Nekokatt",
"hikari_license": "MIT",
"hikari_install_location": "some path",
"hikari_documentation_url": "https://nekokatt.github.io/hikari/docs",
"hikari_discord_invite": "https://discord.gg/Jx4cNGG",
"hikari_source_url": "https://nekokatt.github.io/hikari",
"python_implementation": "CPython",
"python_version": "4.0.0",
"system_description": "Machine Potato 1.0.0",
"red": "",
"green": "",
"blue": "",
}
template.assert_called_once_with(read_text())
template().safe_substitute.assert_called_once_with(args)
dirname.assert_called_once_with("~/hikari")
abspath.assert_called_once_with(dirname())
supports_color.assert_called_once_with(True, False)
builtins_open.assert_called_once_with(1, "w", encoding="utf-8")
builtins_open.return_value.__enter__.return_value.write.assert_called_once_with(template().safe_substitute())
def test_use_extra_args(self, mock_args):
stack = contextlib.ExitStack()
stack.enter_context(mock.patch.object(colorlog.escape_codes, "escape_codes", new={}))
stack.enter_context(mock.patch.object(time, "sleep"))
read_text = stack.enter_context(mock.patch.object(importlib.resources, "read_text"))
template = stack.enter_context(mock.patch.object(string, "Template"))
builtins_open = stack.enter_context(mock.patch.object(builtins, "open"))
stack.enter_context(mock.patch.object(os.path, "abspath", return_value="some path"))
extra_args = {
"extra_argument_1": "one",
"extra_argument_2": "two",
}
with stack:
ux.print_banner("hikari", True, False, extra_args=extra_args)
args = {
# Hikari stuff.
"hikari_version": "2.2.2",
"hikari_git_sha1": "12345678",
"hikari_copyright": "© 2020 Nekokatt",
"hikari_license": "MIT",
"hikari_install_location": "some path",
"hikari_documentation_url": "https://nekokatt.github.io/hikari/docs",
"hikari_discord_invite": "https://discord.gg/Jx4cNGG",
"hikari_source_url": "https://nekokatt.github.io/hikari",
"python_implementation": "CPython",
"python_version": "4.0.0",
"system_description": "Machine Potato 1.0.0",
}
args.update(extra_args)
template.assert_called_once_with(read_text())
template().safe_substitute.assert_called_once_with(args)
builtins_open.assert_called_once_with(1, "w", encoding="utf-8")
builtins_open.return_value.__enter__.return_value.write.assert_called_once_with(template().safe_substitute())
def test_overwrite_args_raises_error(self, mock_args):
stack = contextlib.ExitStack()
stack.enter_context(mock.patch.object(time, "sleep"))
stack.enter_context(mock.patch.object(colorlog.escape_codes, "escape_codes", new={}))
stack.enter_context(mock.patch.object(importlib.resources, "read_text"))
stack.enter_context(mock.patch.object(string, "Template"))
stack.enter_context(mock.patch.object(sys.stdout, "write"))
stack.enter_context(mock.patch.object(os.path, "abspath", return_value="some path"))
extra_args = {
"hikari_version": "overwrite",
}
with stack:
with pytest.raises(
ValueError, match=r"Cannot overwrite \$-substitution `hikari_version`. Please use a different key."
):
ux.print_banner("hikari", True, False, extra_args=extra_args)
class TestSupportsColor:
def test_when_not_allow_color(self):
assert ux.supports_color(False, True) is False
def test_when_CLICOLOR_FORCE_in_env(self):
with mock.patch.dict(os.environ, {"CLICOLOR_FORCE": "1"}, clear=True):
assert ux.supports_color(True, False) is True
def test_when_force_color(self):
with mock.patch.dict(os.environ, {"CLICOLOR_FORCE": "0"}, clear=True):
assert ux.supports_color(True, True) is True
def test_when_CLICOLOR_and_is_a_tty(self):
with mock.patch.object(sys.stdout, "isatty", return_value=True):
with mock.patch.dict(os.environ, {"CLICOLOR_FORCE": "0", "CLICOLOR": "1"}, clear=True):
assert ux.supports_color(True, False) is True
def test_when_CLICOLOR_is_0(self):
with mock.patch.object(sys.stdout, "isatty", return_value=True):
with mock.patch.dict(os.environ, {"CLICOLOR_FORCE": "0", "CLICOLOR": "0"}, clear=True):
assert ux.supports_color(True, False) is False
@pytest.mark.parametrize("colorterm", ["truecolor", "24bit", "TRUECOLOR", "24BIT"])
def test_when_COLORTERM_has_correct_value(self, colorterm):
with mock.patch.dict(os.environ, {"COLORTERM": colorterm}, clear=True):
assert ux.supports_color(True, False) is True
def test_when_plat_is_Pocket_PC(self):
stack = contextlib.ExitStack()
stack.enter_context(mock.patch.dict(os.environ, {}, clear=True))
stack.enter_context(mock.patch.object(sys, "platform", new="Pocket PC"))
with stack:
assert ux.supports_color(True, False) is False
@pytest.mark.parametrize(
("term_program", "ansicon", "isatty", "expected"),
[
("mintty", False, True, True),
("Terminus", False, True, True),
("some other", True, True, True),
("some other", False, True, False),
("some other", False, False, False),
("mintty", True, False, False),
("Terminus", True, False, False),
],
)
def test_when_plat_is_win32(self, term_program, ansicon, isatty, expected):
environ = {"TERM_PROGRAM": term_program}
if ansicon:
environ["ANSICON"] = "ooga booga"
stack = contextlib.ExitStack()
stack.enter_context(mock.patch.dict(os.environ, environ, clear=True))
stack.enter_context(mock.patch.object(sys.stdout, "isatty", return_value=isatty))
stack.enter_context(mock.patch.object(sys, "platform", new="win32"))
with stack:
assert ux.supports_color(True, False) is expected
@pytest.mark.parametrize("isatty", [True, False])
def test_when_plat_is_not_win32(self, isatty):
stack = contextlib.ExitStack()
stack.enter_context(mock.patch.dict(os.environ, {}, clear=True))
stack.enter_context(mock.patch.object(sys.stdout, "isatty", return_value=isatty))
stack.enter_context(mock.patch.object(sys, "platform", new="linux"))
with stack:
assert ux.supports_color(True, False) is isatty
@pytest.mark.parametrize("isatty", [True, False])
@pytest.mark.parametrize("plat", ["linux", "win32"])
def test_when_PYCHARM_HOSTED(self, isatty, plat):
stack = contextlib.ExitStack()
stack.enter_context(mock.patch.dict(os.environ, {"PYCHARM_HOSTED": "OOGA BOOGA"}, clear=True))
stack.enter_context(mock.patch.object(sys.stdout, "isatty", return_value=isatty))
stack.enter_context(mock.patch.object(sys, "platform", new=plat))
with stack:
assert ux.supports_color(True, False) is True
@pytest.mark.parametrize("isatty", [True, False])
@pytest.mark.parametrize("plat", ["linux", "win32"])
def test_when_WT_SESSION(self, isatty, plat):
stack = contextlib.ExitStack()
stack.enter_context(mock.patch.dict(os.environ, {"WT_SESSION": "OOGA BOOGA"}, clear=True))
stack.enter_context(mock.patch.object(sys.stdout, "isatty", return_value=isatty))
stack.enter_context(mock.patch.object(sys, "platform", new=plat))
with stack:
assert ux.supports_color(True, False) is True
class TestHikariVersion:
@pytest.mark.parametrize("v", ["1", "1.0.0dev2"])
def test_init_when_version_number_is_invalid(self, v):
with pytest.raises(ValueError, match=rf"Invalid version: '{v}'"):
ux.HikariVersion(v)
def test_init_when_prerelease(self):
assert ux.HikariVersion("1.2.3.dev99").prerelease == (".dev", 99)
def test_init_when_no_prerelease(self):
assert ux.HikariVersion("1.2.3").prerelease is None
def test_str_when_prerelease(self):
assert str(ux.HikariVersion("1.2.3.dev99")) == "1.2.3.dev99"
def test_str_when_no_prerelease(self):
assert str(ux.HikariVersion("1.2.3")) == "1.2.3"
def test_repr(self):
assert repr(ux.HikariVersion("1.2.3.dev99")) == "HikariVersion('1.2.3.dev99')"
@pytest.mark.parametrize(
("other", "result"),
[
(ux.HikariVersion("1.2.3.dev99"), True),
(ux.HikariVersion("42.212.4.dev99"), False),
(ux.HikariVersion("1.2.3.dev98"), False),
(ux.HikariVersion("1.2.3"), False),
],
)
def test_eq(self, other, result):
assert (ux.HikariVersion("1.2.3.dev99") == other) is result
@pytest.mark.parametrize(
("other", "result"),
[
(ux.HikariVersion("1.2.3.dev99"), False),
(ux.HikariVersion("42.212.4.dev99"), True),
(ux.HikariVersion("1.2.3.dev98"), True),
(ux.HikariVersion("1.2.3"), True),
],
)
def test_ne(self, other, result):
assert (ux.HikariVersion("1.2.3.dev99") != other) is result
@pytest.mark.parametrize(
("other", "result"),
[
(ux.HikariVersion("1.2.3.dev99"), False),
(ux.HikariVersion("42.212.4.dev99"), True),
(ux.HikariVersion("1.2.3.dev98"), False),
(ux.HikariVersion("1.2.3"), True),
],
)
def test_lt(self, other, result):
assert (ux.HikariVersion("1.2.3.dev99") < other) is result
@pytest.mark.parametrize(
("other", "result"),
[
(ux.HikariVersion("1.2.3.dev99"), True),
(ux.HikariVersion("42.212.4.dev99"), True),
(ux.HikariVersion("1.2.3.dev98"), False),
(ux.HikariVersion("1.2.3"), True),
],
)
def test_le(self, other, result):
assert (ux.HikariVersion("1.2.3.dev99") <= other) is result
@pytest.mark.parametrize(
("other", "result"),
[
(ux.HikariVersion("1.2.3.dev99"), False),
(ux.HikariVersion("42.212.4.dev99"), False),
(ux.HikariVersion("1.2.3.dev98"), True),
(ux.HikariVersion("1.2.3"), False),
],
)
def test_gt(self, other, result):
assert (ux.HikariVersion("1.2.3.dev99") > other) is result
@pytest.mark.parametrize(
("other", "result"),
[
(ux.HikariVersion("1.2.3.dev99"), True),
(ux.HikariVersion("42.212.4.dev99"), False),
(ux.HikariVersion("1.2.3.dev98"), True),
(ux.HikariVersion("1.2.3"), False),
],
)
def test_ge(self, other, result):
assert (ux.HikariVersion("1.2.3.dev99") >= other) is result
@pytest.mark.asyncio()
class TestCheckForUpdates:
@pytest.fixture()
def http_settings(self):
return mock.Mock(spec_set=config.HTTPSettings)
@pytest.fixture()
def proxy_settings(self):
return mock.Mock(spec_set=config.ProxySettings)
async def test_when_not_official_pypi_release(self, http_settings, proxy_settings):
stack = contextlib.ExitStack()
logger = stack.enter_context(mock.patch.object(ux, "_LOGGER"))
create_client_session = stack.enter_context(mock.patch.object(net, "create_client_session"))
stack.enter_context(mock.patch.object(_about, "__git_sha1__", new="HEAD"))
with stack:
await ux.check_for_updates(http_settings=http_settings, proxy_settings=proxy_settings)
logger.debug.assert_not_called()
logger.info.assert_not_called()
create_client_session.assert_not_called()
async def test_when_error_fetching(self, http_settings, proxy_settings):
ex = RuntimeError("testing")
stack = contextlib.ExitStack()
logger = stack.enter_context(mock.patch.object(ux, "_LOGGER"))
stack.enter_context(mock.patch.object(_about, "__git_sha1__", new="1234567890"))
create_client_session = stack.enter_context(mock.patch.object(net, "create_client_session", side_effect=ex))
create_tcp_connector = stack.enter_context(mock.patch.object(net, "create_tcp_connector"))
with stack:
await ux.check_for_updates(http_settings=http_settings, proxy_settings=proxy_settings)
logger.debug.assert_called_once_with("Failed to fetch hikari version details", exc_info=ex)
create_tcp_connector.assert_called_once_with(dns_cache=False, limit=1, http_settings=http_settings)
create_client_session.assert_called_once_with(
connector=create_tcp_connector(),
connector_owner=True,
http_settings=http_settings,
raise_for_status=True,
trust_env=proxy_settings.trust_env,
)
async def test_when_no_new_available_releases(self, http_settings, proxy_settings):
data = {
"releases": {
"0.1.0": [{"yanked": False}],
"1.0.0": [{"yanked": False}],
"1.0.0.dev1": [{"yanked": False}],
"1.0.1": [{"yanked": True}],
}
}
_request = hikari_test_helpers.AsyncContextManagerMock()
_request.json = mock.AsyncMock(return_value=data)
_client_session = hikari_test_helpers.AsyncContextManagerMock()
_client_session.get = mock.Mock(return_value=_request)
stack = contextlib.ExitStack()
logger = stack.enter_context(mock.patch.object(ux, "_LOGGER"))
create_client_session = stack.enter_context(
mock.patch.object(net, "create_client_session", return_value=_client_session)
)
create_tcp_connector = stack.enter_context(mock.patch.object(net, "create_tcp_connector"))
stack.enter_context(mock.patch.object(_about, "__version__", new="1.0.0"))
stack.enter_context(mock.patch.object(_about, "__git_sha1__", new="1234567890"))
with stack:
await ux.check_for_updates(http_settings=http_settings, proxy_settings=proxy_settings)
logger.debug.assert_not_called()
logger.info.assert_not_called()
create_tcp_connector.assert_called_once_with(dns_cache=False, limit=1, http_settings=http_settings)
create_client_session.assert_called_once_with(
connector=create_tcp_connector(),
connector_owner=True,
http_settings=http_settings,
raise_for_status=True,
trust_env=proxy_settings.trust_env,
)
_client_session.get.assert_called_once_with(
"https://pypi.org/pypi/hikari/json",
allow_redirects=http_settings.max_redirects is not None,
max_redirects=http_settings.max_redirects,
proxy=proxy_settings.url,
proxy_headers=proxy_settings.all_headers,
)
@pytest.mark.parametrize("v", ["1.0.1", "1.0.1.dev10"])
async def test_check_for_updates(self, v, http_settings, proxy_settings):
data = {
"releases": {
"0.1.0": [{"yanked": False}],
"1.0.0": [{"yanked": False}],
"1.0.0.dev1": [{"yanked": False}],
v: [{"yanked": False}, {"yanked": True}],
"1.0.2": [{"yanked": True}],
}
}
_request = hikari_test_helpers.AsyncContextManagerMock()
_request.json = mock.AsyncMock(return_value=data)
_client_session = hikari_test_helpers.AsyncContextManagerMock()
_client_session.get = mock.Mock(return_value=_request)
stack = contextlib.ExitStack()
logger = stack.enter_context(mock.patch.object(ux, "_LOGGER"))
create_client_session = stack.enter_context(
mock.patch.object(net, "create_client_session", return_value=_client_session)
)
create_tcp_connector = stack.enter_context(mock.patch.object(net, "create_tcp_connector"))
stack.enter_context(mock.patch.object(_about, "__version__", new="1.0.0.dev1"))
stack.enter_context(mock.patch.object(_about, "__git_sha1__", new="1234567890"))
with stack:
await ux.check_for_updates(http_settings=http_settings, proxy_settings=proxy_settings)
logger.debug.assert_not_called()
logger.info.assert_called_once_with(
"A newer version of hikari is available, consider upgrading to %s", ux.HikariVersion(v)
)
create_tcp_connector.assert_called_once_with(dns_cache=False, limit=1, http_settings=http_settings)
create_client_session.assert_called_once_with(
connector=create_tcp_connector(),
connector_owner=True,
http_settings=http_settings,
raise_for_status=True,
trust_env=proxy_settings.trust_env,
)
_client_session.get.assert_called_once_with(
"https://pypi.org/pypi/hikari/json",
allow_redirects=http_settings.max_redirects is not None,
max_redirects=http_settings.max_redirects,
proxy=proxy_settings.url,
proxy_headers=proxy_settings.all_headers,
)
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
p2pmc/p2pmc/wsgi.py
|
"""
WSGI config for p2pmc project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'p2pmc.settings')
application = get_wsgi_application()
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
AGD_ST/search/train.py
|
from __future__ import division
import os
import sys
# import time
import glob
import logging
from tqdm import tqdm
import torch
import torch.nn as nn
import torch.utils
# import torch.nn.functional as F
# import torch.backends.cudnn as cudnn
import torchvision.transforms as transforms
from torch.utils.data import DataLoader
from torch.autograd import Variable
from tensorboardX import SummaryWriter
from torchvision.utils import save_image
import numpy as np
import matplotlib
# from matplotlib import pyplot as plt
# from PIL import Image
from config_train import config
from datasets import ImageDataset, PairedImageDataset
# from utils.init_func import init_weight
from utils.darts_utils import (
create_exp_dir,
save,
# plot_op,
# plot_path_width,
# objective_acc_lat,
)
# from model_search import NAS_GAN as Network
from model_infer import NAS_GAN_Infer
# from util_gan.cyclegan import Generator
from util_gan.fid_score import compute_fid
from util_gan.lr import LambdaLR
from quantize import QConv2d, QConvTranspose2d, QuantMeasure
from thop import profile
from thop.count_hooks import count_convNd
# Use wandb
import wandb
from datetime import datetime
from maestro_helpers import is_maestro, with_maestro
# Force matplotlib to not use any Xwindows backend.
matplotlib.use("Agg")
def count_custom(m, x, y):
m.total_ops += 0
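# Zero-cost stub for thop: QConv/QConvTranspose layers are counted via count_convNd below, while QuantMeasure and InstanceNorm2d contribute no FLOPs.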
custom_ops = {
QConv2d: count_convNd,
QConvTranspose2d: count_convNd,
QuantMeasure: count_custom,
nn.InstanceNorm2d: count_custom,
}
def main():
# load env configs
config.USE_MAESTRO = is_maestro()
config.TEST_RUN = os.environ.get("TEST_RUN", "0") == "1"
config.stage = "train"
if config.TEST_RUN is True:
logging.info("Running in TEST_RUN mode for 2 epochs.")
config.nepochs = 2
config.eval_epoch = 1
config.decay_epoch = 1
config.niters_per_epoch = 2
try:
config.seed = int(os.environ.get("RNG_SEED", "12345"))
except Exception:
print("WARNING USING 'NONE' SEED AS 'RNG_SEED' WAS NOT INTEGER...!!!")
config.seed = None
# wandb run
wandb.init(
project="AGD_Maestro",
name=f"{config.dataset}-{config.stage}-{'with' if config.USE_MAESTRO is True else 'without'}_maestro",
tags=[config.dataset, "AGD", config.stage]
+ (["maestro"] if config.USE_MAESTRO is True else []),
entity="rcai",
group=os.environ.get("WANDB_GROUP", None) or f"AGD_Maestro ({datetime.now()})",
job_type=f"Stage {config.stage}",
reinit=True,
sync_tensorboard=True,
save_code=True,
mode="disabled" if config.TEST_RUN is True else "online",
)
config.save = "ckpt/{}".format(config.save)
create_exp_dir(config.save, scripts_to_save=glob.glob("*.py") + glob.glob("*.sh"))
logger = SummaryWriter(config.save)
log_format = "%(asctime)s %(message)s"
logging.basicConfig(
stream=sys.stdout,
level=logging.INFO,
format=log_format,
datefmt="%m/%d %I:%M:%S %p",
)
fh = logging.FileHandler(os.path.join(config.save, "log.txt"))
fh.setFormatter(logging.Formatter(log_format))
logging.getLogger().addHandler(fh)
logging.info("args = %s", str(config))
# preparation ################
torch.backends.cudnn.enabled = True
torch.backends.cudnn.benchmark = True
seed = config.seed
np.random.seed(seed)
torch.manual_seed(seed)
if torch.cuda.is_available():
torch.cuda.manual_seed(seed)
state = torch.load(os.path.join(config.load_path, "arch.pt"))
# Model #######################################
model = NAS_GAN_Infer(
state["alpha"],
state["beta"],
state["ratio"],
state["beta_sh"],
state["ratio_sh"],
layers=config.layers,
width_mult_list=config.width_mult_list,
width_mult_list_sh=config.width_mult_list_sh,
loss_weight=config.loss_weight,
quantize=config.quantize,
)
flops, params = profile(
model, inputs=(torch.randn(1, 3, 256, 256),), custom_ops=custom_ops
)
with with_maestro(False):
flops = model.forward_flops(size=(3, 256, 256))
with with_maestro(True):
energy = model.forward_flops(size=(3, 256, 256))
logging.info(
"params = %fMB, FLOPs = %fGB, energy = %fGB",
params / 1e6,
flops / 1e9,
energy / 1e9,
)
model = torch.nn.DataParallel(model).cuda()
if type(config.pretrain) == str:
state_dict = torch.load(config.pretrain)
model.load_state_dict(state_dict)
# else:
# features = [model.module.stem, model.module.cells, model.module.header]
# init_weight(features, nn.init.kaiming_normal_, nn.InstanceNorm2d, config.bn_eps, config.bn_momentum, mode='fan_in', nonlinearity='relu')
# teacher_model = Generator(3, 3)
# teacher_model.load_state_dict(torch.load(config.generator_A2B))
# teacher_model = torch.nn.DataParallel(teacher_model).cuda()
# for param in teacher_model.parameters():
# param.require_grads = False
# Optimizer ###################################
base_lr = config.lr
parameters = []
parameters += list(model.module.stem.parameters())
parameters += list(model.module.cells.parameters())
parameters += list(model.module.header.parameters())
if config.opt == "Adam":
optimizer = torch.optim.Adam(parameters, lr=base_lr, betas=config.betas)
elif config.opt == "Sgd":
optimizer = torch.optim.SGD(
parameters,
lr=base_lr,
momentum=config.momentum,
weight_decay=config.weight_decay,
)
else:
logging.info("Wrong Optimizer Type.")
sys.exit()
# lr policy ##############################
total_iteration = config.nepochs * config.niters_per_epoch # noqa: F841
if config.lr_schedule == "linear":
lr_policy = torch.optim.lr_scheduler.LambdaLR(
optimizer, lr_lambda=LambdaLR(config.nepochs, 0, config.decay_epoch).step
)
elif config.lr_schedule == "exponential":
lr_policy = torch.optim.lr_scheduler.ExponentialLR(optimizer, config.lr_decay)
elif config.lr_schedule == "multistep":
lr_policy = torch.optim.lr_scheduler.MultiStepLR(
optimizer, milestones=config.milestones, gamma=config.gamma
)
else:
logging.info("Wrong Learning Rate Schedule Type.")
sys.exit()
# data loader ############################
transforms_ = [
# transforms.Resize(int(config.image_height*1.12), Image.BICUBIC),
# transforms.RandomCrop(config.image_height),
# transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
]
# train_loader_model = DataLoader(ImageDataset(config.dataset_path, transforms_=transforms_, unaligned=True),
# batch_size=config.batch_size, shuffle=True, num_workers=config.num_workers)
train_loader_model = DataLoader(
PairedImageDataset(
config.dataset_path, config.target_path, transforms_=transforms_
),
batch_size=config.batch_size,
shuffle=True,
num_workers=config.num_workers,
)
transforms_ = [
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
]
test_loader = DataLoader(
ImageDataset(config.dataset_path, transforms_=transforms_, mode="test"),
batch_size=1,
shuffle=False,
num_workers=config.num_workers,
)
if config.eval_only:
logging.info("Eval: fid = %f", infer(0, model, test_loader, logger))
sys.exit(0)
best_fid = 1000
best_epoch = 0
# wandb hooks
wandb.config.update(config)
wandb.watch(
model,
log=None,
log_graph=True,
)
tbar = tqdm(range(config.nepochs), ncols=80)
# epoch loop, train loop
for epoch in tbar:
logging.info(config.save)
logging.info("lr: " + str(optimizer.param_groups[0]["lr"]))
# training
tbar.set_description("[Epoch %d/%d][train...]" % (epoch + 1, config.nepochs))
train(train_loader_model, model, optimizer, lr_policy, logger, epoch)
torch.cuda.empty_cache()
lr_policy.step()
# validation
if epoch and not (epoch + 1) % config.eval_epoch:
tbar.set_description(
"[Epoch %d/%d][validation...]" % (epoch + 1, config.nepochs)
)
with torch.no_grad():
valid_fid = infer(epoch, model, test_loader, logger)
if valid_fid < best_fid:
best_fid = valid_fid
best_epoch = epoch
logger.add_scalar("fid/val", valid_fid, epoch)
logging.info("Epoch %d: valid_fid %.3f" % (epoch, valid_fid))
logger.add_scalar("flops/val", flops, epoch)
logging.info("Epoch %d: flops %.3f" % (epoch, flops))
logger.add_scalar("energy/val", energy, epoch)
logging.info("Epoch %d: energy %.3f" % (epoch, energy))
logging.info("Best fid:%.3f, Best epoch:%d" % (best_fid, best_epoch))
save(model, os.path.join(config.save, "weights_%d.pt" % epoch))
save(model, os.path.join(config.save, "weights.pt"))
def train(train_loader_model, model, optimizer, lr_policy, logger, epoch):
model.train()
bar_format = "{desc}[{elapsed}<{remaining},{rate_fmt}]"
pbar = tqdm(
range(config.niters_per_epoch), file=sys.stdout, bar_format=bar_format, ncols=80
)
dataloader_model = iter(train_loader_model)
for step in pbar:
lr = optimizer.param_groups[0]["lr"] # noqa: F841
optimizer.zero_grad()
minibatch = next(dataloader_model)
input = minibatch["A"]
input = input.cuda(non_blocking=True)
target = minibatch["B"]
target = target.cuda(non_blocking=True)
loss = model.module._loss(input, target)
logger.add_scalar("loss/train", loss, epoch * len(pbar) + step)
loss.backward()
nn.utils.clip_grad_norm_(model.parameters(), config.grad_clip)
optimizer.step()
optimizer.zero_grad()
pbar.set_description("[Step %d/%d]" % (step + 1, len(train_loader_model)))
torch.cuda.empty_cache()
del loss
def infer(epoch, model, test_loader, logger):
model.eval()
outdir = "output/gen_epoch_%d" % (epoch)
if not os.path.exists(outdir):
os.makedirs(outdir)
for i, batch in enumerate(test_loader):
# Set model input
real_A = Variable(batch["A"]).cuda()
fake_B = 0.5 * (model(real_A).data + 1.0)
save_image(fake_B, os.path.join(outdir, "%04d.png" % (i + 1)))
fid = compute_fid(outdir, config.dataset_path + "/test/B")
os.rename(outdir, outdir + "_%.3f" % (fid))
return fid
if __name__ == "__main__":
main()
|
[] |
[] |
[
"RNG_SEED",
"TEST_RUN",
"WANDB_GROUP"
] |
[]
|
["RNG_SEED", "TEST_RUN", "WANDB_GROUP"]
|
python
| 3 | 0 | |
internal/markdown/issue_test.go
|
package markdown
import (
"backlog/internal/backlog"
"encoding/json"
"io/ioutil"
"log"
"os"
"testing"
"github.com/moutend/go-backlog/pkg/types"
)
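// These are integration-style tests: they expect BACKLOG_SPACE and BACKLOG_TOKEN to be set,
// and TestIssueUnmarshal additionally reads a local fixture at /tmp/sdc/sdc-212.md.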
func TestIssueUnmarshal(t *testing.T) {
space := os.Getenv("BACKLOG_SPACE")
token := os.Getenv("BACKLOG_TOKEN")
backlog.Setup(space, token)
backlog.SetDebug(true)
data, err := ioutil.ReadFile("/tmp/sdc/sdc-212.md")
if err != nil {
t.Fatal(err)
}
i := &Issue{}
if err := i.Unmarshal(data); err != nil {
t.Fatal(err)
}
output, err := json.Marshal(i)
if err != nil {
t.Fatal(err)
}
log.Printf("JSON: %s\n", output)
}
func TestIssueMarshal(t *testing.T) {
space := os.Getenv("BACKLOG_SPACE")
token := os.Getenv("BACKLOG_TOKEN")
backlog.Setup(space, token)
backlog.SetDebug(true)
i := &Issue{
Project: &types.Project{
ProjectKey: "LIFE",
},
Issue: &types.Issue{
IssueKey: "LIFE-123",
Summary: "Issue Summary",
Description: "Issue description",
StartDate: types.NewDate("2020-02-02"),
DueDate: types.NewDate("2020-02-02"),
EstimatedHours: types.NewHours(1.5),
ActualHours: types.NewHours(1.5),
Priority: &types.Priority{
Name: "High",
},
Status: &types.ProjectStatus{
Name: "Ongoing",
},
},
}
output, err := i.Marshal()
if err != nil {
t.Fatal(err)
}
log.Printf("markdown: %s\n", output)
}
|
[
"\"BACKLOG_SPACE\"",
"\"BACKLOG_TOKEN\"",
"\"BACKLOG_SPACE\"",
"\"BACKLOG_TOKEN\""
] |
[] |
[
"BACKLOG_TOKEN",
"BACKLOG_SPACE"
] |
[]
|
["BACKLOG_TOKEN", "BACKLOG_SPACE"]
|
go
| 2 | 0 | |
pkg/query-service/constants/constants.go
|
package constants
import (
"os"
"strconv"
)
const HTTPHostPort = "0.0.0.0:8080"
var DruidClientUrl = os.Getenv("DruidClientUrl")
var DruidDatasource = os.Getenv("DruidDatasource")
var DEFAULT_TELEMETRY_ANONYMOUS = false
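// IsTelemetryEnabled reports whether telemetry is enabled; it defaults to true
// when TELEMETRY_ENABLED is unset or cannot be parsed as a bool.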
func IsTelemetryEnabled() bool {
isTelemetryEnabledStr := os.Getenv("TELEMETRY_ENABLED")
isTelemetryEnabledBool, err := strconv.ParseBool(isTelemetryEnabledStr)
if err != nil {
return true
}
return isTelemetryEnabledBool
}
const TraceTTL = "traces"
const MetricsTTL = "metrics"
const ALERTMANAGER_API_PREFIX = "http://alertmanager:9093/api/"
const RELATIONAL_DATASOURCE_PATH = "/var/lib/signoz/signoz.db"
const (
ServiceName = "serviceName"
HttpRoute = "httpRoute"
HttpCode = "httpCode"
HttpHost = "httpHost"
HttpUrl = "httpUrl"
HttpMethod = "httpMethod"
Component = "component"
OperationDB = "name"
OperationRequest = "operation"
)
|
[
"\"DruidClientUrl\"",
"\"DruidDatasource\"",
"\"TELEMETRY_ENABLED\""
] |
[] |
[
"TELEMETRY_ENABLED",
"DruidDatasource",
"DruidClientUrl"
] |
[]
|
["TELEMETRY_ENABLED", "DruidDatasource", "DruidClientUrl"]
|
go
| 3 | 0 | |
test/e2e/command/botkube.go
|
// Copyright (c) 2019 InfraCloud Technologies
//
// Permission is hereby granted, free of charge, to any person obtaining a copy of
// this software and associated documentation files (the "Software"), to deal in
// the Software without restriction, including without limitation the rights to
// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
// the Software, and to permit persons to whom the Software is furnished to do so,
// subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
// FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
// COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
// IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
// CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
package command
import (
"encoding/json"
"fmt"
"os"
"strings"
"testing"
"github.com/infracloudio/botkube/pkg/config"
"github.com/infracloudio/botkube/pkg/execute"
"github.com/infracloudio/botkube/test/e2e/utils"
"github.com/nlopes/slack"
"github.com/stretchr/testify/assert"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
type botkubeCommand struct {
command string
expected string
}
// Send botkube command via Slack message and check if BotKube returns correct response
func (c *context) testBotkubeCommand(t *testing.T) {
botkubeVersion := os.Getenv("BOTKUBE_VERSION")
// Test cases
tests := map[string]botkubeCommand{
"BotKube ping": {
command: "ping",
expected: fmt.Sprintf("```pong from cluster '%s'\n\nK8s Server Version: %s\nBotKube version: %s```", c.Config.Settings.ClusterName, execute.K8sVersion, botkubeVersion),
},
"BotKube filters list": {
command: "filters list",
expected: "FILTER ENABLED DESCRIPTION\n" +
"NamespaceChecker true Checks if event belongs to blocklisted namespaces and filter them.\n" +
"NodeEventsChecker true Sends notifications on node level critical events.\n" +
"ObjectAnnotationChecker true Checks if annotations botkube.io/* present in object specs and filters them.\n" +
"PodLabelChecker true Checks and adds recommedations if labels are missing in the pod specs.\n" +
"ImageTagChecker true Checks and adds recommendation if 'latest' image tag is used for container image.\n" +
"IngressValidator true Checks if services and tls secrets used in ingress specs are available.\n",
},
"BotKube commands list": {
command: "commands list",
expected: "allowed verbs:\n" +
" - api-resources\n" +
" - describe\n" +
" - diff\n" +
" - explain\n" +
" - get\n" +
" - logs\n" +
" - api-versions\n" +
" - cluster-info\n" +
" - top\n" +
" - auth\n" +
"allowed resources:\n" +
" - nodes\n" +
" - deployments\n" +
" - pods\n" +
" - namespaces\n" +
" - daemonsets\n" +
" - statefulsets\n" +
" - storageclasses\n",
},
}
for name, test := range tests {
t.Run(name, func(t *testing.T) {
if c.TestEnv.Config.Communications.Slack.Enabled {
// Send message to a channel
c.SlackServer.SendMessageToBot(c.Config.Communications.Slack.Channel, test.command)
// Get last seen slack message
lastSeenMsg := c.GetLastSeenSlackMessage()
// Convert text message into Slack message structure
m := slack.Message{}
err := json.Unmarshal([]byte(*lastSeenMsg), &m)
assert.NoError(t, err, "message should decode properly")
assert.Equal(t, c.Config.Communications.Slack.Channel, m.Channel)
switch test.command {
case "filters list":
fl := compareFilters(strings.Split(test.expected, "\n"), strings.Split(strings.Trim(m.Text, "```"), "\n"))
assert.Equal(t, fl, true)
case "commands list":
cl := compareFilters(strings.Split(test.expected, "\n"), strings.Split(strings.Trim(m.Text, "```"), "\n"))
assert.Equal(t, cl, true)
default:
assert.Equal(t, test.expected, m.Text)
}
}
})
}
}
func compareFilters(expected, actual []string) bool {
if len(expected) != len(actual) {
return false
}
// Compare slices
for _, a := range actual {
found := false
for _, e := range expected {
if a == e {
found = true
break
}
}
if !found {
return false
}
}
return true
}
// Test disable notification with BotKube notifier command
// - disable notifier with '@BotKube notifier stop'
// - create pod and verify BotKube doesn't send notification
// - enable notifier with '@BotKube notifier start'
func (c *context) testNotifierCommand(t *testing.T) {
// Disable notifier with @BotKube notifier stop
t.Run("disable notifier", func(t *testing.T) {
if c.TestEnv.Config.Communications.Slack.Enabled {
// Send message to a channel
c.SlackServer.SendMessageToBot(c.Config.Communications.Slack.Channel, "notifier stop")
// Get last seen slack message
lastSeenMsg := c.GetLastSeenSlackMessage()
// Convert text message into Slack message structure
m := slack.Message{}
err := json.Unmarshal([]byte(*lastSeenMsg), &m)
assert.NoError(t, err, "message should decode properly")
assert.Equal(t, c.Config.Communications.Slack.Channel, m.Channel)
assert.Equal(t, fmt.Sprintf("```Sure! I won't send you notifications from cluster '%s' anymore.```", c.Config.Settings.ClusterName), m.Text)
assert.Equal(t, config.Notify, false)
}
})
// Create pod and verify that BotKube is not sending notifications
pod := utils.CreateObjects{
Kind: "pod",
Namespace: "test",
Specs: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "test-pod-notifier"}},
ExpectedSlackMessage: utils.SlackMessage{
Attachments: []slack.Attachment{{Color: "good", Fields: []slack.AttachmentField{{Title: "Pod create", Value: "Pod `test-pod` in of cluster `test-cluster-1`, namespace `test` has been created:\n```Resource created\nRecommendations:\n- pod 'test-pod' creation without labels should be avoided.\n```", Short: false}}, Footer: "BotKube"}},
},
}
t.Run("create resource", func(t *testing.T) {
// Inject an event into the fake client.
utils.CreateResource(t, pod)
if c.TestEnv.Config.Communications.Slack.Enabled {
// Get last seen slack message
lastSeenMsg := c.GetLastSeenSlackMessage()
// Convert text message into Slack message structure
m := slack.Message{}
err := json.Unmarshal([]byte(*lastSeenMsg), &m)
assert.NoError(t, err, "message should decode properly")
assert.Equal(t, c.Config.Communications.Slack.Channel, m.Channel)
assert.NotEqual(t, pod.ExpectedSlackMessage.Attachments, m.Attachments)
}
})
// Revert and Enable notifier
t.Run("Enable notifier", func(t *testing.T) {
if c.TestEnv.Config.Communications.Slack.Enabled {
// Send message to a channel
c.SlackServer.SendMessageToBot(c.Config.Communications.Slack.Channel, "notifier start")
// Get last seen slack message
lastSeenMsg := c.GetLastSeenSlackMessage()
// Convert text message into Slack message structure
m := slack.Message{}
err := json.Unmarshal([]byte(*lastSeenMsg), &m)
assert.NoError(t, err, "message should decode properly")
assert.Equal(t, c.Config.Communications.Slack.Channel, m.Channel)
assert.Equal(t, fmt.Sprintf("```Brace yourselves, notifications are coming from cluster '%s'.```", c.Config.Settings.ClusterName), m.Text)
assert.Equal(t, config.Notify, true)
}
})
}
|
[
"\"BOTKUBE_VERSION\""
] |
[] |
[
"BOTKUBE_VERSION"
] |
[]
|
["BOTKUBE_VERSION"]
|
go
| 1 | 0 | |
src/cauliflowervest/server/settings.py
|
#!/usr/bin/env python
#
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Configurable settings module for the server."""
import base64
import os
import sys
from cauliflowervest.server import permissions
DEBUG = False
DEVELOPMENT = ('Development' in os.environ.get('SERVER_SOFTWARE', '')
and 'testbed' not in os.environ.get('SERVER_SOFTWARE', ''))
TEST = 'unittest2' in sys.modules or 'unittest' in sys.modules
DEFAULT_CRYPTO_BACKEND = 'keyczar'
DEFAULT_EMAIL_DOMAIN = 'example.com'
DEFAULT_EMAIL_SENDER = '[email protected]'
DEFAULT_EMAIL_REPLY_TO = '[email protected]'
# These are the default permissions that are conferred to all domain users. This
# replaces the old ALLOW_ALL_DOMAIN_USERS_TO_ESCROW setting. To achieve the same
# effect, simply add or remove permissions.ESCROW to the set of default perms
# for the relevant key types below.
DEFAULT_PERMISSIONS = {
permissions.TYPE_BITLOCKER: (permissions.ESCROW,),
permissions.TYPE_DUPLICITY: (permissions.ESCROW, permissions.RETRIEVE_OWN),
permissions.TYPE_FILEVAULT: (permissions.ESCROW, permissions.RETRIEVE_OWN),
permissions.TYPE_LUKS: (permissions.ESCROW, permissions.RETRIEVE_OWN),
permissions.TYPE_PROVISIONING: (permissions.ESCROW,
permissions.RETRIEVE_OWN,
permissions.RETRIEVE_CREATED_BY),
}
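# For example (hypothetical tweak): dropping permissions.ESCROW from the TYPE_BITLOCKER tuple
# above would stop granting BitLocker escrow to all domain users by default.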
GROUPS = {
permissions.TYPE_BITLOCKER: [
('front-line-support', (permissions.RETRIEVE,)),
('developers', (permissions.SET_REGULAR,)),
('security-team', (permissions.SET_SILENT,)),
],
permissions.TYPE_FILEVAULT: [
('front-line-support', (permissions.RETRIEVE,
permissions.CHANGE_OWNER)),
('developers', (permissions.SET_REGULAR,)),
('security-team', (permissions.SET_SILENT,)),
],
}
KEY_TYPE_DATASTORE_FILEVAULT = 'key_type_datastore_filevault'
KEY_TYPE_DEFAULT_FILEVAULT = KEY_TYPE_DATASTORE_FILEVAULT
KEY_TYPE_DATASTORE_XSRF = 'key_type_datastore_xsrf'
KEY_TYPE_DEFAULT_XSRF = KEY_TYPE_DATASTORE_XSRF
# Turn to False to support v0.8 clients.
XSRF_PROTECTION_ENABLED = True
# The DEMO_KEYS list is purely for example only. See the CauliflowerVest
# Google Code documentation for more information on how to integrate enterprise
# key servers.
DEMO_KEYS = [
{'versionNumber': 1,
'aesKeyString': base64.urlsafe_b64encode('16_byte_string__'),
'aesKeySize': 128,
'hmacKeyString': base64.urlsafe_b64encode(
'32_byte_string_bbbbbbbbbbbbbbbbb'),
'hmacKeySize': 256,
'status': 'PRIMARY',
},
]
# This DEMO value should be kept secret and safe in a similar manner.
DEMO_XSRF_SECRET = os.environ.get('CURRENT_VERSION_ID', 'random_default_value')
# These email addresses will be notified when a user of the named permission
# fetches a passphrase, in addition to the default behavior.
RETRIEVE_AUDIT_ADDRESSES = []
SILENT_AUDIT_ADDRESSES = []
HELPDESK_NAME = 'helpdesk'
HELPDESK_EMAIL = '[email protected]'
BITLOCKER_RETRIEVAL_EMAIL_SUBJECT = (
'BitLocker Windows disk encryption recovery key retrieval notification.')
DUPLICITY_RETRIEVAL_EMAIL_SUBJECT = (
'Duplicity Linux backup encryption key pair retrieval notification.')
FILEVAULT_RETRIEVAL_EMAIL_SUBJECT = (
'FileVault 2 Mac disk encryption passphrase retrieval notification.')
LUKS_RETRIEVAL_EMAIL_SUBJECT = (
'Luks Linux disk encryption passphrase retrieval notification.')
PROVISIONING_RETRIEVAL_EMAIL_SUBJECT = (
'Provisioning password retrieval notification.')
|
[] |
[] |
[
"CURRENT_VERSION_ID",
"SERVER_SOFTWARE"
] |
[]
|
["CURRENT_VERSION_ID", "SERVER_SOFTWARE"]
|
python
| 2 | 0 | |
sunpy_sphinx_theme/conf.py
|
import os
import socket
from urllib.parse import urljoin
from sunpy_sphinx_theme import get_html_theme_path
html_theme_path = get_html_theme_path()
html_theme = "sunpy"
html_static_path = [os.path.join(html_theme_path[0], html_theme, "static")]
html_extra_path = [os.path.join(html_theme_path[0], html_theme, "static", "img")]
templates_path = [os.path.join(html_theme_path[0], html_theme, "templates")]
html_favicon = os.path.join(html_static_path[0], "img", "favicon-32.ico")
svg_icon = os.path.join(html_static_path[0], "img", "sunpy_icon.svg")
png_icon = os.path.join(html_static_path[0], "img", "sunpy_icon_128x128.png")
on_rtd = os.environ.get("READTHEDOCS", False) == "True"
if on_rtd:
sunpy_website_url_base = "https://sunpy.org"
else:
sunpy_website_url_base = socket.gethostname()
def page_url(page):
return urljoin(sunpy_website_url_base, page)
html_sidebars = {
"**": ["docsidebar.html"],
}
html_theme_options = {
"page_toctree_depths": {"generated/gallery": 2},
"on_rtd": on_rtd,
"navbar_links": [
(
"About",
[
("Our Mission", page_url("about.html"), 1),
(
"Acknowledge SunPy",
page_url("about.html") + "#acknowledging-or-citing-sunpy",
1,
),
(
"Code of Conduct",
page_url("coc.html"),
1,
),
],
1,
),
(
"Documentation",
[
("sunpy", "https://docs.sunpy.org/en/stable/", 1),
("ndcube", "https://docs.sunpy.org/projects/ndcube/", 1),
("drms", "https://docs.sunpy.org/projects/drms/", 1),
("aiapy", "https://aiapy.readthedocs.io/en/stable/", 1),
("pfsspy", "https://pfsspy.readthedocs.io/en/stable/", 1),
("sunraster", "https://docs.sunpy.org/projects/sunraster/en/stable/", 1),
("sunkit-instruments", "https://docs.sunpy.org/projects/sunkit-instruments/en/stable/", 1),
("sunkit-image", "https://docs.sunpy.org/projects/sunkit-image/en/stable/", 1),
("radiospectra", "https://docs.sunpy.org/projects/radiospectra/en/stable/", 1),
("pyflct", "https://pyflct.readthedocs.io/en/stable/", 1),
("ablog", "https://ablog.readthedocs.io/", 1),
],
1,
),
("Blog", page_url("blog.html"), 1),
("Support Us", page_url("contribute.html"), 1),
("Get Help", page_url("help.html"), 1),
(
"SunPy Project",
[
("SunPy Project", page_url("project/"), 1),
("Community Roles", page_url("project/roles.html"), 1),
("Affiliated Packages", page_url("project/affiliated.html"), 1),
("Emeritus role holders", page_url("project/former.html"), 1),
],
1,
),
],
# Only really setup to look nice with 3 values.
"footer_links": [
("GitHub", "https://github.com/sunpy/sunpy", 1),
("Twitter", "https://twitter.com/SunPyProject", 1),
("Matrix", "https://app.element.io/#/room/#sunpy:openastronomy.org", 1),
],
}
|
[] |
[] |
[
"READTHEDOCS"
] |
[]
|
["READTHEDOCS"]
|
python
| 1 | 0 | |
tests/test_command.py
|
"""Functional tests."""
import os
import yaml
from pathlib import Path
from contextlib import contextmanager
from click.testing import CliRunner
import pytest
from molecule.util import run_command
@contextmanager
def change_dir(path):
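"""Temporarily change the working directory, restoring the previous one on exit."""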
prev = Path.cwd()
os.chdir(path)
try:
yield
finally:
os.chdir(prev)
def test_molecule_init_role():
"""Verify that init role works."""
cmd = ['molecule', 'init', 'role', 'myrole', '--driver-name', 'virtup']
runner = CliRunner()
with runner.isolated_filesystem():
result = run_command(cmd)
assert result.returncode == 0
def test_molecule_init_scenario():
"""Verify that init role works."""
cmd = ['molecule', 'init', 'scenario', 'default', '--driver-name', 'virtup']
runner = CliRunner()
with runner.isolated_filesystem():
result = run_command(cmd)
assert result.returncode == 0
@pytest.mark.parametrize('scenario_to_test', ['local', 'remote'])
def test_molecule_scenario(scenario_to_test):
basedir = Path(__file__).resolve().parent
testdir = basedir / 'scenarios' / scenario_to_test
with change_dir(testdir):
result = run_command(['molecule', 'test'])
assert result.returncode == 0
def test_molecule_external_options_file(tmp_path):
optionsfile = tmp_path / 'options.yml'
options = {
'connection': 'local',
'host': 'localhost',
}
optionsfile.write_text(yaml.dump(options, explicit_start=True))
env = os.environ.copy()
env['VIRTUP_OPTIONS_FILE'] = str(optionsfile)
basedir = Path(__file__).resolve().parent
testdir = basedir / 'scenarios' / 'local'
with change_dir(testdir):
result = run_command(['molecule', 'test'], env=env)
assert result.returncode == 0
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
AutoScaleALL.py
|
#!/home/opc/py36env/bin/python
#################################################################################################################
# OCI - Scheduled Auto Scaling Script
# Copyright (c) 2016, 2020, Oracle and/or its affiliates. All rights reserved.
# This software is licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl
#
# Written by: Richard Garsthagen
# Contributors: Joel Nation
# Contributors: Adi Zohar
#################################################################################################################
# Application Command line parameters
#
# -t config - Config file section to use (tenancy profile)
# -ip - Use Instance Principals for Authentication
# -dt - Use Instance Principals with delegation token for cloud shell
# -a - Action - All,Up,Down
# -tag - Tag - Default Schedule
# -rg - Filter on Region
# -ic - include compartment ocid
# -ec - exclude compartment ocid
# -ignrtime - ignore region time zone
# -ignormysql- ignore mysql execution
# -printocid - print ocid of object
# -topic - topic to sent summary
# -h - help
#
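# Example (hypothetical invocation; adjust the profile, action and tag to your tenancy):
#   python AutoScaleALL.py -t DEFAULT -a All -tag Schedule
#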
#################################################################################################################
import oci
import datetime
import calendar
import threading
import time
import sys
import argparse
import os
import logging
# You can modify / translate the tag names used by this script - case sensitive!!!
AnyDay = "AnyDay"
Weekend = "Weekend"
WeekDay = "WeekDay"
Version = "2022.02.03"
# ============== CONFIGURE THIS SECTION ======================
# OCI Configuration
# ============================================================
ComputeShutdownMethod = "SOFTSTOP"
LogLevel = "ALL" # Use ALL or ERRORS. When set to ERRORS only a notification will be published if error occurs
TopicID = "" # Enter Topic OCID if you want the script to publish a message about the scaling actions
AlternativeWeekend = False # Set to True if your weekend is Friday/Saturday
RateLimitDelay = 2 # Time in seconds to wait before retry of operation
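# Example (assumption): to have the script publish a run summary, set TopicID to
# the OCID of an existing Notifications topic, e.g.
#   TopicID = "ocid1.onstopic.oc1..exampleuniqueid"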
##########################################################################
# Get current host time and utc on execution
##########################################################################
current_host_time = datetime.datetime.today()
current_utc_time = datetime.datetime.utcnow()
##########################################################################
# Print header centered
##########################################################################
def print_header(name):
chars = int(90)
MakeOut("")
MakeOut('#' * chars)
MakeOut("#" + name.center(chars - 2, " ") + "#")
MakeOut('#' * chars)
##########################################################################
# Get Current Hour per the region
##########################################################################
def get_current_hour(region, ignore_region_time=False):
if region[:2] == 'eu':
timezdiff = 2
elif region[:2] == 'uk':
timezdiff = 0
elif region == 'af-johannesburg-1':
timezdiff = 2
elif region == 'ap-chiyoda-1':
timezdiff = 9
elif region == 'ap-chuncheon-1':
timezdiff = 9
elif region == 'ap-hyderabad-1':
timezdiff = 5.5
elif region == 'ap-melbourne-1':
timezdiff = 10
elif region == 'ap-mumbai-1':
timezdiff = 5.5
elif region == 'ap-osaka-1':
timezdiff = 9
elif region == 'ap-seoul-1':
timezdiff = 9
elif region == 'ap-singapore-1':
timezdiff = 8
elif region == 'ap-sydney-1':
timezdiff = 10
elif region == 'ap-tokyo-1':
timezdiff = 9
elif region == 'ca-montreal-1':
timezdiff = -4
elif region == 'ca-toronto-1':
timezdiff = -4
elif region == 'il-jerusalem-1':
timezdiff = 3
elif region == 'me-abudhabi-1':
timezdiff = 4
elif region == 'me-dubai-1':
timezdiff = 4
elif region == 'me-jeddah-1':
timezdiff = 3
elif region == 'sa-santiago-1':
timezdiff = -4
elif region == 'sa-saopaulo-1':
timezdiff = -3
elif region == 'sa-vinhedo-1':
timezdiff = -3
elif region == 'us-ashburn-1':
timezdiff = -4
elif region == 'us-gov-ashburn-1':
timezdiff = -4
elif region == 'us-gov-chicago-1':
timezdiff = -5
elif region == 'us-gov-fortworth-1':
timezdiff = -5
elif region == 'us-gov-fortworth-2':
timezdiff = -5
elif region == 'us-gov-phoenix-1':
timezdiff = -7
elif region == 'us-gov-sterling-1':
timezdiff = -4
elif region == 'us-gov-sterling-2':
timezdiff = -4
elif region == 'us-langley-1':
timezdiff = -5
elif region == 'us-luke-1':
timezdiff = -7
elif region == 'us-phoenix-1':
timezdiff = -7
elif region == 'us-sanjose-1':
timezdiff = -7
else:
timezdiff = 0
# Get current host time
current_time = current_host_time
# if need to use region time
if not ignore_region_time:
current_time = current_utc_time + datetime.timedelta(hours=timezdiff)
# get the variables to return
iDayOfWeek = current_time.weekday() # Day of week as a number
iDay = calendar.day_name[iDayOfWeek] # Day of week as string
iCurrentHour = current_time.hour
return iDayOfWeek, iDay, iCurrentHour
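# Worked example (assumption - host clock at 14:00 UTC on a Tuesday):
#   get_current_hour('us-ashburn-1') returns (1, 'Tuesday', 10),
#   because us-ashburn-1 is offset -4 hours from UTC in the table above.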
##########################################################################
# Create signer for Authentication
# Input - config_profile and is_instance_principals and is_delegation_token
# Output - config and signer objects
##########################################################################
def create_signer(config_profile, is_instance_principals, is_delegation_token):
# if instance principals authentications
if is_instance_principals:
try:
signer = oci.auth.signers.InstancePrincipalsSecurityTokenSigner()
config = {'region': signer.region, 'tenancy': signer.tenancy_id}
return config, signer
except Exception:
print_header("Error obtaining instance principals certificate, aborting")
raise SystemExit
# -----------------------------
# Delegation Token
# -----------------------------
elif is_delegation_token:
try:
# check if env variables OCI_CONFIG_FILE, OCI_CONFIG_PROFILE exist and use them
env_config_file = os.environ.get('OCI_CONFIG_FILE')
env_config_section = os.environ.get('OCI_CONFIG_PROFILE')
# check if file exist
if env_config_file is None or env_config_section is None:
MakeOut("*** OCI_CONFIG_FILE and OCI_CONFIG_PROFILE env variables not found, abort. ***")
MakeOut("")
raise SystemExit
config = oci.config.from_file(env_config_file, env_config_section)
delegation_token_location = config["delegation_token_file"]
with open(delegation_token_location, 'r') as delegation_token_file:
delegation_token = delegation_token_file.read().strip()
# get signer from delegation token
signer = oci.auth.signers.InstancePrincipalsDelegationTokenSigner(delegation_token=delegation_token)
return config, signer
except KeyError:
MakeOut("* Key Error obtaining delegation_token_file")
raise SystemExit
except Exception:
raise
# -----------------------------
# config file authentication
# -----------------------------
else:
config = oci.config.from_file(
oci.config.DEFAULT_LOCATION,
(config_profile if config_profile else oci.config.DEFAULT_PROFILE)
)
signer = oci.signer.Signer(
tenancy=config["tenancy"],
user=config["user"],
fingerprint=config["fingerprint"],
private_key_file_location=config.get("key_file"),
pass_phrase=oci.config.get_config_value_or_default(config, "pass_phrase"),
private_key_content=config.get("key_content")
)
return config, signer
##########################################################################
# Configure logging output
##########################################################################
def MakeOut(msg, no_end=False):
logging.basicConfig(filename='log.out', format='%(asctime)s %(message)s', datefmt='%d/%m/%Y %I:%M:%S %p')
logging.warning(msg)
def MakeLog(msg, no_end=False):
file_path = 'log.log'
sys.stdout = open(file_path, "w")
print(datetime.datetime.today().strftime('%d/%m/%Y %I:%M:%S.%f %p') + "\t" + msg)
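# Note: MakeOut appends to log.out via the logging module, while MakeLog
# rebinds sys.stdout to log.log opened in "w" mode, so each call truncates
# that file and redirects all subsequent print() output into it.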
##########################################################################
# isWeekDay
##########################################################################
def isWeekDay(day):
weekday = True
if AlternativeWeekend:
if day == 4 or day == 5:
weekday = False
else:
if day == 5 or day == 6:
weekday = False
return weekday
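# Example: with the default weekend (Saturday=5, Sunday=6), isWeekDay(5) is
# False; with AlternativeWeekend = True the weekend shifts to Friday/Saturday.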
###############################################
# isDeleted
###############################################
def isDeleted(state):
deleted = False
try:
if state == "TERMINATED" or state == "TERMINATING":
deleted = True
if state == "DELETED" or state == "DELETING":
deleted = True
except Exception:
deleted = True
MakeOut("No lifecyclestate found, ignoring resource")
MakeOut(state)
return deleted
###############################################
# AutonomousThread
###############################################
class AutonomousThread(threading.Thread):
def __init__(self, threadID, ID, NAME, CPU):
threading.Thread.__init__(self)
self.threadID = threadID
self.ID = ID
self.NAME = NAME
self.CPU = CPU
def run(self):
global total_resources
global ErrorsFound
global errors
global success
MakeOut(" - Starting Autonomous DB {} and after that scaling to {} cpus".format(self.NAME, self.CPU))
Retry = True
while Retry:
try:
response = database.start_autonomous_database(autonomous_database_id=self.ID)
Retry = False
success.append("Started Autonomous DB {}".format(self.NAME))
except oci.exceptions.ServiceError as response:
if response.status == 429:
MakeOut("Rate limit kicking in.. waiting {} seconds...".format(RateLimitDelay))
time.sleep(RateLimitDelay)
else:
ErrorsFound = True
errors.append(" - Error ({}) Starting Autonomous DB {}".format(response.status, self.NAME))
Retry = False
response = database.get_autonomous_database(autonomous_database_id=self.ID, retry_strategy=oci.retry.DEFAULT_RETRY_STRATEGY)
time.sleep(10)
while response.data.lifecycle_state != "AVAILABLE":
response = database.get_autonomous_database(autonomous_database_id=self.ID, retry_strategy=oci.retry.DEFAULT_RETRY_STRATEGY)
time.sleep(10)
MakeOut("Autonomous DB {} started, re-scaling to {} cpus".format(self.NAME, self.CPU))
dbupdate = oci.database.models.UpdateAutonomousDatabaseDetails()
dbupdate.cpu_core_count = self.CPU
Retry = True
while Retry:
try:
response = database.update_autonomous_database(autonomous_database_id=self.ID, update_autonomous_database_details=dbupdate)
Retry = False
success.append("Autonomous DB {} started, re-scaling to {} cpus".format(self.NAME, self.CPU))
except oci.exceptions.ServiceError as response:
if response.status == 429:
MakeOut("Rate limit kicking in.. waiting {} seconds...".format(RateLimitDelay))
time.sleep(RateLimitDelay)
else:
errors.append(" - Error ({}) re-scaling to {} cpus for {}".format(response.status, self.CPU, self.NAME))
Retry = False
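# AutonomousThread sketch of the flow above: start the Autonomous DB, poll
# get_autonomous_database() every 10 seconds until it reports AVAILABLE, then
# issue the CPU rescale - all in a worker thread so the main loop can continue.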
###############################################
# PoolThread
###############################################
class PoolThread(threading.Thread):
def __init__(self, threadID, ID, NAME, INSTANCES):
threading.Thread.__init__(self)
self.threadID = threadID
self.ID = ID
self.NAME = NAME
self.INSTANCES = INSTANCES
def run(self):
global total_resources
global ErrorsFound
global errors
global success
MakeOut(" - Starting Instance Pool {} and after that scaling to {} instances".format(self.NAME, self.INSTANCES))
Retry = True
while Retry:
try:
response = pool.start_instance_pool(instance_pool_id=self.ID)
Retry = False
success.append(" - Starting Instance Pool {}".format(self.NAME))
except oci.exceptions.ServiceError as response:
if response.status == 429:
MakeOut("Rate limit kicking in.. waiting {} seconds...".format(RateLimitDelay))
time.sleep(RateLimitDelay)
else:
errors.append(" - Error ({}) starting instance pool {}".format(response.status, self.NAME))
Retry = False
response = pool.get_instance_pool(instance_pool_id=self.ID, retry_strategy=oci.retry.DEFAULT_RETRY_STRATEGY)
time.sleep(10)
while response.data.lifecycle_state != "RUNNING":
response = pool.get_instance_pool(instance_pool_id=self.ID, retry_strategy=oci.retry.DEFAULT_RETRY_STRATEGY)
time.sleep(10)
MakeOut("Instance pool {} started, re-scaling to {} instances".format(self.NAME, self.INSTANCES))
pooldetails = oci.core.models.UpdateInstancePoolDetails()
pooldetails.size = self.INSTANCES
Retry = True
while Retry:
try:
response = pool.update_instance_pool(instance_pool_id=self.ID, update_instance_pool_details=pooldetails)
Retry = False
success.append("Rescaling Instance Pool {} to {} instances".format(self.NAME, self.INSTANCES))
except oci.exceptions.ServiceError as response:
if response.status == 429:
MakeOut("Rate limit kicking in.. waiting {} seconds...".format(RateLimitDelay))
time.sleep(RateLimitDelay)
else:
ErrorsFound = True
errors.append(" - Error ({}) rescaling instance pool {}".format(response.status, self.NAME))
Retry = False
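# PoolThread follows the same pattern for instance pools: start the pool, poll
# until RUNNING, then resize it to the requested number of instances.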
###############################################
# AnalyticsThread
###############################################
class AnalyticsThread(threading.Thread):
def __init__(self, threadID, ID, NAME, CPU):
threading.Thread.__init__(self)
self.threadID = threadID
self.ID = ID
self.NAME = NAME
self.CPU = CPU
def run(self):
global total_resources
global ErrorsFound
global errors
global success
MakeOut(" - Starting Analytics Service {} and after that scaling to {} cpus".format(self.NAME, self.CPU))
Retry = True
while Retry:
try:
response = analytics.start_analytics_instance(analytics_instance_id=self.ID)
Retry = False
success.append("Started Analytics Service {}".format(self.NAME))
except oci.exceptions.ServiceError as response:
if response.status == 429:
MakeOut("Rate limit kicking in.. waiting {} seconds...".format(RateLimitDelay))
time.sleep(RateLimitDelay)
else:
ErrorsFound = True
errors.append(" - Error ({}) Starting Analytics Service {}".format(response.status, self.NAME))
Retry = False
response = analytics.get_analytics_instance(analytics_instance_id=self.ID, retry_strategy=oci.retry.DEFAULT_RETRY_STRATEGY)
time.sleep(10)
while response.data.lifecycle_state != "ACTIVE":
response = analytics.get_analytics_instance(analytics_instance_id=self.ID, retry_strategy=oci.retry.DEFAULT_RETRY_STRATEGY)
time.sleep(10)
MakeOut("Analytics Service {} started, re-scaling to {} cpus".format(self.NAME, self.CPU))
capacity = oci.analytics.models.capacity.Capacity()
capacity.capacity_value = self.CPU
capacity.capacity_type = capacity.CAPACITY_TYPE_OLPU_COUNT
details = oci.analytics.models.ScaleAnalyticsInstanceDetails()
details.capacity = capacity
Retry = True
while Retry:
try:
response = analytics.scale_analytics_instance(analytics_instance_id=self.ID, scale_analytics_instance_details=details)
Retry = False
success.append("Analytics Service {} started, re-scaling to {} cpus".format(self.NAME, self.CPU))
except oci.exceptions.ServiceError as response:
if response.status == 429:
MakeOut("Rate limit kicking in.. waiting {} seconds...".format(RateLimitDelay))
time.sleep(RateLimitDelay)
else:
errors.append("Error ({}) re-scaling Analytics to {} cpus for {}".format(response.status, self.CPU, self.NAME))
Retry = False
##########################################################################
# Load compartments
##########################################################################
def identity_read_compartments(identity, tenancy):
MakeOut("Loading Compartments...")
try:
cs = oci.pagination.list_call_get_all_results(
identity.list_compartments,
tenancy.id,
compartment_id_in_subtree=True,
retry_strategy=oci.retry.DEFAULT_RETRY_STRATEGY
).data
# Add root compartment which is not part of the list_compartments
tenant_compartment = oci.identity.models.Compartment()
tenant_compartment.id = tenancy.id
tenant_compartment.name = tenancy.name
tenant_compartment.lifecycle_state = oci.identity.models.Compartment.LIFECYCLE_STATE_ACTIVE
cs.append(tenant_compartment)
MakeOut(" Total " + str(len(cs)) + " compartments loaded.")
return cs
except Exception as e:
raise RuntimeError("Error in identity_read_compartments: " + str(e.args))
##########################################################################
# Handle Region
##########################################################################
def autoscale_region(region):
# Global parameters for update
global total_resources
global ErrorsFound
global errors
global success
MakeOut("Starting Auto Scaling script on region {}, executing {} actions".format(region, Action))
threads = [] # Thread array for async AutonomousDB start and rescale
tcount = 0
###############################################
# Get Current Day, time
###############################################
DayOfWeek, Day, CurrentHour = get_current_hour(region, cmd.ignore_region_time)
if AlternativeWeekend:
MakeOut("Using Alternative weekend (Friday and Saturday as weekend")
if cmd.ignore_region_time:
MakeOut("Ignoring Region Datetime, Using local time")
MakeOut("Day of week: {}, IsWeekday: {}, Current hour: {}".format(Day, isWeekDay(DayOfWeek), CurrentHour))
# The schedule array starts at index 0, so decrease CurrentHour by 1; if the hour is 0 then use 23
CurrentHour = 23 if CurrentHour == 0 else CurrentHour - 1
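# e.g. if the region time is 00:xx, CurrentHour becomes 23 and the last of the
# 24 schedule values is applied during that hour.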
###############################################
# Find all resources with a Schedule Tag
###############################################
MakeOut("Getting all resources supported by the search function...")
query = "query all resources where (definedTags.namespace = '{}')".format(PredefinedTag)
query += " && compartmentId = '" + compartment_include + "'" if compartment_include else ""
query += " && compartmentId != '" + compartment_exclude + "'" if compartment_exclude else ""
sdetails = oci.resource_search.models.StructuredSearchDetails()
sdetails.query = query
NoError = True
try:
result = search.search_resources(search_details=sdetails, limit=1000, retry_strategy=oci.retry.DEFAULT_RETRY_STRATEGY).data
except oci.exceptions.ServiceError as response:
print ("Error: {} - {}".format(response.code, response.message))
result = oci.resource_search.models.ResourceSummaryCollection()
result.items = []
#################################################################
# Find additional resources not found by search (MySQL Service)
#################################################################
if not cmd.ignoremysql:
MakeOut("Finding MySQL instances in {} Compartments...".format(len(compartments)))
for c in compartments:
# check compartment include and exclude
if c.lifecycle_state != oci.identity.models.Compartment.LIFECYCLE_STATE_ACTIVE:
continue
if compartment_include:
if c.id != compartment_include:
continue
if compartment_exclude:
if c.id == compartment_exclude:
continue
mysql_instances = []
try:
mysql_instances = oci.pagination.list_call_get_all_results(
mysql.list_db_systems,
compartment_id=c.id,
retry_strategy=oci.retry.DEFAULT_RETRY_STRATEGY
).data
except Exception:
MakeOut("e", True)
mysql_instances = []
continue
for mysql_instance in mysql_instances:
if PredefinedTag not in mysql_instance.defined_tags or mysql_instance.lifecycle_state != "ACTIVE":
continue
summary = oci.resource_search.models.ResourceSummary()
summary.availability_domain = mysql_instance.availability_domain
summary.compartment_id = mysql_instance.compartment_id
summary.defined_tags = mysql_instance.defined_tags
summary.freeform_tags = mysql_instance.freeform_tags
summary.identifier = mysql_instance.id
summary.lifecycle_state = mysql_instance.lifecycle_state
summary.display_name = mysql_instance.display_name
summary.resource_type = "MysqlDBInstance"
result.items.append(summary)
MakeOut("")
#################################################################
# All the items with a schedule are now collected.
# Let's go thru them and find / validate the correct schedule
#################################################################
total_resources += len(result.items)
MakeOut("")
MakeOut("Checking {} Resources for Auto Scale...".format(len(result.items)))
for resource in result.items:
# The search data is not always updated. Get the tags from the actual resource itself, not using the search data.
resourceOk = False
if cmd.print_ocid:
MakeOut("Checking {} ({}) - {}...".format(resource.display_name, resource.resource_type, resource.identifier))
else:
MakeOut("Checking {} ({})...".format(resource.display_name, resource.resource_type))
if resource.resource_type == "Instance":
resourceDetails = compute.get_instance(instance_id=resource.identifier, retry_strategy=oci.retry.DEFAULT_RETRY_STRATEGY).data
resourceOk = True
if resource.resource_type == "DbSystem":
resourceDetails = database.get_db_system(db_system_id=resource.identifier, retry_strategy=oci.retry.DEFAULT_RETRY_STRATEGY).data
resourceOk = True
if resource.resource_type == "VmCluster":
resourceDetails = database.get_vm_cluster(vm_cluster_id=resource.identifier, retry_strategy=oci.retry.DEFAULT_RETRY_STRATEGY).data
resourceOk = True
if resource.resource_type == "AutonomousDatabase":
resourceDetails = database.get_autonomous_database(autonomous_database_id=resource.identifier, retry_strategy=oci.retry.DEFAULT_RETRY_STRATEGY).data
resourceOk = True
if resource.resource_type == "InstancePool":
resourceDetails = pool.get_instance_pool(instance_pool_id=resource.identifier, retry_strategy=oci.retry.DEFAULT_RETRY_STRATEGY).data
resourceOk = True
if resource.resource_type == "OdaInstance":
resourceDetails = oda.get_oda_instance(oda_instance_id=resource.identifier, retry_strategy=oci.retry.DEFAULT_RETRY_STRATEGY).data
resourceOk = True
if resource.resource_type == "AnalyticsInstance":
resourceDetails = analytics.get_analytics_instance(analytics_instance_id=resource.identifier, retry_strategy=oci.retry.DEFAULT_RETRY_STRATEGY).data
resourceOk = True
if resource.resource_type == "IntegrationInstance":
resourceDetails = integration.get_integration_instance(integration_instance_id=resource.identifier, retry_strategy=oci.retry.DEFAULT_RETRY_STRATEGY).data
resourceOk = True
if resource.resource_type == "LoadBalancer":
resourceDetails = loadbalancer.get_load_balancer(load_balancer_id=resource.identifier, retry_strategy=oci.retry.DEFAULT_RETRY_STRATEGY).data
resourceOk = True
if resource.resource_type == "MysqlDBInstance":
resourceDetails = mysql.get_db_system(db_system_id=resource.identifier, retry_strategy=oci.retry.DEFAULT_RETRY_STRATEGY).data
resourceOk = True
if resource.resource_type == "GoldenGateDeployment":
resourceDetails = goldengate.get_deployment(deployment_id=resource.identifier, retry_strategy=oci.retry.DEFAULT_RETRY_STRATEGY).data
resourceOk = True
if resource.resource_type == "DISWorkspace":
resourceDetails = dataintegration.get_workspace(workspace_id=resource.identifier, retry_strategy=oci.retry.DEFAULT_RETRY_STRATEGY).data
resourceOk = True
if not isDeleted(resource.lifecycle_state) and resourceOk:
schedule = resourceDetails.defined_tags[PredefinedTag]
ActiveSchedule = ""
if AnyDay in schedule:
ActiveSchedule = schedule[AnyDay]
if isWeekDay(DayOfWeek): # check for weekday / weekend
if WeekDay in schedule:
ActiveSchedule = schedule[WeekDay]
else:
if Weekend in schedule:
ActiveSchedule = schedule[Weekend]
if Day in schedule: # Check for day specific tag (today)
ActiveSchedule = schedule[Day]
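# Example of a resolved schedule value (an illustration, not from the source):
#   "0,0,0,0,0,0,0,0,2,2,2,2,2,2,2,2,2,2,*,0,0,0,0,0"
# i.e. 24 comma separated hourly values; 0 stops the resource, a number is the
# target size/CPU count and "*" skips the resource for that hour.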
#################################################################
# Check if the active schedule contains exactly 24 numbers for each hour of the day
#################################################################
if ActiveSchedule != "":
try:
schedulehours = ActiveSchedule.split(",")
if len(schedulehours) != 24:
ErrorsFound = True
errors.append(" - Error with schedule of {} - {}, not correct amount of hours, I count {}".format(resource.display_name, ActiveSchedule, len(schedulehours)))
MakeOut(" - Error with schedule of {} - {}, not correct amount of hours, i count {}".format(resource.display_name, ActiveSchedule, len(schedulehours)))
ActiveSchedule = ""
except Exception:
ErrorsFound = True
ActiveSchedule = ""
errors.append(" - Error with schedule for {}".format(resource.display_name))
MakeOut(" - Error with schedule of {}".format(resource.display_name))
MakeOut(sys.exc_info()[0])
else:
MakeOut(" - Ignoring instance, as no active schedule for today found")
###################################################################################
# if schedule validated, let see if we can apply the new schedule to the resource
###################################################################################
if ActiveSchedule != "":
DisplaySchedule = ""
c = 0
for h in schedulehours:
if c == CurrentHour:
DisplaySchedule = DisplaySchedule + "[" + h + "],"
else:
DisplaySchedule = DisplaySchedule + h + ","
c = c + 1
MakeOut(" - Active schedule for {}: {}".format(resource.display_name, DisplaySchedule))
if schedulehours[CurrentHour] == "*":
MakeOut(" - Ignoring this service for this hour")
else:
###################################################################################
# Instance
###################################################################################
if resource.resource_type == "Instance":
# Only perform action if VM Instance, ignoring any BM instances.
if resourceDetails.shape[:2] == "VM":
######## WAIT UNTIL THE INSTANCE HAS A VALID STATE (RUNNING OR STOPPED) ########
while (compute.get_instance(resource.identifier).data.lifecycle_state.upper() != "RUNNING" and compute.get_instance(resource.identifier).data.lifecycle_state.upper() != "STOPPED") :
time.sleep(5)
######## SHUTDOWN ########
if resourceDetails.lifecycle_state == "RUNNING" and int(schedulehours[CurrentHour]) == 0:
MakeOut(" - Initiate Compute VM shutdown for {}".format(resource.display_name))
MakeLog("[STOP] Instance {}".format(resource.display_name))
Retry = True
while Retry:
try:
response = compute.instance_action(instance_id=resource.identifier, action=ComputeShutdownMethod)
Retry = False
success.append(" - Initiate Compute VM shutdown for {}".format(resource.display_name))
except oci.exceptions.ServiceError as response:
if response.status == 429:
MakeOut("Rate limit kicking in.. waiting {} seconds...".format(RateLimitDelay))
time.sleep(RateLimitDelay)
else:
ErrorsFound = True
errors.append(" - Error ({}) Compute VM Shutdown for {} - {}".format(response.status, resource.display_name, response.message))
MakeOut(" - Error ({}) Compute VM Shutdown for {} - {}".format(response.status, resource.display_name, response.message))
Retry = False
######## SCALE UP/DOWN ########
if resourceDetails.lifecycle_state == "RUNNING" and int(schedulehours[CurrentHour]) != 0:
if int(resourceDetails.shape_config.ocpus) != int(schedulehours[CurrentHour]) :
MakeOut(" - Initiate Compute VM scale for {}".format(resource.display_name))
MakeLog("[SCALE] Instance {}".format(resource.display_name))
Retry = True
while Retry:
try:
response = compute.update_instance(instance_id=resource.identifier, update_instance_details=oci.core.models.UpdateInstanceDetails(shape_config=oci.core.models.UpdateInstanceShapeConfigDetails(ocpus=int(schedulehours[CurrentHour]),memory_in_gbs=int(resourceDetails.shape_config.memory_in_gbs))))
Retry = False
success.append(" - Initiate Compute VM scale for {}".format(resource.display_name))
except oci.exceptions.ServiceError as response:
if response.status == 429:
MakeOut("Rate limit kicking in.. waiting {} seconds...".format(RateLimitDelay))
time.sleep(RateLimitDelay)
else:
ErrorsFound = True
errors.append(" - Error ({}) Compute VM scale for {} - {}".format(response.status, resource.display_name, response.message))
MakeOut(" - Error ({}) Compute VM scale for {} - {}".format(response.status, resource.display_name, response.message))
Retry = False
if resourceDetails.lifecycle_state == "STOPPED" and int(schedulehours[CurrentHour]) > 0:
######## START AND SCALE UP/DOWN ########
if int(resourceDetails.shape_config.ocpus) != int(schedulehours[CurrentHour]) :
MakeOut(" - Initiate Compute VM startup and scale for {}".format(resource.display_name))
MakeLog("[START | SCALE] Instance {}".format(resource.display_name))
Retry = True
while Retry:
try:
response = compute.instance_action(instance_id=resource.identifier, action="START")
while compute.get_instance(resource.identifier).data.lifecycle_state != "RUNNING" :
time.sleep(5)
response = compute.update_instance(instance_id=resource.identifier, update_instance_details=oci.core.models.UpdateInstanceDetails(shape_config=oci.core.models.UpdateInstanceShapeConfigDetails(ocpus=int(schedulehours[CurrentHour]),memory_in_gbs=int(resourceDetails.shape_config.memory_in_gbs))))
Retry = False
success.append(" - Initiate Compute VM startup and scale for {}".format(resource.display_name))
except oci.exceptions.ServiceError as response:
if response.status == 429:
MakeOut("Rate limit kicking in.. waiting {} seconds...".format(RateLimitDelay))
time.sleep(RateLimitDelay)
else:
ErrorsFound = True
errors.append(" - Error ({}) Compute VM startup and scale for {} - {}".format(response.status, resource.display_name, response.message))
Retry = False
######## START ########
if int(resourceDetails.shape_config.ocpus) == int(schedulehours[CurrentHour]) :
MakeOut(" - Initiate Compute VM startup for {}".format(resource.display_name))
MakeLog("[START] Instance {}".format(resource.display_name))
Retry = True
while Retry:
try:
response = compute.instance_action(instance_id=resource.identifier, action="START")
Retry = False
success.append(" - Initiate Compute VM startup for {}".format(resource.display_name))
except oci.exceptions.ServiceError as response:
if response.status == 429:
MakeOut("Rate limit kicking in.. waiting {} seconds...".format(RateLimitDelay))
time.sleep(RateLimitDelay)
else:
ErrorsFound = True
errors.append(" - Error ({}) Compute VM startup for {} - {}".format(response.status, resource.display_name, response.message))
Retry = False
###################################################################################
# DBSystem
###################################################################################
if resource.resource_type == "DbSystem":
# Execute On/Off operations for Database VMs
if resourceDetails.shape[:2] == "VM":
dbnodes = database.list_db_nodes(compartment_id=resource.compartment_id, db_system_id=resource.identifier).data
for dbnodedetails in dbnodes:
if int(schedulehours[CurrentHour]) == 0 or int(schedulehours[CurrentHour]) == 1:
if dbnodedetails.lifecycle_state == "AVAILABLE" and int(schedulehours[CurrentHour]) == 0:
MakeOut(" - Initiate DB VM shutdown for {}".format(resource.display_name))
MakeLog("[STOP] DB VM {}".format(resource.display_name))
Retry = True
while Retry:
try:
response = database.db_node_action(db_node_id=dbnodedetails.id, action="STOP")
Retry = False
success.append(" - Initiate DB VM shutdown for {}".format(resource.display_name))
except oci.exceptions.ServiceError as response:
if response.status == 429:
MakeOut("Rate limit kicking in.. waiting {} seconds...".format(RateLimitDelay))
time.sleep(RateLimitDelay)
else:
ErrorsFound = True
errors.append(" - Error ({}) DB VM shutdown for {} - {}".format(response.status, resource.display_name, response.message))
Retry = False
if dbnodedetails.lifecycle_state == "STOPPED" and int(schedulehours[CurrentHour]) == 1:
MakeOut(" - Initiate DB VM startup for {}".format(resource.display_name))
MakeLog("[START] DB VM {}".format(resource.display_name))
Retry = True
while Retry:
try:
response = database.db_node_action(db_node_id=dbnodedetails.id, action="START")
Retry = False
success.append(" - Initiate DB VM startup for {}".format(resource.display_name))
except oci.exceptions.ServiceError as response:
if response.status == 429:
MakeOut("Rate limit kicking in.. waiting {} seconds...".format(RateLimitDelay))
time.sleep(RateLimitDelay)
else:
ErrorsFound = True
errors.append(" - Error ({}) DB VM startup for {} - {}".format(response.status, resource.display_name, response.message))
Retry = False
###################################################################################
# BM
###################################################################################
if resourceDetails.shape[:2] == "BM":
if int(schedulehours[CurrentHour]) > 1 and int(schedulehours[CurrentHour]) < 53:
if resourceDetails.cpu_core_count > int(schedulehours[CurrentHour]):
if Action == "All" or Action == "Down":
MakeOut(" - Initiate DB BM Scale Down to {} for {}".format(int(schedulehours[CurrentHour]), resource.display_name))
dbupdate = oci.database.models.UpdateDbSystemDetails()
dbupdate.cpu_core_count = int(schedulehours[CurrentHour])
Retry = True
while Retry:
try:
response = database.update_db_system(db_system_id=resource.identifier, update_db_system_details=dbupdate)
Retry = False
success.append(" - Initiate DB BM Scale Down from {} to {} for {}".format(resourceDetails.cpu_core_count, int(schedulehours[CurrentHour]), resource.display_name))
except oci.exceptions.ServiceError as response:
if response.status == 429:
MakeOut("Rate limit kicking in.. waiting {} seconds...".format(RateLimitDelay))
time.sleep(RateLimitDelay)
else:
ErrorsFound = True
errors.append(" - Error ({}) DB BM Scale Down from {} to {} for {} - {}".format(response.status, resourceDetails.cpu_core_count,
int(schedulehours[CurrentHour]),
resource.display_name, response.message))
MakeOut(" - Error ({}) DB BM Scale Down from {} to {} for {} - {}".format(response.status, resourceDetails.cpu_core_count,
int(schedulehours[CurrentHour]),
resource.display_name, response.message))
Retry = False
if resourceDetails.cpu_core_count < int(schedulehours[CurrentHour]):
if Action == "All" or Action == "Up":
MakeOut(" - Initiate DB BM Scale UP to {} for {}".format(int(schedulehours[CurrentHour]), resource.display_name))
dbupdate = oci.database.models.UpdateDbSystemDetails()
dbupdate.cpu_core_count = int(schedulehours[CurrentHour])
Retry = True
while Retry:
try:
response = database.update_db_system(db_system_id=resource.identifier, update_db_system_details=dbupdate)
Retry = False
success.append(
" - Initiate DB BM Scale UP from {} to {} for {}".format(resourceDetails.cpu_core_count, int(schedulehours[CurrentHour]),
resource.display_name))
except oci.exceptions.ServiceError as response:
if response.status == 429:
MakeOut("Rate limit kicking in.. waiting {} seconds...".format(RateLimitDelay))
time.sleep(RateLimitDelay)
else:
ErrorsFound = True
errors.append(" - Error ({}) DB BM Scale UP from {} to {} for {} - {}".format(response.status, resourceDetails.cpu_core_count, int(schedulehours[CurrentHour]), resource.display_name, response.message))
MakeOut(" - Error ({}) DB BM Scale UP from {} to {} for {} - {}".format(response.status, resourceDetails.cpu_core_count, int(schedulehours[CurrentHour]), resource.display_name, response.message))
Retry = False
###################################################################################
# Exadata
###################################################################################
if resourceDetails.shape[:7] == "Exadata":
if resourceDetails.cpu_core_count > int(schedulehours[CurrentHour]):
if Action == "All" or Action == "Down":
MakeOut(" - Initiate Exadata CS Scale Down from {} to {} for {}".format(resourceDetails.cpu_core_count, int(schedulehours[CurrentHour]),
resource.display_name))
dbupdate = oci.database.models.UpdateDbSystemDetails()
dbupdate.cpu_core_count = int(schedulehours[CurrentHour])
Retry = True
while Retry:
try:
response = database.update_db_system(db_system_id=resource.identifier, update_db_system_details=dbupdate)
Retry = False
success.append(" - Initiate Exadata DB Scale Down to {} at {} for {}".format(resourceDetails.cpu_core_count, int(schedulehours[CurrentHour]), resource.display_name))
except oci.exceptions.ServiceError as response:
if response.status == 429:
MakeOut("Rate limit kicking in.. waiting {} seconds...".format(RateLimitDelay))
time.sleep(RateLimitDelay)
else:
ErrorsFound = True
errors.append(" - Error ({}) Exadata DB Scale Down from {} to {} for {} - {}".format(response.status, resourceDetails.cpu_core_count, int(schedulehours[CurrentHour]), resource.display_name, response.message))
MakeOut(" - Error ({}) Exadata DB Scale Down from {} to {} for {} - {}".format(response.status, resourceDetails.cpu_core_count, int(schedulehours[CurrentHour]), resource.display_name, response.message))
Retry = False
if resourceDetails.cpu_core_count < int(schedulehours[CurrentHour]):
if Action == "All" or Action == "Up":
MakeOut(" - Initiate Exadata CS Scale UP from {} to {} for {}".format(resourceDetails.cpu_core_count, int(schedulehours[CurrentHour]), resource.display_name))
dbupdate = oci.database.models.UpdateDbSystemDetails()
dbupdate.cpu_core_count = int(schedulehours[CurrentHour])
Retry = True
while Retry:
try:
response = database.update_db_system(db_system_id=resource.identifier, update_db_system_details=dbupdate)
Retry = False
success.append(" - Initiate Exadata DB BM Scale UP from {} to {} for {}".format(resourceDetails.cpu_core_count, int(schedulehours[CurrentHour]), resource.display_name))
except oci.exceptions.ServiceError as response:
if response.status == 429:
MakeOut("Rate limit kicking in.. waiting {} seconds...".format(RateLimitDelay))
time.sleep(RateLimitDelay)
else:
ErrorsFound = True
errors.append(" - Error ({}) Exadata DB Scale Up from {} to {} for {} - {}".format(response.status, resourceDetails.cpu_core_count, int(schedulehours[CurrentHour]), resource.display_name, response.message))
MakeOut(" - Error ({}) Exadata DB Scale Up from {} to {} for {} - {}".format(response.status, resourceDetails.cpu_core_count, int(schedulehours[CurrentHour]), resource.display_name, response.message))
Retry = False
###################################################################################
# VmCluster
###################################################################################
if resource.resource_type == "VmCluster":
if int(schedulehours[CurrentHour]) >= 0 and int(schedulehours[CurrentHour]) < 401:
# Cluster VM is running; the requested value is the number of CPU cores to scale to
if resourceDetails.lifecycle_state == "AVAILABLE" and int(schedulehours[CurrentHour]) > 0:
if resourceDetails.cpus_enabled > int(schedulehours[CurrentHour]):
if Action == "All" or Action == "Down":
MakeOut(" - Initiate ExadataC@C VM Cluster Scale Down to {} for {}".format(int(schedulehours[CurrentHour]), resource.display_name))
dbupdate = oci.database.models.UpdateVmClusterDetails()
dbupdate.cpu_core_count = int(schedulehours[CurrentHour])
Retry = True
while Retry:
try:
response = database.update_vm_cluster(vm_cluster_id=resource.identifier, update_vm_cluster_details=dbupdate)
Retry = False
success.append(" - Initiate ExadataC&C Cluster VM Scale Down from {} to {} for {}".format(resourceDetails.cpus_enabled, int(schedulehours[CurrentHour]), resource.display_name))
except oci.exceptions.ServiceError as response:
if response.status == 429:
MakeOut("Rate limit kicking in.. waiting {} seconds...".format(RateLimitDelay))
time.sleep(RateLimitDelay)
else:
ErrorsFound = True
errors.append(" - Error ({}) ExadataC&C Cluster VM Scale Down from {} to {} for {} - {}".format(response.status, resourceDetails.cpus_enabled, int(schedulehours[CurrentHour]), resource.display_name, response.message))
MakeOut(" - Error ({}) ExadataC&C Cluster VM Scale Down from {} to {} for {} - {}".format(response.status, resourceDetails.cpus_enabled, int(schedulehours[CurrentHour]), resource.display_name, response.message))
Retry = False
if resourceDetails.cpus_enabled < int(schedulehours[CurrentHour]):
if Action == "All" or Action == "Up":
MakeOut(
" - Initiate ExadataC@C VM Cluster Scale Up from {} to {} for {}".format(resourceDetails.cpus_enabled, int(schedulehours[CurrentHour]), resource.display_name))
dbupdate = oci.database.models.UpdateVmClusterDetails()
dbupdate.cpu_core_count = int(schedulehours[CurrentHour])
Retry = True
while Retry:
try:
response = database.update_vm_cluster(vm_cluster_id=resource.identifier, update_vm_cluster_details=dbupdate)
Retry = False
success.append(" - Initiate ExadataC&C Cluster VM Scale Up to {} for {}".format(int(schedulehours[CurrentHour]), resource.display_name))
except oci.exceptions.ServiceError as response:
if response.status == 429:
MakeOut("Rate limit kicking in.. waiting {} seconds...".format(RateLimitDelay))
time.sleep(RateLimitDelay)
else:
ErrorsFound = True
errors.append(" - Error ({}) ExadataC&C Cluster VM Scale Up from {} to {} for {} - {}".format(response.status, resourceDetails.cpus_enabled, int(schedulehours[CurrentHour]), resource.display_name, response.message))
MakeOut(" - Error ({}) ExadataC&C Cluster VM Scale Up from {} to {} for {} - {}".format(response.status, resourceDetails.cpus_enabled, int(schedulehours[CurrentHour]), resource.display_name, response.message))
Retry = False
###################################################################################
# AutonomousDatabase
###################################################################################
# Execute CPU Scale Up/Down operations for Database BMs
if resource.resource_type == "AutonomousDatabase":
if int(schedulehours[CurrentHour]) >= 0 and int(schedulehours[CurrentHour]) < 129:
# Autonomous DB is running; the requested value is the number of CPU cores to scale to
if resourceDetails.lifecycle_state == "AVAILABLE" and int(schedulehours[CurrentHour]) > 0:
if resourceDetails.cpu_core_count > int(schedulehours[CurrentHour]):
if Action == "All" or Action == "Down":
MakeOut(" - Initiate Autonomous DB Scale Down to {} for {}".format(int(schedulehours[CurrentHour]),
resource.display_name))
dbupdate = oci.database.models.UpdateAutonomousDatabaseDetails()
dbupdate.cpu_core_count = int(schedulehours[CurrentHour])
Retry = True
while Retry:
try:
response = database.update_autonomous_database(autonomous_database_id=resource.identifier, update_autonomous_database_details=dbupdate)
Retry = False
success.append(" - Initiate Autonomous DB Scale Down from {} to {} for {}".format(resourceDetails.cpu_core_count, int(schedulehours[CurrentHour]), resource.display_name))
except oci.exceptions.ServiceError as response:
if response.status == 429:
MakeOut("Rate limit kicking in.. waiting {} seconds...".format(RateLimitDelay))
time.sleep(RateLimitDelay)
else:
ErrorsFound = True
errors.append(" - Error ({}) Autonomous DB Scale Down from {} to {} for {} - {}".format(response.status, resourceDetails.cpu_core_count, int(schedulehours[CurrentHour]), resource.display_name, response.message))
MakeOut(" - Error ({}) Autonomous DB Scale Down from {} to {} for {} - {}".format(response.status, resourceDetails.cpu_core_count, int(schedulehours[CurrentHour]), resource.display_name, response.message))
Retry = False
if resourceDetails.cpu_core_count < int(schedulehours[CurrentHour]):
if Action == "All" or Action == "Up":
MakeOut(" - Initiate Autonomous DB Scale Up from {} to {} for {}".format(resourceDetails.cpu_core_count, int(schedulehours[CurrentHour]), resource.display_name))
dbupdate = oci.database.models.UpdateAutonomousDatabaseDetails()
dbupdate.cpu_core_count = int(schedulehours[CurrentHour])
Retry = True
while Retry:
try:
response = database.update_autonomous_database(autonomous_database_id=resource.identifier, update_autonomous_database_details=dbupdate)
Retry = False
success.append(" - Initiate Autonomous DB Scale Up to {} for {}".format(int(schedulehours[CurrentHour]), resource.display_name))
except oci.exceptions.ServiceError as response:
if response.status == 429:
MakeOut("Rate limit kicking in.. waiting {} seconds...".format(RateLimitDelay))
time.sleep(RateLimitDelay)
else:
ErrorsFound = True
errors.append(" - Error ({}) Autonomous DB Scale Up from {} to {} for {} - {}".format(response.status, resourceDetails.cpu_core_count, int(schedulehours[CurrentHour]), resource.display_name, response.message))
MakeOut(" - Error ({}) Autonomous DB Scale Up from {} to {} for {} - {}".format(response.status, resourceDetails.cpu_core_count, int(schedulehours[CurrentHour]), resource.display_name, response.message))
Retry = False
# Autonomous DB is running and the request is to stop the database
if resourceDetails.lifecycle_state == "AVAILABLE" and int(schedulehours[CurrentHour]) == 0:
if Action == "All" or Action == "Down":
MakeOut(" - Stoping Autonomous DB {}".format(resource.display_name))
Retry = True
while Retry:
try:
response = database.stop_autonomous_database(autonomous_database_id=resource.identifier)
Retry = False
success.append(" - Initiate Autonomous DB Shutdown for {}".format(resource.display_name))
except oci.exceptions.ServiceError as response:
if response.status == 429:
MakeOut("Rate limit kicking in.. waiting {} seconds...".format(RateLimitDelay))
time.sleep(RateLimitDelay)
else:
ErrorsFound = True
errors.append(" - Error ({}) Autonomous DB Shutdown for {} - {}".format(response.status, resource.display_name, response.message))
MakeOut(" - Error ({}) Autonomous DB Shutdown for {} - {}".format(response.status, resource.display_name, response.message))
Retry = False
if resourceDetails.lifecycle_state == "STOPPED" and int(schedulehours[CurrentHour]) > 0:
if Action == "All" or Action == "Up":
# Autonomous DB is stopped and needs to be started with same amount of CPUs configured
if resourceDetails.cpu_core_count == int(schedulehours[CurrentHour]):
MakeOut(" - Starting Autonomous DB {}".format(resource.display_name))
Retry = True
while Retry:
try:
response = database.start_autonomous_database(autonomous_database_id=resource.identifier)
Retry = False
success.append(" - Initiate Autonomous DB Startup for {}".format(resource.display_name))
except oci.exceptions.ServiceError as response:
if response.status == 429:
MakeOut("Rate limit kicking in.. waiting {} seconds...".format(RateLimitDelay))
time.sleep(RateLimitDelay)
else:
ErrorsFound = True
errors.append(" - Error ({}) Autonomous DB Startup for {} - {}".format(response.status, resource.display_name, response.message))
MakeOut(" - Error ({}) Autonomous DB Startup for {} - {}".format(response.status, resource.display_name, response.message))
Retry = False
# Autonomous DB is stopped and needs to be started, after that it requires CPU change
if resourceDetails.cpu_core_count != int(schedulehours[CurrentHour]):
tcount = tcount + 1
thread = AutonomousThread(tcount, resource.identifier, resource.display_name, int(schedulehours[CurrentHour]))
thread.start()
threads.append(thread)
###################################################################################
# InstancePool
###################################################################################
if resource.resource_type == "InstancePool":
# Stop Resource pool action
if resourceDetails.lifecycle_state == "RUNNING" and int(schedulehours[CurrentHour]) == 0:
if Action == "All" or Action == "Down":
success.append(" - Stopping instance pool {}".format(resource.display_name))
MakeOut(" - Stopping instance pool {}".format(resource.display_name))
Retry = True
while Retry:
try:
response = pool.stop_instance_pool(instance_pool_id=resource.identifier)
Retry = False
success.append(" - Stopping instance pool {}".format(resource.display_name))
except oci.exceptions.ServiceError as response:
if response.status == 429:
MakeOut("Rate limit kicking in.. waiting {} seconds...".format(RateLimitDelay))
time.sleep(RateLimitDelay)
else:
ErrorsFound = True
errors.append(" - Error ({}) Stopping instance pool for {} - {}".format(response.status, resource.display_name, response.message))
MakeOut(" - Error ({}) Stopping instance pool for {} - {}".format(response.status, resource.display_name, response.message))
Retry = False
# Scale up action on running instance pool
elif resourceDetails.lifecycle_state == "RUNNING" and int(schedulehours[CurrentHour]) > resourceDetails.size:
if Action == "All" or Action == "Up":
MakeOut(" - Scaling up instance pool {} to {} instances".format(resource.display_name, int(schedulehours[CurrentHour])))
pooldetails = oci.core.models.UpdateInstancePoolDetails()
pooldetails.size = int(schedulehours[CurrentHour])
Retry = True
while Retry:
try:
response = pool.update_instance_pool(instance_pool_id=resource.identifier, update_instance_pool_details=pooldetails)
Retry = False
success.append(" - Scaling up instance pool {} to {} instances".format(resource.display_name, int(schedulehours[CurrentHour])))
except oci.exceptions.ServiceError as response:
if response.status == 429:
MakeOut("Rate limit kicking in.. waiting {} seconds...".format(RateLimitDelay))
time.sleep(RateLimitDelay)
else:
ErrorsFound = True
errors.append(" - Error ({}) Scaling up instance pool {} to {} instances - {}".format(response.status, resource.display_name, int(schedulehours[CurrentHour]), response.message))
Retry = False
# Scale down action on running instance pool
elif resourceDetails.lifecycle_state == "RUNNING" and int(schedulehours[CurrentHour]) < resourceDetails.size:
if Action == "All" or Action == "Down":
MakeOut(" - Scaling down instance pool {} to {} instances".format(resource.display_name, int(schedulehours[CurrentHour])))
pooldetails = oci.core.models.UpdateInstancePoolDetails()
pooldetails.size = int(schedulehours[CurrentHour])
Retry = True
while Retry:
try:
response = pool.update_instance_pool(instance_pool_id=resource.identifier, update_instance_pool_details=pooldetails)
Retry = False
success.append(" - Scaling down instance pool {} to {} instances".format(resource.display_name, int(schedulehours[CurrentHour])))
except oci.exceptions.ServiceError as response:
if response.status == 429:
MakeOut("Rate limit kicking in.. waiting {} seconds...".format(RateLimitDelay))
time.sleep(RateLimitDelay)
else:
ErrorsFound = True
errors.append(" - Error ({}) Scaling down instance pool {} to {} instances - {}".format(response.status, resource.display_name, int(schedulehours[CurrentHour]), response.message))
Retry = False
elif resourceDetails.lifecycle_state == "STOPPED" and int(schedulehours[CurrentHour]) > 0:
if Action == "All" or Action == "Up":
# Start instance pool with same amount of instances as configured
if resourceDetails.size == int(schedulehours[CurrentHour]):
success.append(" - Starting instance pool {} from stopped state".format(resource.display_name))
MakeOut(" - Starting instance pool {} from stopped state".format(resource.display_name))
Retry = True
while Retry:
try:
response = pool.start_instance_pool(instance_pool_id=resource.identifier)
Retry = False
success.append(" - Starting instance pool {} from stopped state".format(resource.display_name))
except oci.exceptions.ServiceError as response:
if response.status == 429:
MakeOut("Rate limit kicking in.. waiting {} seconds...".format(RateLimitDelay))
time.sleep(RateLimitDelay)
else:
ErrorsFound = True
errors.append(" - Error ({}) Starting instance pool {} from stopped state - {}".format(response.status, resource.display_name, response.message))
Retry = False
# Start instance pool and after that resize the instance pool to desired state:
if resourceDetails.size != int(schedulehours[CurrentHour]):
tcount = tcount + 1
thread = PoolThread(tcount, resource.identifier, resource.display_name, int(schedulehours[CurrentHour]))
thread.start()
threads.append(thread)
###################################################################################
# OdaInstance
###################################################################################
if resource.resource_type == "OdaInstance":
if int(schedulehours[CurrentHour]) == 0 or int(schedulehours[CurrentHour]) == 1:
if resourceDetails.lifecycle_state == "ACTIVE" and int(schedulehours[CurrentHour]) == 0:
if Action == "All" or Action == "Down":
MakeOut(" - Initiate ODA shutdown for {}".format(resource.display_name))
Retry = True
while Retry:
try:
response = oda.stop_oda_instance(oda_instance_id=resource.identifier)
Retry = False
success.append(" - Initiate ODA shutdown for {}".format(resource.display_name))
except oci.exceptions.ServiceError as response:
if response.status == 429:
MakeOut("Rate limit kicking in.. waiting {} seconds...".format(RateLimitDelay))
time.sleep(RateLimitDelay)
else:
ErrorsFound = True
errors.append(" - Error ({}) ODA Shutdown for {} - {}".format(response.status, resource.display_name, response.message))
MakeOut(" - Error ({}) ODA Shutdown for {} - {}".format(response.status, resource.display_name, response.message))
Retry = False
if resourceDetails.lifecycle_state == "INACTIVE" and int(schedulehours[CurrentHour]) == 1:
if Action == "All" or Action == "Up":
MakeOut(" - Initiate ODA startup for {}".format(resource.display_name))
Retry = True
while Retry:
try:
response = oda.start_oda_instance(oda_instance_id=resource.identifier)
Retry = False
success.append(" - Initiate ODA startup for {}".format(resource.display_name))
except oci.exceptions.ServiceError as response:
if response.status == 429:
MakeOut("Rate limit kicking in.. waiting {} seconds...".format(RateLimitDelay))
time.sleep(RateLimitDelay)
else:
ErrorsFound = True
errors.append(" - Error ({}) ODA startup for {} - {}".format(response.status, resource.display_name, response.message))
MakeOut(" - Error ({}) ODA startup for {} - {}".format(response.status, resource.display_name, response.message))
Retry = False
###################################################################################
# AnalyticsInstance
###################################################################################
if resource.resource_type == "AnalyticsInstance":
# Execute Shutdown operations
if int(schedulehours[CurrentHour]) == 0 and resourceDetails.lifecycle_state == "ACTIVE":
if Action == "All" or Action == "Down":
MakeOut(" - Initiate Analytics shutdown for {}".format(resource.display_name))
Retry = True
while Retry:
try:
response = analytics.stop_analytics_instance(analytics_instance_id=resource.identifier)
Retry = False
success.append(" - Initiate ODA shutdown for {}".format(resource.display_name))
except oci.exceptions.ServiceError as response:
if response.status == 429:
MakeOut("Rate limit kicking in.. waiting {} seconds...".format(RateLimitDelay))
time.sleep(RateLimitDelay)
else:
ErrorsFound = True
errors.append(" - Error ({}) Analytics Shutdown for {} - {}".format(response.status, resource.display_name, response.message))
MakeOut(" - Error ({}) Analytics Shutdown for {} - {}".format(response.status, resource.display_name, response.message))
Retry = False
# Execute Startup operations
if int(schedulehours[CurrentHour]) != 0 and resourceDetails.lifecycle_state == "INACTIVE":
if Action == "All" or Action == "Up":
if int(resourceDetails.capacity.capacity_value) == int(schedulehours[CurrentHour]):
MakeOut(" - Initiate Analytics Startup for {}".format(resource.display_name))
Retry = True
while Retry:
try:
response = analytics.start_analytics_instance(analytics_instance_id=resource.identifier)
Retry = False
success.append(" - Initiate Analytics Startup for {}".format(resource.display_name))
except oci.exceptions.ServiceError as response:
if response.status == 429:
MakeOut("Rate limit kicking in.. waiting {} seconds...".format(RateLimitDelay))
time.sleep(RateLimitDelay)
else:
ErrorsFound = True
errors.append(" - Error ({}) Analytics Startup for {} - {}".format(response.status, resource.display_name, response.message))
MakeOut(" - Error ({}) Analytics Startup for {} - {}".format(response.status, resource.display_name, response.message))
Retry = False
# Execute Startup and scaling operations
else:
tcount = tcount + 1
thread = AnalyticsThread(tcount, resource.identifier, resource.display_name, int(schedulehours[CurrentHour]))
thread.start()
threads.append(thread)
# Execute scaling operations on running instance
if resourceDetails.lifecycle_state == "ACTIVE" and int(schedulehours[CurrentHour]) != int(resourceDetails.capacity.capacity_value):
if int(resourceDetails.capacity.capacity_value) == 1 or int(resourceDetails.capacity.capacity_value) > 12:
ErrorsFound = True
errors.append(" - Error: Analytics instance with CPU count {} cannot be scaled for instance: {}".format(int(resourceDetails.capacity.capacity_value), resource.display_name))
MakeOut(" - Error: Analytics instance with CPU count {} cannot be scaled for instance: {}".format(int(resourceDetails.capacity.capacity_value), resource.display_name))
goscale = False
if (int(schedulehours[CurrentHour]) >= 2 and int(schedulehours[CurrentHour]) <= 8) and (
int(resourceDetails.capacity.capacity_value) >= 2 and int(resourceDetails.capacity.capacity_value) <= 8):
capacity = oci.analytics.models.capacity.Capacity()
capacity.capacity_value = int(schedulehours[CurrentHour])
capacity.capacity_type = capacity.CAPACITY_TYPE_OLPU_COUNT
details = oci.analytics.models.ScaleAnalyticsInstanceDetails()
details.capacity = capacity
goscale = True
if (int(schedulehours[CurrentHour]) >= 10 and int(schedulehours[CurrentHour]) <= 12) and (int(resourceDetails.capacity.capacity_value) >= 10 and int(resourceDetails.capacity.capacity_value) <= 12):
capacity = oci.analytics.models.capacity.Capacity()
capacity.capacity_value = int(schedulehours[CurrentHour])
capacity.capacity_type = capacity.CAPACITY_TYPE_OLPU_COUNT
details = oci.analytics.models.ScaleAnalyticsInstanceDetails()
details.capacity = capacity
goscale = True
if goscale:
goscale = False
if Action == "All":
goscale = True
elif int(resourceDetails.capacity.capacity_value) < int(schedulehours[CurrentHour]) and Action == "Up":
goscale = True
elif int(resourceDetails.capacity.capacity_value) > int(schedulehours[CurrentHour]) and Action == "Down":
goscale = True
if goscale:
MakeOut(" - Initiate Analytics Scaling from {} to {}oCPU for {}".format(
int(resourceDetails.capacity.capacity_value), int(schedulehours[CurrentHour]),
resource.display_name))
Retry = True
while Retry:
try:
response = analytics.scale_analytics_instance(analytics_instance_id=resource.identifier, scale_analytics_instance_details=details)
Retry = False
success.append(" - Initiate Analytics Scaling from {} to {}oCPU for {}".format(int(resourceDetails.capacity.capacity_value),
int(schedulehours[CurrentHour]), resource.display_name))
except oci.exceptions.ServiceError as response:
if response.status == 429:
MakeOut("Rate limit kicking in.. waiting {} seconds...".format(RateLimitDelay))
time.sleep(RateLimitDelay)
else:
ErrorsFound = True
errors.append(" - Error ({}) Analytics scaling from {} to {}oCPU for {} - {}".format(response.status, int(resourceDetails.capacity.capacity_value), int(schedulehours[CurrentHour]), resource.display_name, response.message))
MakeOut(" - Error ({}) Analytics scaling from {} to {}oCPU for {} - {}".format(response.status, int(resourceDetails.capacity.capacity_value), int(schedulehours[CurrentHour]), resource.display_name, response.message))
Retry = False
else:
errors.append(" - Error (Analytics scaling from {} to {}oCPU, invalid combination for {}".format(int(resourceDetails.capacity.capacity_value), int(schedulehours[CurrentHour]), resource.display_name))
MakeOut(" - Error (Analytics scaling from {} to {}oCPU, invalid combination for {}".format(int(resourceDetails.capacity.capacity_value), int(schedulehours[CurrentHour]), resource.display_name))
###################################################################################
# IntegrationInstance
###################################################################################
if resource.resource_type == "IntegrationInstance":
if int(schedulehours[CurrentHour]) == 0 or int(schedulehours[CurrentHour]) == 1:
resourceDetails = integration.get_integration_instance(integration_instance_id=resource.identifier).data
if resourceDetails.lifecycle_state == "ACTIVE" and int(schedulehours[CurrentHour]) == 0:
if Action == "All" or Action == "Down":
MakeOut(" - Initiate Integration Service shutdown for {}".format(resource.display_name))
Retry = True
while Retry:
try:
response = integration.stop_integration_instance(integration_instance_id=resource.identifier)
Retry = False
success.append(" - Initiate Integration Service shutdown for {}".format(resource.display_name))
except oci.exceptions.ServiceError as response:
if response.status == 429:
MakeOut("Rate limit kicking in.. waiting {} seconds...".format(RateLimitDelay))
time.sleep(RateLimitDelay)
else:
ErrorsFound = True
errors.append(" - Error ({}) Integration Service Shutdown for {} - {}".format(response.status, resource.display_name, response.message))
MakeOut(" - Error ({}) Integration Service Shutdown for {} - {}".format(response.status, resource.display_name, response.message))
Retry = False
if resourceDetails.lifecycle_state == "INACTIVE" and int(schedulehours[CurrentHour]) == 1:
if Action == "All" or Action == "Up":
MakeOut(" - Initiate Integration Service startup for {}".format(resource.display_name))
Retry = True
while Retry:
try:
response = integration.start_integration_instance(integration_instance_id=resource.identifier)
Retry = False
success.append(" - Initiate Integration Service startup for {}".format(resource.display_name))
except oci.exceptions.ServiceError as response:
if response.status == 429:
MakeOut("Rate limit kicking in.. waiting {} seconds...".format(RateLimitDelay))
time.sleep(RateLimitDelay)
else:
ErrorsFound = True
errors.append(" - Error ({}) Integration Service startup for {} - {}".format(response.message, resource.display_name, response.message))
MakeOut(" - Error ({}) Integration Service startup for {} - {}".format(response.message, resource.display_name, response.message))
Retry = False
###################################################################################
# LoadBalancer
###################################################################################
if resource.resource_type == "LoadBalancer":
requestedShape = int(schedulehours[CurrentHour])
shape = 0
if resourceDetails.shape_name == "10Mbps":
shape = 10
if resourceDetails.shape_name == "100Mbps":
shape = 100
if resourceDetails.shape_name == "400Mbps":
shape = 400
if resourceDetails.shape_name == "8000Mbps":
shape = 8000
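# The schedule value for a LoadBalancer is interpreted as the target bandwidth shape in Mbps;
# only the predefined shapes 10, 100, 400 and 8000 Mbps are handled here.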
if requestedShape == 10 or requestedShape == 100 or requestedShape == 400 or requestedShape == 8000:
if requestedShape < shape:
if Action == "All" or Action == "Down":
details = oci.load_balancer.models.UpdateLoadBalancerShapeDetails()
details.shape_name = "{}Mbps".format(requestedShape)
MakeOut(" - Downsizing loadbalancer from {} to {}".format(resourceDetails.shape_name, details.shape_name))
try:
loadbalancer.update_load_balancer_shape(load_balancer_id=resource.identifier, update_load_balancer_shape_details=details,
retry_strategy=oci.retry.DEFAULT_RETRY_STRATEGY)
except oci.exceptions.ServiceError as response:
MakeOut(" - Error Downsizing: {}".format(response.message))
errors.append(" - Error ({}) Integration Service startup for {}".format(response.message, resource.display_name))
if requestedShape > shape:
if Action == "All" or Action == "Up":
details = oci.load_balancer.models.UpdateLoadBalancerShapeDetails()
details.shape_name = "{}Mbps".format(requestedShape)
MakeOut(" - Upsizing loadbalancer from {} to {}".format(resourceDetails.shape_name, details.shape_name))
try:
loadbalancer.update_load_balancer_shape(load_balancer_id=resource.identifier, update_load_balancer_shape_details=details,
retry_strategy=oci.retry.DEFAULT_RETRY_STRATEGY)
except oci.exceptions.ServiceError as response:
MakeOut(" - Error Upsizing: {} ".format(response.message))
errors.append(" - Error ({}) Integration Service startup for {}".format(response.message, resource.display_name))
else:
MakeOut(" - Error {}: requested shape {} does not exists".format(resource.display_name, requestedShape))
###################################################################################
# MysqlDBInstance
###################################################################################
if resource.resource_type == "MysqlDBInstance":
if int(schedulehours[CurrentHour]) == 0 or int(schedulehours[CurrentHour]) == 1:
if resourceDetails.lifecycle_state == "ACTIVE" and int(schedulehours[CurrentHour]) == 0:
if Action == "All" or Action == "Down":
MakeOut(" - Initiate MySQL shutdown for {}".format(resource.display_name))
Retry = True
while Retry:
try:
stopaction = oci.mysql.models.StopDbSystemDetails()
stopaction.shutdown_type = "SLOW"
response = mysql.stop_db_system(db_system_id=resource.identifier, stop_db_system_details=stopaction)
Retry = False
success.append(" - Initiate MySql shutdown for {}".format(resource.display_name))
except oci.exceptions.ServiceError as response:
if response.status == 429:
MakeOut("Rate limit kicking in.. waiting {} seconds...".format(RateLimitDelay))
time.sleep(RateLimitDelay)
else:
ErrorsFound = True
errors.append(" - Error ({}) MySQL Shutdown for {} - {}".format(response.status, resource.display_name, response.message))
MakeOut(" - Error ({}) MySQL Shutdown for {} - {}".format(response.status, resource.display_name, response.message))
Retry = False
if resourceDetails.lifecycle_state == "INACTIVE" and int(schedulehours[CurrentHour]) == 1:
if Action == "All" or Action == "Up":
MakeOut(" - Initiate MySQL startup for {}".format(resource.display_name))
Retry = True
while Retry:
try:
response = mysql.start_db_system(db_system_id=resource.identifier)
Retry = False
success.append(" - Initiate MySQL startup for {}".format(resource.display_name))
except oci.exceptions.ServiceError as response:
if response.status == 429:
MakeOut("Rate limit kicking in.. waiting {} seconds...".format(RateLimitDelay))
time.sleep(RateLimitDelay)
else:
ErrorsFound = True
errors.append(" - Error ({}) MySQL startup for {} - {}".format(response.status, resource.display_name, response.message))
Retry = False
###################################################################################
# GoldenGateDeployment
###################################################################################
if resource.resource_type == "GoldenGateDeployment":
if int(schedulehours[CurrentHour]) == 0 or int(schedulehours[CurrentHour]) == 1:
if resourceDetails.lifecycle_state == "ACTIVE" and int(schedulehours[CurrentHour]) == 0:
if Action == "All" or Action == "Down":
MakeOut(" - Initiate GoldenGate shutdown for {}".format(resource.display_name))
Retry = True
while Retry:
try:
stopaction = oci.golden_gate.models.StopDeploymentDetails()
stopaction.type = "DEFAULT"
response = goldengate.stop_deployment(deployment_id=resource.identifier, stop_deployment_details=stopaction)
Retry = False
success.append(" - Initiate GoldenGate shutdown for {}".format(resource.display_name))
except oci.exceptions.ServiceError as response:
if response.status == 429:
MakeOut("Rate limit kicking in.. waiting {} seconds...".format(RateLimitDelay))
time.sleep(RateLimitDelay)
else:
ErrorsFound = True
errors.append(" - Error ({}) GoldenGate Shutdown for {} - {}".format(response.status, resource.display_name, response.message))
MakeOut(" - Error ({}) GoldenGate Shutdown for {} - {}".format(response.status, resource.display_name, response.message))
Retry = False
if resourceDetails.lifecycle_state == "INACTIVE" and int(schedulehours[CurrentHour]) == 1:
if Action == "All" or Action == "Up":
MakeOut(" - Initiate GoldenGate startup for {}".format(resource.display_name))
Retry = True
while Retry:
try:
startaction = oci.golden_gate.models.StartDeploymentDetails()
startaction.type = "DEFAULT"
response = goldengate.start_deployment(deployment_id=resource.identifier, start_deployment_details=startaction)
if response.status == 200:
success.append(" - Initiate GoldenGate startup for {}".format(resource.display_name))
Retry = False
except oci.exceptions.ServiceError as response:
if response.status == 429:
MakeOut("Rate limit kicking in.. waiting {} seconds...".format(RateLimitDelay))
time.sleep(RateLimitDelay)
else:
ErrorsFound = True
errors.append(" - Error ({}) GoldenGate startup for {} - {}".format(response.status, resource.display_name, response.message))
Retry = False
###################################################################################
# Data Integration Workspace
###################################################################################
if resource.resource_type == "DISWorkspace":
if int(schedulehours[CurrentHour]) == 0 or int(schedulehours[CurrentHour]) == 1:
if resourceDetails.lifecycle_state == "ACTIVE" and int(
schedulehours[CurrentHour]) == 0:
if Action == "All" or Action == "Down":
MakeOut(" - Initiate Data Integration Workspace shutdown for {}".format(resource.display_name))
Retry = True
while Retry:
try:
response = dataintegration.stop_workspace(workspace_id=resource.identifier)
Retry = False
success.append(" - Initiate Data Integration Workspace shutdown for {}".format(
resource.display_name))
except oci.exceptions.ServiceError as response:
if response.status == 429:
MakeOut("Rate limit kicking in.. waiting {} seconds...".format(
RateLimitDelay))
time.sleep(RateLimitDelay)
else:
ErrorsFound = True
errors.append(
" - Error ({}) Data Integration Workspace Shutdown for {} - {}".format(
response.status, resource.display_name, response.message))
MakeOut(" - Error ({}) Data Integration Shutdown for {} - {}".format(
response.status, resource.display_name, response.message))
Retry = False
if resourceDetails.lifecycle_state == "STOPPED" and int(
schedulehours[CurrentHour]) == 1:
if Action == "All" or Action == "Up":
MakeOut(" - Initiate Data Integration Workspace startup for {}".format(resource.display_name))
Retry = True
while Retry:
try:
response = dataintegration.start_workspace(workspace_id=resource.identifier)
Retry = False
success.append(" - Initiate Data Integration Workspace startup for {}".format(
resource.display_name))
except oci.exceptions.ServiceError as response:
if response.status == 429:
MakeOut("Rate limit kicking in.. waiting {} seconds...".format(
RateLimitDelay))
time.sleep(RateLimitDelay)
else:
ErrorsFound = True
errors.append(" - Error ({}) Data Integration Startup startup for {} - {}".format(
response.status, resource.display_name, response.message))
Retry = False
###################################################################################
# Wait for any AutonomousDB and Instance Pool start and rescale tasks to complete
###################################################################################
MakeOut("Waiting for all threads to complete...")
for t in threads:
t.join()
MakeOut("Region {} Completed.".format(region))
##########################################################################
# Main
##########################################################################
# Get Command Line Parser
parser = argparse.ArgumentParser()
parser.add_argument('-t', default="", dest='config_profile', help='Config file section to use (tenancy profile)')
parser.add_argument('-ip', action='store_true', default=True, dest='is_instance_principals', help='Use Instance Principals for Authentication')
parser.add_argument('-dt', action='store_true', default=False, dest='is_delegation_token', help='Use Delegation Token for Authentication')
parser.add_argument('-a', default="All", dest='action', help='Action All, Down, Up')
parser.add_argument('-tag', default="Schedule", dest='tag', help='Tag to examine, Default=Schedule')
parser.add_argument('-rg', default="", dest='filter_region', help='Filter Region')
parser.add_argument('-ic', default="", dest='compartment_include', help='Include Compartment OCID')
parser.add_argument('-ec', default="", dest='compartment_exclude', help='Exclude Compartment OCID')
parser.add_argument('-ignrtime', action='store_true', default=False, dest='ignore_region_time', help='Ignore Region Time - Use Host Time')
parser.add_argument('-ignoremysql', action='store_true', default=False, dest='ignoremysql', help='Ignore MYSQL processing')
parser.add_argument('-printocid', action='store_true', default=False, dest='print_ocid', help='Print OCID for resources')
parser.add_argument('-topic', default="", dest='topic', help='Topic to send summary in home region')
cmd = parser.parse_args()
if cmd.action != "All" and cmd.action != "Down" and cmd.action != "Up":
parser.print_help()
sys.exit(0)
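# Illustrative invocations (the script name, profile, region and topic OCID below are
# placeholders, not real identifiers):
#   python thisscript.py -ip -a All
#   python thisscript.py -t MYPROFILE -a Down -rg eu-frankfurt-1
#   python thisscript.py -ip -tag Schedule -topic ocid1.onstopic.oc1..example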
####################################
# Assign variables
####################################
filter_region = cmd.filter_region
Action = cmd.action
PredefinedTag = cmd.tag
compartment_exclude = cmd.compartment_exclude if cmd.compartment_exclude else ""
compartment_include = cmd.compartment_include if cmd.compartment_include else ""
####################################
# Start print time info
####################################
start_time = str(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"))
print_header("Running Auto Scale")
# Identity extract compartments
config, signer = create_signer(cmd.config_profile, cmd.is_instance_principals, cmd.is_delegation_token)
compartments = []
tenancy = None
tenancy_home_region = ""
try:
MakeOut("Starts at " + str(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")))
MakeOut("\nConnecting to Identity Service...")
identity = oci.identity.IdentityClient(config, signer=signer)
tenancy = identity.get_tenancy(config["tenancy"]).data
regions = identity.list_region_subscriptions(tenancy.id).data
for reg in regions:
if reg.is_home_region:
tenancy_home_region = str(reg.region_name)
MakeOut("")
MakeOut("Version : " + str(Version))
MakeOut("Command Line : " + ' '.join(x for x in sys.argv[1:]))
MakeOut("Tenant Name : " + str(tenancy.name))
MakeOut("Tenant Id : " + tenancy.id)
MakeOut("Home Region : " + tenancy_home_region)
MakeOut("Action : " + Action)
MakeOut("Tag : " + PredefinedTag)
if cmd.topic:
MakeOut("Topic : " + cmd.topic)
if cmd.filter_region:
MakeOut("Filter Region : " + cmd.filter_region)
MakeOut("")
compartments = identity_read_compartments(identity, tenancy)
except Exception as e:
raise RuntimeError("\nError connecting to Identity Service - " + str(e))
############################################
# Define Global Variables to store info
############################################
success = []
errors = []
total_resources = 0
ErrorsFound = False
############################################
# Loop on all regions
############################################
for region_name in [str(es.region_name) for es in regions]:
if cmd.filter_region:
if cmd.filter_region not in region_name:
continue
print_header("Region " + region_name)
# set the region in the config and signer
config['region'] = region_name
signer.region = region_name
###############################################
# services - global used by threads as well
###############################################
compute = oci.core.ComputeClient(config, signer=signer)
database = oci.database.DatabaseClient(config, signer=signer)
pool = oci.core.ComputeManagementClient(config, signer=signer)
search = oci.resource_search.ResourceSearchClient(config, signer=signer)
oda = oci.oda.OdaClient(config, signer=signer)
analytics = oci.analytics.AnalyticsClient(config, signer=signer)
integration = oci.integration.IntegrationInstanceClient(config, signer=signer)
loadbalancer = oci.load_balancer.LoadBalancerClient(config, signer=signer)
mysql = oci.mysql.DbSystemClient(config, signer=signer)
goldengate = oci.golden_gate.GoldenGateClient(config, signer=signer)
dataintegration = oci.data_integration.DataIntegrationClient(config, signer=signer)
###############################################
# Run Scale Region
###############################################
autoscale_region(region_name)
############################################
# Send summary if Topic Specified
############################################
if cmd.topic:
# set the home region in the config and signer
config['region'] = tenancy_home_region
signer.region = tenancy_home_region
ns = oci.ons.NotificationDataPlaneClient(config, signer=signer)
if LogLevel == "ALL" or (LogLevel == "ERRORS" and ErrorsFound):
MakeOut("\nPublishing notification")
body_message = "Scaling ({}) just completed. Found {} errors across {} scaleable instances (from a total of {} instances). \nError Details: {}\n\nSuccess Details: {}".format(Action, len(errors), len(success), total_resources, errors, success)
Retry = True
while Retry:
try:
ns_response = ns.publish_message(cmd.topic, {"title": "Scaling Script ran across tenancy: {}".format(tenancy.name), "body": body_message})
Retry = False
except oci.exceptions.ServiceError as ns_response:
if ns_response.status == 429:
MakeOut("Rate limit kicking in.. waiting {} seconds...".format(RateLimitDelay))
time.sleep(RateLimitDelay)
else:
MakeOut("Error ({}) publishing notification - {}".format(ns_response.status, ns_response.message))
Retry = False
MakeOut("All scaling tasks done, checked {} resources.".format(total_resources))
| [] | [] | ["OCI_CONFIG_FILE", "OCI_CONFIG_PROFILE"] | [] | ["OCI_CONFIG_FILE", "OCI_CONFIG_PROFILE"] | python | 2 | 0 | |
gcsfs/core.py
|
# -*- coding: utf-8 -*-
"""
Google Cloud Storage pythonic interface
"""
import textwrap
import fsspec
import decorator
from base64 import b64encode, b64decode
import google.auth as gauth
import google.auth.compute_engine
import google.auth.credentials
from google.auth.transport.requests import AuthorizedSession
from google.auth.exceptions import GoogleAuthError
from google.oauth2.credentials import Credentials
from google_auth_oauthlib.flow import InstalledAppFlow
from google.oauth2 import service_account
from hashlib import md5
import io
import json
import logging
import traceback
import os
import posixpath
import pickle
import re
import requests
import time
import warnings
import random
from requests.exceptions import RequestException, ProxyError
from .utils import HttpError, RateLimitException, is_retriable
logger = logging.getLogger(__name__)
# Allow optional tracing of call locations for api calls.
# Disabled by default to avoid *massive* test logs.
_TRACE_METHOD_INVOCATIONS = False
@decorator.decorator
def _tracemethod(f, self, *args, **kwargs):
logger.debug("%s(args=%s, kwargs=%s)", f.__name__, args, kwargs)
if _TRACE_METHOD_INVOCATIONS and logger.isEnabledFor(logging.DEBUG - 1):
tb_io = io.StringIO()
traceback.print_stack(file=tb_io)
logger.log(logging.DEBUG - 1, tb_io.getvalue())
return f(self, *args, **kwargs)
# client created 2018-01-16
not_secret = {
"client_id": "586241054156-9kst7ltfj66svc342pcn43vp6ta3idin"
".apps.googleusercontent.com",
"client_secret": "xto0LIFYX35mmHF9T1R2QBqT",
}
client_config = {
"installed": {
"client_id": not_secret["client_id"],
"client_secret": not_secret["client_secret"],
"auth_uri": "https://accounts.google.com/o/oauth2/auth",
"token_uri": "https://accounts.google.com/o/oauth2/token",
}
}
tfile = os.path.join(os.path.expanduser("~"), ".gcs_tokens")
ACLs = {
"authenticatedread",
"bucketownerfullcontrol",
"bucketownerread",
"private",
"projectprivate",
"publicread",
}
bACLs = {
"authenticatedRead",
"private",
"projectPrivate",
"publicRead",
"publicReadWrite",
}
DEFAULT_PROJECT = os.environ.get("GCSFS_DEFAULT_PROJECT", "")
GCS_MIN_BLOCK_SIZE = 2 ** 18
DEFAULT_BLOCK_SIZE = 5 * 2 ** 20
def quote_plus(s):
"""
Convert some URL elements to be HTTP-safe.
Not the same as in urllib, because, for instance, parentheses and commas
are passed through.
Parameters
----------
s: input URL/portion
Returns
-------
corrected URL
"""
s = s.replace("/", "%2F")
s = s.replace(" ", "%20")
return s
def norm_path(path):
"""Canonicalize path to '{bucket}/{name}' form."""
return "/".join(split_path(path))
def split_path(path):
"""
Normalise GCS path string into bucket and key.
Parameters
----------
path : string
Input path, like `gcs://mybucket/path/to/file`.
Path is of the form: '[gs|gcs://]bucket[/key]'
Returns
-------
(bucket, key) tuple
Examples
--------
>>> split_path("gcs://mybucket/path/to/file")
['mybucket', 'path/to/file']
>>> split_path("mybucket/path/to/file")
['mybucket', 'path/to/file']
>>> split_path("gs://mybucket")
['mybucket', '']
"""
if path.startswith("gcs://"):
path = path[6:]
if path.startswith("gs://"):
path = path[5:]
if path.startswith("/"):
path = path[1:]
if "/" not in path:
return path, ""
else:
return path.split("/", 1)
def validate_response(r, path):
"""
Check the requests object r, raise error if it's not ok.
Parameters
----------
r: requests response object
path: associated URL path, for error messages
"""
if not r.ok:
m = str(r.content)
error = None
try:
error = r.json()["error"]
msg = error["message"]
except: # noqa: E722
# TODO: limit to appropriate exceptions
msg = str(r.content)
if r.status_code == 404:
raise FileNotFoundError
elif r.status_code == 403:
raise IOError("Forbidden: %s\n%s" % (path, msg))
elif r.status_code == 429:
raise RateLimitException(error)
elif r.status_code == 502:
raise ProxyError()
elif "invalid" in m:
raise ValueError("Bad Request: %s\n%s" % (path, msg))
elif error:
raise HttpError(error)
else:
raise RuntimeError(m)
class GCSFileSystem(fsspec.AbstractFileSystem):
r"""
Connect to Google Cloud Storage.
The following modes of authentication are supported:
- ``token=None``, GCSFS will attempt to guess your credentials in the
following order: gcloud CLI default, gcsfs cached token, google compute
metadata service, anonymous.
- ``token='google_default'``, your default gcloud credentials will be used,
which are typically established by doing ``gcloud login`` in a terminal.
- ``token='cache'``, credentials from previously successful gcsfs
authentication will be used (use this after "browser" auth succeeded)
- ``token='anon'``, no authentication is performed, and you can only
access data which is accessible to allUsers (in this case, the project and
access level parameters are meaningless)
- ``token='browser'``, you get an access code with which you can
authenticate via a specially provided URL
- if ``token='cloud'``, we assume we are running within google compute
or google container engine, and query the internal metadata directly for
a token.
- you may supply a token generated by the
[gcloud](https://cloud.google.com/sdk/docs/)
utility; this is either a python dictionary, the name of a file
containing the JSON returned by logging in with the gcloud CLI tool,
or a Credentials object. gcloud typically stores its tokens in locations
such as
``~/.config/gcloud/application_default_credentials.json``,
`` ~/.config/gcloud/credentials``, or
``~\AppData\Roaming\gcloud\credentials``, etc.
Specific methods (e.g. `ls`, `info`, ...) may return object details from GCS.
These detailed listings include the
[object resource](https://cloud.google.com/storage/docs/json_api/v1/objects#resource)
GCS *does not* include "directory" objects but instead generates
directories by splitting
[object names](https://cloud.google.com/storage/docs/key-terms).
This means that, for example,
a directory does not need to exist for an object to be created within it.
Creating an object implicitly creates its parent directories, and removing
all objects from a directory implicitly deletes the empty directory.
`GCSFileSystem` generates listing entries for these implied directories in
listing APIs with the object properties:
- "name" : string
The "{bucket}/{name}" path of the dir, used in calls to
GCSFileSystem or GCSFile.
- "bucket" : string
The name of the bucket containing this object.
- "kind" : 'storage#object'
- "size" : 0
- "storageClass" : 'DIRECTORY'
- type: 'directory' (fsspec compat)
GCSFileSystem maintains a per-implied-directory cache of object listings and
fulfills all object information and listing requests from cache. This implies, for example, that objects
created via other processes *will not* be visible to the GCSFileSystem until the cache is
refreshed. Calls to GCSFileSystem.open and calls to GCSFile are not affected by this cache.
In the default case the cache is never expired. This may be controlled via the `cache_timeout`
GCSFileSystem parameter or via explicit calls to `GCSFileSystem.invalidate_cache`.
Parameters
----------
project : string
project_id to work under. Note that this is not the same as, but often
very similar to, the project name.
This is required in order
to list all the buckets you have access to within a project and to
create/delete buckets, or update their access policies.
If ``token='google_default'``, the value is overridden by the default,
if ``token='anon'``, the value is ignored.
access : one of {'read_only', 'read_write', 'full_control'}
Full control implies read/write as well as modifying metadata,
e.g., access control.
token: None, dict or string
(see description of authentication methods, above)
consistency: 'none', 'size', 'md5'
Check method when writing files. Can be overridden in open().
cache_timeout: float, seconds
Cache expiration time in seconds for object metadata cache.
Set cache_timeout <= 0 for no caching, None for no cache expiration.
secure_serialize: bool
If True, instances re-establish auth upon deserialization; if False,
token is passed directly, which may be a security risk if passed
across an insecure network.
check_connection: bool
When token=None, gcsfs will attempt various methods of establishing
credentials, falling back to anon. It is possible for a method to
find credentials in the system that turn out not to be valid. Setting
this parameter to True will ensure that an actual operation is
attempted before deciding that credentials are valid.
requester_pays : bool or str, default False
Whether to use requester-pays requests. This will include your
project ID `project` in requests as the `userProject`, and you'll be
billed for accessing data from requester-pays buckets. Optionally,
pass a project-id here as a string to use that as the `userProject`.
"""
scopes = {"read_only", "read_write", "full_control"}
retries = 6 # number of retries on http failure
base = "https://www.googleapis.com/storage/v1/"
default_block_size = DEFAULT_BLOCK_SIZE
protocol = "gcs", "gs"
def __init__(
self,
project=DEFAULT_PROJECT,
access="full_control",
token=None,
block_size=None,
consistency="none",
cache_timeout=None,
secure_serialize=True,
check_connection=False,
requests_timeout=None,
requester_pays=False,
**kwargs
):
if access not in self.scopes:
raise ValueError("access must be one of {}", self.scopes)
if project is None:
warnings.warn("GCS project not set - cannot list or create buckets")
if block_size is not None:
self.default_block_size = block_size
self.project = project
self.requester_pays = requester_pays
self.access = access
self.scope = "https://www.googleapis.com/auth/devstorage." + access
self.consistency = consistency
self.token = token
self.cache_timeout = cache_timeout
self.requests_timeout = requests_timeout
self.check_credentials = check_connection
self._listing_cache = {}
self.connect(method=token)
super().__init__(self, **kwargs)
if not secure_serialize:
self.token = self.session.credentials
@staticmethod
def load_tokens():
"""Get "browser" tokens from disc"""
try:
with open(tfile, "rb") as f:
tokens = pickle.load(f)
# backwards compatibility
tokens = {
k: (GCSFileSystem._dict_to_credentials(v) if isinstance(v, dict) else v)
for k, v in tokens.items()
}
except Exception:
tokens = {}
GCSFileSystem.tokens = tokens
def _connect_google_default(self):
credentials, project = gauth.default(scopes=[self.scope])
msg = textwrap.dedent(
"""\
User-provided project '{}' does not match the google default project '{}'. Either
1. Accept the google-default project by not passing a `project` to GCSFileSystem
2. Configure the default project to match the user-provided project (gcloud config set project)
3. Use an authorization method other than 'google_default' by providing 'token=...'
"""
)
if self.project and self.project != project:
raise ValueError(msg.format(self.project, project))
self.project = project
self.session = AuthorizedSession(credentials)
def _connect_cloud(self):
credentials = gauth.compute_engine.Credentials()
self.session = AuthorizedSession(credentials)
def _connect_cache(self):
project, access = self.project, self.access
if (project, access) in self.tokens:
credentials = self.tokens[(project, access)]
self.session = AuthorizedSession(credentials)
def _dict_to_credentials(self, token):
"""
Convert old dict-style token.
Does not preserve access token itself, assumes refresh required.
"""
try:
token = service_account.Credentials.from_service_account_info(
token, scopes=[self.scope]
)
except: # noqa: E722
# TODO: catch specific exceptions
token = Credentials(
None,
refresh_token=token["refresh_token"],
client_secret=token["client_secret"],
client_id=token["client_id"],
token_uri="https://www.googleapis.com/oauth2/v4/token",
scopes=[self.scope],
)
return token
def _connect_token(self, token):
"""
Connect using a concrete token
Parameters
----------
token: str, dict or Credentials
If a str, try to load as a Service file, or next as a JSON; if
dict, try to interpret as credentials; if Credentials, use directly.
"""
if isinstance(token, str):
if not os.path.exists(token):
raise FileNotFoundError(token)
try:
# is this a "service" token?
self._connect_service(token)
return
except: # noqa: E722
# TODO: catch specific exceptions
# some other kind of token file
# will raise exception if is not json
token = json.load(open(token))
if isinstance(token, dict):
credentials = self._dict_to_credentials(token)
elif isinstance(token, google.auth.credentials.Credentials):
credentials = token
else:
raise ValueError("Token format not understood")
self.session = AuthorizedSession(credentials)
def _connect_service(self, fn):
# raises exception if file does not match expectation
credentials = service_account.Credentials.from_service_account_file(
fn, scopes=[self.scope]
)
self.session = AuthorizedSession(credentials)
def _connect_anon(self):
self.session = requests.Session()
def _connect_browser(self):
flow = InstalledAppFlow.from_client_config(client_config, [self.scope])
credentials = flow.run_console()
self.tokens[(self.project, self.access)] = credentials
self._save_tokens()
self.session = AuthorizedSession(credentials)
def connect(self, method=None):
"""
Establish session token. A new token will be requested if the current
one is within 100s of expiry.
Parameters
----------
method: str (google_default|cache|cloud|token|anon|browser) or None
Type of authorisation to implement - calls `_connect_*` methods.
If None, will try sequence of methods.
"""
if method not in [
"google_default",
"cache",
"cloud",
"token",
"anon",
"browser",
None,
]:
self._connect_token(method)
elif method is None:
for meth in ["google_default", "cache", "anon"]:
try:
self.connect(method=meth)
if self.check_credentials and meth != "anon":
self.ls("anaconda-public-data")
logger.debug("Connected with method %s", meth)
except: # noqa: E722
# TODO: catch specific exceptions
self.session = None
logger.debug('Connection with method "%s" failed' % meth)
if self.session:
break
else:
self.__getattribute__("_connect_" + method)()
self.method = method
if self.session is None:
if method is None:
msg = (
"Automatic authentication failed, you should try "
"specifying a method with the token= kwarg"
)
else:
msg = (
"Auth failed with method '%s'. See the docstrings for "
"further details about your auth mechanism, also "
"available at https://gcsfs.readthedocs.io/en/latest/"
"api.html#gcsfs.core.GCSFileSystem" % method
)
raise RuntimeError(msg)
@staticmethod
def _save_tokens():
try:
with open(tfile, "wb") as f:
pickle.dump(GCSFileSystem.tokens, f, 2)
except Exception as e:
warnings.warn("Saving token cache failed: " + str(e))
@_tracemethod
def _call(self, method, path, *args, **kwargs):
for k, v in list(kwargs.items()):
if v is None:
del kwargs[k]
json = kwargs.pop("json", None)
headers = kwargs.pop("headers", None)
data = kwargs.pop("data", None)
r = None
if not path.startswith("http"):
path = self.base + path
if args:
path = path.format(*[quote_plus(p) for p in args])
# needed for requester pays buckets
if self.requester_pays:
if isinstance(self.requester_pays, str):
user_project = self.requester_pays
else:
user_project = self.project
kwargs["userProject"] = user_project
for retry in range(self.retries):
try:
if retry > 0:
time.sleep(min(random.random() + 2 ** (retry - 1), 32))
r = self.session.request(
method,
path,
params=kwargs,
json=json,
headers=headers,
data=data,
timeout=self.requests_timeout,
)
validate_response(r, path)
break
except (
HttpError,
RequestException,
RateLimitException,
GoogleAuthError,
) as e:
if (
isinstance(e, HttpError)
and e.code == 400
and "requester pays" in e.message
):
msg = "Bucket is requester pays. Set `requester_pays=True` when creating the GCSFileSystem."
raise ValueError(msg) from e
if retry == self.retries - 1:
logger.exception("_call out of retries on exception: %s", e)
raise e
if is_retriable(e):
logger.debug("_call retrying after exception: %s", e)
continue
logger.exception("_call non-retriable exception: %s", e)
raise e
return r
@property
def buckets(self):
"""Return list of available project buckets."""
return [b["name"] for b in self._list_buckets()]
@staticmethod
def _process_object(bucket, object_metadata):
"""Process object resource into gcsfs object information format.
Process GCS object resource via type casting and attribute updates to
the cache-able gcsfs object information format. Returns an updated copy
of the object resource.
(See https://cloud.google.com/storage/docs/json_api/v1/objects#resource)
"""
result = dict(object_metadata)
result["size"] = int(object_metadata.get("size", 0))
result["name"] = posixpath.join(bucket, object_metadata["name"])
result["type"] = "file"
return result
@_tracemethod
def _get_object(self, path):
"""Return object information at the given path."""
bucket, key = split_path(path)
# Check if parent dir is in listing cache
parent = "/".join([bucket, posixpath.dirname(key.rstrip("/"))]) + "/"
parent_cache = self._maybe_get_cached_listing(parent)
if parent_cache:
cached_obj = [o for o in parent_cache["items"] if o["name"] == key]
if cached_obj:
logger.debug("found cached object: %s", cached_obj)
return cached_obj[0]
else:
logger.debug("object not found cached parent listing")
raise FileNotFoundError(path)
if not key:
# Attempt to "get" the bucket root, return error instead of
# listing.
raise FileNotFoundError(path)
result = self._process_object(
bucket, self._call("GET", "b/{}/o/{}", bucket, key).json()
)
return result
@_tracemethod
def _maybe_get_cached_listing(self, path):
logger.debug("_maybe_get_cached_listing: %s", path)
if path in self._listing_cache:
retrieved_time, listing = self._listing_cache[path]
cache_age = time.time() - retrieved_time
if self.cache_timeout is not None and cache_age > self.cache_timeout:
logger.debug(
"expired cache path: %s retrieved_time: %.3f cache_age: "
"%.3f cache_timeout: %.3f",
path,
retrieved_time,
cache_age,
self.cache_timeout,
)
del self._listing_cache[path]
return None
return listing
return None
@_tracemethod
def _list_objects(self, path):
path = norm_path(path)
clisting = self._maybe_get_cached_listing(path)
if clisting:
return clisting
listing = self._do_list_objects(path)
retrieved_time = time.time()
self._listing_cache[path] = (retrieved_time, listing)
return listing
@_tracemethod
def _do_list_objects(self, path, max_results=None):
"""Object listing for the given {bucket}/{prefix}/ path."""
bucket, prefix = split_path(path)
if not prefix:
prefix = None
prefixes = []
items = []
page = self._call(
"GET",
"b/{}/o/",
bucket,
delimiter="/",
prefix=prefix,
maxResults=max_results,
).json()
assert page["kind"] == "storage#objects"
prefixes.extend(page.get("prefixes", []))
items.extend(
[
i
for i in page.get("items", [])
if prefix is None
or i["name"].rstrip("/") == prefix.rstrip("/")
or i["name"].startswith(prefix.rstrip("/") + "/")
]
)
next_page_token = page.get("nextPageToken", None)
while next_page_token is not None:
page = self._call(
"GET",
"b/{}/o/",
bucket,
delimiter="/",
prefix=prefix,
maxResults=max_results,
pageToken=next_page_token,
).json()
assert page["kind"] == "storage#objects"
prefixes.extend(page.get("prefixes", []))
items.extend([i for i in page.get("items", [])])
next_page_token = page.get("nextPageToken", None)
prefixes = [
p for p in prefixes if prefix is None or prefix.rstrip("/") + "/" in p
]
result = {
"kind": "storage#objects",
"prefixes": prefixes,
"items": [self._process_object(bucket, i) for i in items],
}
return result
@_tracemethod
def _list_buckets(self):
"""Return list of all buckets under the current project."""
items = []
page = self._call("GET", "b/", project=self.project).json()
assert page["kind"] == "storage#buckets"
items.extend(page.get("items", []))
next_page_token = page.get("nextPageToken", None)
while next_page_token is not None:
page = self._call(
"GET", "b/", project=self.project, pageToken=next_page_token
).json()
assert page["kind"] == "storage#buckets"
items.extend(page.get("items", []))
next_page_token = page.get("nextPageToken", None)
return [
{"name": i["name"] + "/", "size": 0, "type": "directory"} for i in items
]
@_tracemethod
def invalidate_cache(self, path=None):
"""
Invalidate listing cache for given path, it is reloaded on next use.
Parameters
----------
path: string or None
If None, clear all listings cached else listings at or under given
path.
"""
if not path:
logger.debug("invalidate_cache clearing cache")
self._listing_cache.clear()
else:
path = norm_path(path)
invalid_keys = [k for k in self._listing_cache if k.startswith(path)]
for k in invalid_keys:
self._listing_cache.pop(k, None)
@_tracemethod
def mkdir(self, bucket, acl="projectPrivate", default_acl="bucketOwnerFullControl"):
"""
New bucket
Parameters
----------
bucket: str
bucket name. If contains '/' (i.e., looks like subdir), will
have no effect because GCS doesn't have real directories.
acl: string, one of bACLs
access for the bucket itself
default_acl: str, one of ACLs
default ACL for objects created in this bucket
"""
if bucket in ["", "/"]:
raise ValueError("Cannot create root bucket")
if "/" in bucket:
return
r = self._call(
"post",
"b/",
predefinedAcl=acl,
project=self.project,
predefinedDefaultObjectAcl=default_acl,
json={"name": bucket},
)
r.raise_for_status()
self.invalidate_cache(bucket)
@_tracemethod
def rmdir(self, bucket):
"""Delete an empty bucket
Parameters
----------
bucket: str
bucket name. If contains '/' (i.e., looks like subdir), will
have no effect because GCS doesn't have real directories.
"""
if "/" in bucket:
return
self._call("delete", "b/" + bucket)
self.invalidate_cache(bucket)
def info(self, path, **kwargs):
"""File information about this path."""
path = self._strip_protocol(path)
# Check directory cache for parent dir
parent_path = norm_path(self._parent(path)).rstrip("/")
parent_cache = self._maybe_get_cached_listing(parent_path + "/")
if parent_cache:
for o in parent_cache["items"]:
if o["name"].rstrip("/") == path:
return o
# Check exact file path
out = [
o
for o in self.ls(path, detail=True, **kwargs)
if o["name"].rstrip("/") == path
]
if out:
return out[0]
# Check parent path
out = [
o
for o in self.ls(parent_path, detail=True, **kwargs)
if o["name"].rstrip("/") == path
]
if out:
return out[0]
else:
raise FileNotFoundError(path)
@_tracemethod
def ls(self, path, detail=False):
"""List objects under the given '/{bucket}/{prefix} path."""
path = norm_path(path)
if path in ["/", ""]:
if detail:
return self._list_buckets()
else:
return self.buckets
elif path.endswith("/"):
return self._ls(path, detail)
else:
combined_listing = self._ls(path, detail) + self._ls(path + "/", detail)
if detail:
combined_entries = dict((l["name"], l) for l in combined_listing)
combined_entries.pop(path + "/", None)
return list(combined_entries.values())
else:
return list(set(combined_listing) - {path + "/"})
@_tracemethod
def _ls(self, path, detail=False):
listing = self._list_objects(path)
bucket, key = split_path(path)
item_details = listing["items"]
pseudodirs = [
{
"bucket": bucket,
"name": bucket + "/" + prefix,
"kind": "storage#object",
"size": 0,
"storageClass": "DIRECTORY",
"type": "directory",
}
for prefix in listing["prefixes"]
]
out = item_details + pseudodirs
if detail:
return out
else:
return sorted([o["name"] for o in out])
@staticmethod
def url(path):
""" Get HTTP URL of the given path """
u = "https://www.googleapis.com/download/storage/v1/b/{}/o/{}?alt=media"
bucket, object = split_path(path)
object = quote_plus(object)
return u.format(bucket, object)
@_tracemethod
def cat(self, path):
""" Simple one-shot get of file data """
u2 = self.url(path)
r = self._call("GET", u2)
r.raise_for_status()
if "X-Goog-Hash" in r.headers:
# if header includes md5 hash, check that data matches
bits = r.headers["X-Goog-Hash"].split(",")
for bit in bits:
key, val = bit.split("=", 1)
if key == "md5":
md = b64decode(val)
assert md5(r.content).digest() == md, "Checksum failure"
return r.content
def getxattr(self, path, attr):
"""Get user-defined metadata attribute"""
meta = self.info(path).get("metadata", {})
return meta[attr]
def setxattrs(self, path, content_type=None, content_encoding=None, **kwargs):
""" Set/delete/add writable metadata attributes
Parameters
---------
content_type: str
If not None, set the content-type to this value
content_encoding: str
If not None, set the content-encoding.
See https://cloud.google.com/storage/docs/transcoding
kw_args: key-value pairs like field="value" or field=None
value must be string to add or modify, or None to delete
Returns
-------
Entire metadata after update (even if only path is passed)
"""
i_json = {"metadata": kwargs}
if content_type is not None:
i_json["contentType"] = content_type
if content_encoding is not None:
i_json["contentEncoding"] = content_encoding
bucket, key = split_path(path)
o_json = self._call(
"PATCH", "b/{}/o/{}", bucket, key, fields="metadata", json=i_json
).json()
self.info(path)["metadata"] = o_json.get("metadata", {})
return o_json.get("metadata", {})
@_tracemethod
def merge(self, path, paths, acl=None):
"""Concatenate objects within a single bucket"""
bucket, key = split_path(path)
source = [{"name": split_path(p)[1]} for p in paths]
self._call(
"POST",
"b/{}/o/{}/compose",
bucket,
key,
destinationPredefinedAcl=acl,
json={
"sourceObjects": source,
"kind": "storage#composeRequest",
"destination": {"name": key, "bucket": bucket},
},
)
@_tracemethod
def copy(self, path1, path2, acl=None):
"""Duplicate remote file
"""
b1, k1 = split_path(path1)
b2, k2 = split_path(path2)
out = self._call(
"POST",
"b/{}/o/{}/rewriteTo/b/{}/o/{}",
b1,
k1,
b2,
k2,
destinationPredefinedAcl=acl,
).json()
while out["done"] is not True:
out = self._call(
"POST",
"b/{}/o/{}/rewriteTo/b/{}/o/{}",
b1,
k1,
b2,
k2,
rewriteToken=out["rewriteToken"],
destinationPredefinedAcl=acl,
).json()
@_tracemethod
def rm(self, path, recursive=False):
"""Delete keys.
If a list, batch-delete all keys in one go (can span buckets)
Returns whether operation succeeded (a list if input was a list)
If recursive, delete all keys given by find(path)
"""
if isinstance(path, (tuple, list)):
template = (
"\n--===============7330845974216740156==\n"
"Content-Type: application/http\n"
"Content-Transfer-Encoding: binary\n"
"Content-ID: <b29c5de2-0db4-490b-b421-6a51b598bd11+{i}>"
"\n\nDELETE /storage/v1/b/{bucket}/o/{key} HTTP/1.1\n"
"Content-Type: application/json\n"
"accept: application/json\ncontent-length: 0\n"
)
body = "".join(
[
template.format(
i=i + 1,
bucket=p.split("/", 1)[0],
key=quote_plus(p.split("/", 1)[1]),
)
for i, p in enumerate(path)
]
)
r = self._call(
"POST",
"https://www.googleapis.com/batch",
headers={
"Content-Type": 'multipart/mixed; boundary="=========='
'=====7330845974216740156=="'
},
data=body + "\n--===============7330845974216740156==--",
)
boundary = r.headers["Content-Type"].split("=", 1)[1]
parents = {posixpath.dirname(norm_path(p)) for p in path}
[self.invalidate_cache(parent) for parent in parents]
return [
"200 OK" in c or "204 No Content" in c for c in r.text.split(boundary)
][1:-1]
elif recursive:
return self.rm(self.find(path))
else:
bucket, key = split_path(path)
self._call("DELETE", "b/{}/o/{}", bucket, key)
self.invalidate_cache(posixpath.dirname(norm_path(path)))
return True
@_tracemethod
def _open(
self,
path,
mode="rb",
block_size=None,
cache_options=None,
acl=None,
consistency=None,
metadata=None,
autocommit=True,
**kwargs
):
"""
See ``GCSFile``.
consistency: None or str
If None, use default for this instance
"""
if block_size is None:
block_size = self.default_block_size
const = consistency or self.consistency
return GCSFile(
self,
path,
mode,
block_size,
cache_options=cache_options,
consistency=const,
metadata=metadata,
acl=acl,
autocommit=autocommit,
**kwargs
)
GCSFileSystem.load_tokens()
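# A minimal usage sketch (the project and bucket names below are hypothetical):
#
#     fs = GCSFileSystem(project="my-project", token="google_default")
#     fs.ls("my-bucket")                      # list objects under the bucket
#     with fs.open("my-bucket/data.csv", "rb") as f:
#         header = f.read(1024)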
class GCSFile(fsspec.spec.AbstractBufferedFile):
def __init__(
self,
gcsfs,
path,
mode="rb",
block_size=DEFAULT_BLOCK_SIZE,
autocommit=True,
cache_type="readahead",
cache_options=None,
acl=None,
consistency="md5",
metadata=None,
**kwargs
):
"""
Open a file.
Parameters
----------
gcsfs: instance of GCSFileSystem
path: str
location in GCS, like 'bucket/path/to/file'
mode: str
Normal file modes. Currently only 'wb' and 'rb'.
block_size: int
Buffer size for reading or writing
acl: str
ACL to apply, if any, one of ``ACLs``. New files are normally
"bucketownerfullcontrol", but a default can be configured per
bucket.
consistency: str, 'none', 'size', 'md5'
Check for success in writing, applied at file close.
'size' ensures that the number of bytes reported by GCS matches
the number we wrote; 'md5' does a full checksum. Any value other
than 'size' or 'md5' is assumed to mean no checking.
metadata: dict
Custom metadata, in key/value pairs, added at file creation
"""
super().__init__(
gcsfs,
path,
mode,
block_size,
autocommit=autocommit,
cache_type=cache_type,
cache_options=cache_options,
**kwargs
)
bucket, key = split_path(path)
if not key:
raise OSError("Attempt to open a bucket")
self.gcsfs = gcsfs
self.bucket = bucket
self.key = key
self.metadata = metadata
self.acl = acl
self.consistency = consistency
if self.consistency == "md5":
self.md5 = md5()
if mode == "wb":
if self.blocksize < GCS_MIN_BLOCK_SIZE:
warnings.warn("Setting block size to minimum value, 2**18")
self.blocksize = GCS_MIN_BLOCK_SIZE
self.location = None
@property
def trim(self):
warnings.warn(
"GCSFile.trim has no effect and will be removed in a future version. "
"Access cache settings from GCSFile.cache.",
FutureWarning,
)
return True
def info(self):
""" File information about this path """
return self.details
def url(self):
""" HTTP link to this file's data """
return self.details["mediaLink"]
@_tracemethod
def _upload_chunk(self, final=False):
""" Write one part of a multi-block file upload
Parameters
----------
final: bool
Complete and commit upload
"""
self.buffer.seek(0)
data = self.buffer.getvalue()
head = {}
l = len(data)
if final and self.autocommit:
if l:
head["Content-Range"] = "bytes %i-%i/%i" % (
self.offset,
self.offset + l - 1,
self.offset + l,
)
else:
# closing when buffer is empty
head["Content-Range"] = "bytes */%i" % self.offset
data = None
else:
if l < GCS_MIN_BLOCK_SIZE:
if not self.autocommit:
return
elif not final:
raise ValueError("Non-final chunk write below min size.")
head["Content-Range"] = "bytes %i-%i/*" % (self.offset, self.offset + l - 1)
head.update(
{"Content-Type": "application/octet-stream", "Content-Length": str(l)}
)
r = self.gcsfs._call(
"POST", self.location, uploadType="resumable", headers=head, data=data
)
if "Range" in r.headers:
end = int(r.headers["Range"].split("-")[1])
shortfall = (self.offset + l - 1) - end
if shortfall:
if self.consistency == "md5":
self.md5.update(data[:-shortfall])
self.buffer = io.BytesIO(data[-shortfall:])
self.buffer.seek(shortfall)
self.offset += l - shortfall
return False
else:
if self.consistency == "md5":
self.md5.update(data)
elif l:
#
assert final, "Response looks like upload is over"
size, md5 = int(r.json()["size"]), r.json()["md5Hash"]
if self.consistency == "size":
assert size == self.buffer.tell() + self.offset, "Size mismatch"
if self.consistency == "md5":
assert (
b64encode(self.md5.digest()) == md5.encode()
), "MD5 checksum failed"
else:
assert final, "Response looks like upload is over"
return True
def commit(self):
"""If not auto-committing, finalize file"""
self.autocommit = True
self._upload_chunk(final=True)
@_tracemethod
def _initiate_upload(self):
""" Create multi-upload """
r = self.gcsfs._call(
"POST",
"https://www.googleapis.com/upload/storage"
"/v1/b/%s/o" % quote_plus(self.bucket),
uploadType="resumable",
json={"name": self.key, "metadata": self.metadata},
)
self.location = r.headers["Location"]
@_tracemethod
def discard(self):
"""Cancel in-progress multi-upload
Should only happen during discarding this write-mode file
"""
if self.location is None:
return
uid = re.findall("upload_id=([^&=?]+)", self.location)
r = self.gcsfs._call(
"DELETE",
"https://www.googleapis.com/upload/storage/v1/b/%s/o"
"" % quote_plus(self.bucket),
params={"uploadType": "resumable", "upload_id": uid},
)
r.raise_for_status()
@_tracemethod
def _simple_upload(self):
"""One-shot upload, less than 5MB"""
self.buffer.seek(0)
data = self.buffer.read()
path = "https://www.googleapis.com/upload/storage/v1/b/%s/o" % quote_plus(
self.bucket
)
metadata = {"name": self.key}
if self.metadata is not None:
metadata["metadata"] = self.metadata
metadata = json.dumps(metadata)
data = (
(
"--==0=="
"\nContent-Type: application/json; charset=UTF-8"
"\n\n" + metadata + "\n--==0=="
"\nContent-Type: application/octet-stream"
"\n\n"
).encode()
+ data
+ b"\n--==0==--"
)
r = self.gcsfs._call(
"POST",
path,
uploadType="multipart",
headers={"Content-Type": 'multipart/related; boundary="==0=="'},
data=data,
)
size, md5 = int(r.json()["size"]), r.json()["md5Hash"]
if self.consistency == "size":
assert size == self.buffer.tell(), "Size mismatch"
if self.consistency == "md5":
self.md5.update(data)
assert b64encode(self.md5.digest()) == md5.encode(), "MD5 checksum failed"
@_tracemethod
def _fetch_range(self, start=None, end=None):
""" Get data from GCS
start, end : None or integers
if not both None, fetch only given range
"""
if start is not None or end is not None:
start = start or 0
end = end or 0
head = {"Range": "bytes=%i-%i" % (start, end - 1)}
else:
head = None
try:
r = self.gcsfs._call("GET", self.details["mediaLink"], headers=head)
data = r.content
return data
except RuntimeError as e:
if "not satisfiable" in str(e):
return b""
raise
| [] | [] | ["GCSFS_DEFAULT_PROJECT"] | [] | ["GCSFS_DEFAULT_PROJECT"] | python | 1 | 0 | |
logpolicy/logpolicy.go
|
// Copyright (c) 2020 Tailscale Inc & AUTHORS All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package logpolicy manages the creation or reuse of logtail loggers,
// caching collection instance state on disk for use on future runs of
// programs on the same machine.
package logpolicy
import (
"bytes"
"context"
"crypto/tls"
"encoding/json"
"fmt"
"io/ioutil"
"log"
"net"
"net/http"
"net/url"
"os"
"os/exec"
"path/filepath"
"runtime"
"strconv"
"strings"
"sync"
"time"
"golang.org/x/term"
"tailscale.com/atomicfile"
"tailscale.com/logtail"
"tailscale.com/logtail/filch"
"tailscale.com/net/netns"
"tailscale.com/net/tlsdial"
"tailscale.com/net/tshttpproxy"
"tailscale.com/paths"
"tailscale.com/smallzstd"
"tailscale.com/types/logger"
"tailscale.com/util/racebuild"
"tailscale.com/util/winutil"
"tailscale.com/version"
)
var getLogTargetOnce struct {
sync.Once
v string // URL of logs server, or empty for default
}
func getLogTarget() string {
getLogTargetOnce.Do(func() {
if val, ok := os.LookupEnv("TS_LOG_TARGET"); ok {
getLogTargetOnce.v = val
} else {
if runtime.GOOS == "windows" {
getLogTargetOnce.v = winutil.GetRegString("LogTarget", "")
}
}
})
return getLogTargetOnce.v
}
// Config represents an instance of logs in a collection.
type Config struct {
Collection string
PrivateID logtail.PrivateID
PublicID logtail.PublicID
}
// Policy is a logger and its public ID.
type Policy struct {
// Logtail is the logger.
Logtail *logtail.Logger
// PublicID is the logger's instance identifier.
PublicID logtail.PublicID
}
// ToBytes returns the JSON representation of c.
func (c *Config) ToBytes() []byte {
data, err := json.MarshalIndent(c, "", "\t")
if err != nil {
log.Fatalf("logpolicy.Config marshal: %v", err)
}
return data
}
// save writes the JSON representation of c to stateFile.
func (c *Config) save(stateFile string) error {
c.PublicID = c.PrivateID.Public()
if err := os.MkdirAll(filepath.Dir(stateFile), 0750); err != nil {
return err
}
data := c.ToBytes()
if err := atomicfile.WriteFile(stateFile, data, 0600); err != nil {
return err
}
return nil
}
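// For illustration, the saved state file is a small JSON document along these lines
// (the values shown are placeholders, not real collection names or logtail IDs):
//
//	{
//		"Collection": "<collection name>",
//		"PrivateID": "<hex-encoded private ID>",
//		"PublicID": "<hex-encoded public ID>"
//	}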
// ConfigFromBytes parses a Config from its JSON encoding.
func ConfigFromBytes(jsonEnc []byte) (*Config, error) {
c := &Config{}
if err := json.Unmarshal(jsonEnc, c); err != nil {
return nil, err
}
return c, nil
}
// stderrWriter is an io.Writer that always writes to the latest
// os.Stderr, even if os.Stderr changes during the lifetime of the
// stderrWriter value.
type stderrWriter struct{}
func (stderrWriter) Write(buf []byte) (int, error) {
return os.Stderr.Write(buf)
}
type logWriter struct {
logger *log.Logger
}
func (l logWriter) Write(buf []byte) (int, error) {
l.logger.Printf("%s", buf)
return len(buf), nil
}
// logsDir returns the directory to use for log configuration and
// buffer storage.
func logsDir(logf logger.Logf) string {
if d := os.Getenv("TS_LOGS_DIR"); d != "" {
fi, err := os.Stat(d)
if err == nil && fi.IsDir() {
return d
}
}
// STATE_DIRECTORY is set by systemd 240+ but we support older
// systemd versions. For example, Ubuntu 18.04 (Bionic Beaver) is 237.
systemdStateDir := os.Getenv("STATE_DIRECTORY")
if systemdStateDir != "" {
logf("logpolicy: using $STATE_DIRECTORY, %q", systemdStateDir)
return systemdStateDir
}
// Default to e.g. /var/lib/tailscale or /var/db/tailscale on Unix.
if d := paths.DefaultTailscaledStateFile(); d != "" {
d = filepath.Dir(d) // directory of e.g. "/var/lib/tailscale/tailscaled.state"
if err := os.MkdirAll(d, 0700); err == nil {
logf("logpolicy: using system state directory %q", d)
return d
}
}
cacheDir, err := os.UserCacheDir()
if err == nil {
d := filepath.Join(cacheDir, "Tailscale")
logf("logpolicy: using UserCacheDir, %q", d)
return d
}
// Use the current working directory, unless we're being run by a
// service manager that sets it to /.
wd, err := os.Getwd()
if err == nil && wd != "/" {
logf("logpolicy: using current directory, %q", wd)
return wd
}
// No idea where to put stuff. Try to create a temp dir. It'll
// mean we might lose some logs and rotate through log IDs, but
// it's something.
tmp, err := ioutil.TempDir("", "tailscaled-log-*")
if err != nil {
panic("no safe place found to store log state")
}
logf("logpolicy: using temp directory, %q", tmp)
return tmp
}
// runningUnderSystemd reports whether we're running under systemd.
func runningUnderSystemd() bool {
if runtime.GOOS == "linux" && os.Getppid() == 1 {
slurp, _ := ioutil.ReadFile("/proc/1/stat")
return bytes.HasPrefix(slurp, []byte("1 (systemd) "))
}
return false
}
// tryFixLogStateLocation is a temporary fixup for
// https://github.com/tailscale/tailscale/issues/247 . We accidentally
// wrote logging state files to /, and then later to $CACHE_DIRECTORY
// (which is incorrect because the log ID is not reconstructible if
// deleted - it's state, not cache data).
//
// If log state for cmdname exists in / or $CACHE_DIRECTORY, and no
// log state for that command exists in dir, then the log state is
// moved from wherever it does exist, into dir. Leftover log state
// in / and $CACHE_DIRECTORY is deleted.
func tryFixLogStateLocation(dir, cmdname string) {
switch runtime.GOOS {
case "linux", "freebsd", "openbsd":
// These are the OSes where we might have written stuff into
// root. Others use different logic to find the logs storage
// dir.
default:
return
}
if cmdname == "" {
log.Printf("[unexpected] no cmdname given to tryFixLogStateLocation, please file a bug at https://github.com/tailscale/tailscale")
return
}
if dir == "/" {
// Trying to store things in / still. That's a bug, but don't
// abort hard.
log.Printf("[unexpected] storing logging config in /, please file a bug at https://github.com/tailscale/tailscale")
return
}
if os.Getuid() != 0 {
// Only root could have written log configs to weird places.
return
}
// We stored logs in 2 incorrect places: either /, or CACHE_DIR
// (aka /var/cache/tailscale). We want to move files into the
// provided dir, preferring those in CACHE_DIR over those in / if
// both exist. If files already exist in dir, don't
// overwrite. Finally, once we've maybe moved files around, we
// want to delete leftovers in / and CACHE_DIR, to clean up after
// our past selves.
files := []string{
fmt.Sprintf("%s.log.conf", cmdname),
fmt.Sprintf("%s.log1.txt", cmdname),
fmt.Sprintf("%s.log2.txt", cmdname),
}
// checks if any of the files above exist in d.
checkExists := func(d string) (bool, error) {
for _, file := range files {
p := filepath.Join(d, file)
_, err := os.Stat(p)
if os.IsNotExist(err) {
continue
} else if err != nil {
return false, fmt.Errorf("stat %q: %w", p, err)
}
return true, nil
}
return false, nil
}
// move files from d into dir, if they exist.
moveFiles := func(d string) error {
for _, file := range files {
src := filepath.Join(d, file)
_, err := os.Stat(src)
if os.IsNotExist(err) {
continue
} else if err != nil {
return fmt.Errorf("stat %q: %v", src, err)
}
dst := filepath.Join(dir, file)
bs, err := exec.Command("mv", src, dst).CombinedOutput()
if err != nil {
return fmt.Errorf("mv %q %q: %v (%s)", src, dst, err, bs)
}
}
return nil
}
existsInRoot, err := checkExists("/")
if err != nil {
log.Printf("checking for configs in /: %v", err)
return
}
existsInCache := false
cacheDir := os.Getenv("CACHE_DIRECTORY")
if cacheDir != "" {
existsInCache, err = checkExists("/var/cache/tailscale")
if err != nil {
log.Printf("checking for configs in %s: %v", cacheDir, err)
}
}
existsInDest, err := checkExists(dir)
if err != nil {
log.Printf("checking for configs in %s: %v", dir, err)
return
}
switch {
case !existsInRoot && !existsInCache:
// No leftover files, nothing to do.
return
case existsInDest:
// Already have "canonical" configs, just delete any remnants
// (below).
case existsInCache:
// CACHE_DIRECTORY takes precedence over /, move files from
// there.
if err := moveFiles(cacheDir); err != nil {
log.Print(err)
return
}
case existsInRoot:
// Files from root is better than nothing.
if err := moveFiles("/"); err != nil {
log.Print(err)
return
}
}
// If moving succeeded, or we didn't need to move files, try to
// delete any leftover files, but it's okay if we can't delete
// them for some reason.
dirs := []string{}
if existsInCache {
dirs = append(dirs, cacheDir)
}
if existsInRoot {
dirs = append(dirs, "/")
}
for _, d := range dirs {
for _, file := range files {
p := filepath.Join(d, file)
_, err := os.Stat(p)
if os.IsNotExist(err) {
continue
} else if err != nil {
log.Printf("stat %q: %v", p, err)
return
}
if err := os.Remove(p); err != nil {
log.Printf("rm %q: %v", p, err)
}
}
}
}
// New returns a new log policy (a logger and its instance ID) for a
// given collection name.
func New(collection string) *Policy {
var lflags int
if term.IsTerminal(2) || runtime.GOOS == "windows" {
lflags = 0
} else {
lflags = log.LstdFlags
}
if v, _ := strconv.ParseBool(os.Getenv("TS_DEBUG_LOG_TIME")); v {
lflags = log.LstdFlags | log.Lmicroseconds
}
if runningUnderSystemd() {
// If journalctl is going to prepend its own timestamp
// anyway, no need to add one.
lflags = 0
}
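// console writes to stderr with the flags chosen above; it is wrapped in
// logWriter below and handed to logtail as its local stderr output.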
console := log.New(stderrWriter{}, "", lflags)
var earlyErrBuf bytes.Buffer
earlyLogf := func(format string, a ...interface{}) {
fmt.Fprintf(&earlyErrBuf, format, a...)
earlyErrBuf.WriteByte('\n')
}
dir := logsDir(earlyLogf)
cmdName := version.CmdName()
tryFixLogStateLocation(dir, cmdName)
cfgPath := filepath.Join(dir, fmt.Sprintf("%s.log.conf", cmdName))
// The Windows service previously ran as tailscale-ipn.exe, so
// let's keep using that log base name if it exists.
if runtime.GOOS == "windows" && cmdName == "tailscaled" {
const oldCmdName = "tailscale-ipn"
oldPath := filepath.Join(dir, oldCmdName+".log.conf")
if fi, err := os.Stat(oldPath); err == nil && fi.Mode().IsRegular() {
cfgPath = oldPath
cmdName = oldCmdName
}
}
var oldc *Config
data, err := ioutil.ReadFile(cfgPath)
if err != nil {
earlyLogf("logpolicy.Read %v: %v", cfgPath, err)
oldc = &Config{}
oldc.Collection = collection
} else {
oldc, err = ConfigFromBytes(data)
if err != nil {
earlyLogf("logpolicy.Config unmarshal: %v", err)
oldc = &Config{}
}
}
newc := *oldc
if newc.Collection != collection {
log.Printf("logpolicy.Config: config collection %q does not match %q", newc.Collection, collection)
// We picked up an incompatible config file.
// Regenerate the private ID.
newc.PrivateID = logtail.PrivateID{}
newc.Collection = collection
}
if newc.PrivateID.IsZero() {
newc.PrivateID, err = logtail.NewPrivateID()
if err != nil {
log.Fatalf("logpolicy: NewPrivateID() should never fail")
}
}
newc.PublicID = newc.PrivateID.Public()
if newc != *oldc {
if err := newc.save(cfgPath); err != nil {
earlyLogf("logpolicy.Config.Save: %v", err)
}
}
c := logtail.Config{
Collection: newc.Collection,
PrivateID: newc.PrivateID,
Stderr: logWriter{console},
NewZstdEncoder: func() logtail.Encoder {
w, err := smallzstd.NewEncoder(nil)
if err != nil {
panic(err)
}
return w
},
HTTPC: &http.Client{Transport: newLogtailTransport(logtail.DefaultHost)},
}
if val := getLogTarget(); val != "" {
log.Println("You have enabled a non-default log target. Doing without being told to by Tailscale staff or your network administrator will make getting support difficult.")
c.BaseURL = val
u, _ := url.Parse(val)
c.HTTPC = &http.Client{Transport: newLogtailTransport(u.Host)}
}
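// filch provides the on-disk buffer (the <cmdName>.log1.txt/.log2.txt files
// referenced above) that queues logs for upload; if it can't be created we
// log filchErr once the logger is up and continue without a buffer.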
filchBuf, filchErr := filch.New(filepath.Join(dir, cmdName), filch.Options{})
if filchBuf != nil {
c.Buffer = filchBuf
}
lw := logtail.NewLogger(c, log.Printf)
log.SetFlags(0) // other logflags are set on console, not here
log.SetOutput(lw)
log.Printf("Program starting: v%v, Go %v: %#v",
version.Long,
goVersion(),
os.Args)
log.Printf("LogID: %v", newc.PublicID)
if filchErr != nil {
log.Printf("filch failed: %v", filchErr)
}
if earlyErrBuf.Len() != 0 {
log.Printf("%s", earlyErrBuf.Bytes())
}
return &Policy{
Logtail: lw,
PublicID: newc.PublicID,
}
}
// SetVerbosityLevel controls the verbosity level that should be
// written to stderr. 0 is the default (not verbose). Levels 1 or higher
// are increasingly verbose.
//
// It should not be changed concurrently with log writes.
func (p *Policy) SetVerbosityLevel(level int) {
p.Logtail.SetVerbosityLevel(level)
}
// Close immediately shuts down the logger.
func (p *Policy) Close() {
ctx, cancel := context.WithCancel(context.Background())
cancel()
p.Shutdown(ctx)
}
// Shutdown gracefully shuts down the logger, finishing any current
// log upload if it can be done before ctx is canceled.
func (p *Policy) Shutdown(ctx context.Context) error {
if p.Logtail != nil {
log.Printf("flushing log.")
return p.Logtail.Shutdown(ctx)
}
return nil
}
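// A minimal usage sketch (the collection name and call sites are
// illustrative assumptions, not taken from this file):
//
//	pol := logpolicy.New("tailnode.log.tailscale.io")
//	defer pol.Shutdown(context.Background())
//	pol.SetVerbosityLevel(1)
//	log.Printf("this line goes to stderr, the on-disk buffer, and logtail")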
// newLogtailTransport returns the HTTP Transport we use for uploading
// logs to the given host name.
func newLogtailTransport(host string) *http.Transport {
// Start with a copy of http.DefaultTransport and tweak it a bit.
tr := http.DefaultTransport.(*http.Transport).Clone()
tr.Proxy = tshttpproxy.ProxyFromEnvironment
tshttpproxy.SetTransportGetProxyConnectHeader(tr)
// We do our own zstd compression on uploads, and responses never contain any payload,
// so don't send "Accept-Encoding: gzip" to save a few bytes on the wire, since there
// will never be any body to decompress:
tr.DisableCompression = true
// Log whenever we dial:
tr.DialContext = func(ctx context.Context, netw, addr string) (net.Conn, error) {
nd := netns.FromDialer(&net.Dialer{
Timeout: 30 * time.Second,
KeepAlive: 30 * time.Second,
})
t0 := time.Now()
c, err := nd.DialContext(ctx, netw, addr)
d := time.Since(t0).Round(time.Millisecond)
if err != nil {
log.Printf("logtail: dial %q failed: %v (in %v)", addr, err, d)
} else {
log.Printf("logtail: dialed %q in %v", addr, d)
}
return c, err
}
// We're contacting exactly 1 hostname, so the default's 100
// max idle conns is very high for our needs. Even 2 is
// probably double what we need:
tr.MaxIdleConns = 2
// Provide knob to force HTTP/1 for log uploads.
// TODO(bradfitz): remove this debug knob once we've decided
// to upload via HTTP/1 or HTTP/2 (probably HTTP/1). Or we might just enforce
// it server-side.
if h1, _ := strconv.ParseBool(os.Getenv("TS_DEBUG_FORCE_H1_LOGS")); h1 {
tr.TLSClientConfig = nil // DefaultTransport's was already initialized w/ h2
tr.ForceAttemptHTTP2 = false
tr.TLSNextProto = map[string]func(authority string, c *tls.Conn) http.RoundTripper{}
}
tr.TLSClientConfig = tlsdial.Config(host, tr.TLSClientConfig)
return tr
}
func goVersion() string {
v := strings.TrimPrefix(runtime.Version(), "go")
if racebuild.On {
return v + "-race"
}
return v
}
|
[
"\"TS_LOGS_DIR\"",
"\"STATE_DIRECTORY\"",
"\"CACHE_DIRECTORY\"",
"\"TS_DEBUG_LOG_TIME\"",
"\"TS_DEBUG_FORCE_H1_LOGS\""
] |
[] |
[
"TS_DEBUG_LOG_TIME",
"STATE_DIRECTORY",
"CACHE_DIRECTORY",
"TS_DEBUG_FORCE_H1_LOGS",
"TS_LOGS_DIR"
] |
[]
|
["TS_DEBUG_LOG_TIME", "STATE_DIRECTORY", "CACHE_DIRECTORY", "TS_DEBUG_FORCE_H1_LOGS", "TS_LOGS_DIR"]
|
go
| 5 | 0 | |
transport/memory/memory_test.go
|
package memory
import (
"os"
"testing"
"github.com/micro/go-micro/v2/transport"
)
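// TestMemoryTransport exercises a full listen/accept/dial/send/receive
// round trip over the in-memory transport.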
func TestMemoryTransport(t *testing.T) {
tr := NewTransport()
// bind / listen
l, err := tr.Listen("127.0.0.1:8080")
if err != nil {
t.Fatalf("Unexpected error listening %v", err)
}
defer l.Close()
// accept
go func() {
if err := l.Accept(func(sock transport.Socket) {
for {
var m transport.Message
if err := sock.Recv(&m); err != nil {
return
}
if len(os.Getenv("IN_TRAVIS_CI")) == 0 {
t.Logf("Server Received %s", string(m.Body))
}
if err := sock.Send(&transport.Message{
Body: []byte(`pong`),
}); err != nil {
return
}
}
}); err != nil {
t.Fatalf("Unexpected error accepting %v", err)
}
}()
// dial
c, err := tr.Dial("127.0.0.1:8080")
if err != nil {
t.Fatalf("Unexpected error dialing %v", err)
}
defer c.Close()
// send <=> receive
for i := 0; i < 3; i++ {
if err := c.Send(&transport.Message{
Body: []byte(`ping`),
}); err != nil {
return
}
var m transport.Message
if err := c.Recv(&m); err != nil {
return
}
if len(os.Getenv("IN_TRAVIS_CI")) == 0 {
t.Logf("Client Received %s", string(m.Body))
}
}
}
func TestListener(t *testing.T) {
tr := NewTransport()
// bind / listen on random port
l, err := tr.Listen(":0")
if err != nil {
t.Fatalf("Unexpected error listening %v", err)
}
defer l.Close()
// try again
l2, err := tr.Listen(":0")
if err != nil {
t.Fatalf("Unexpected error listening %v", err)
}
defer l2.Close()
// bind to a fixed port; a second bind to it below must fail
l3, err := tr.Listen(":8080")
if err != nil {
t.Fatalf("Unexpected error listening %v", err)
}
defer l3.Close()
if _, err := tr.Listen(":8080"); err == nil {
t.Fatal("Expected error binding to :8080 got nil")
}
}
|
[
"\"IN_TRAVIS_CI\"",
"\"IN_TRAVIS_CI\""
] |
[] |
[
"IN_TRAVIS_CI"
] |
[]
|
["IN_TRAVIS_CI"]
|
go
| 1 | 0 | |
slack/slack_test.go
|
package slack
import (
"os"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// generic helper for loading Slack API wrapper with Webhook URL
func getAPI(t *testing.T) Provider {
webhookURL := os.Getenv("SLACK_WEBHOOK_URL")
require.NotEmpty(t, webhookURL, "ParameterNotFound: SLACK_WEBHOOK_URL")
api := New(webhookURL)
api.Payload.IconEmoji = ":robot_face:"
api.Payload.Username = "Insights Slack - Unit Test"
return api
}
func TestSendEmpty(t *testing.T) {
api := getAPI(t)
err := api.Send()
assert.EqualError(t, err, errorEmptyText)
}
func TestSendNotEmpty(t *testing.T) {
api := getAPI(t)
api.SetText("TestSendNotEmpty Test Content")
err := api.Send()
assert.NoError(t, err, "unexpected err from api.Send()")
}
func TestSendText(t *testing.T) {
api := getAPI(t)
err := api.SendText("TestSendText Test Content")
assert.NoError(t, err, "unexpected err from api.SendText()")
}
|
[
"\"SLACK_WEBHOOK_URL\""
] |
[] |
[
"SLACK_WEBHOOK_URL"
] |
[]
|
["SLACK_WEBHOOK_URL"]
|
go
| 1 | 0 | |
zed_python_sample/darknet_zed.py
|
#!python3
"""
Python 3 wrapper for identifying objects in images
Requires DLL compilation
Original *nix 2.7: https://github.com/pjreddie/darknet/blob/0f110834f4e18b30d5f101bf8f1724c34b7b83db/python/darknet.py
Windows Python 2.7 version: https://github.com/AlexeyAB/darknet/blob/fc496d52bf22a0bb257300d3c79be9cd80e722cb/build/darknet/x64/darknet.py
@author: Philip Kahn, Aymeric Dujardin
@date: 20180911
"""
# pylint: disable=R, W0401, W0614, W0703
import os
import sys
import time
import logging
import random
from random import randint
import math
import statistics
import getopt
from ctypes import *
import numpy as np
import cv2
import pyzed.sl as sl
# Get the top-level logger object
log = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
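# sample draws a random index with probability proportional to the
# (possibly unnormalized) weights in probs.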
def sample(probs):
s = sum(probs)
probs = [a/s for a in probs]
r = random.uniform(0, 1)
for i in range(len(probs)):
r = r - probs[i]
if r <= 0:
return i
return len(probs)-1
def c_array(ctype, values):
arr = (ctype*len(values))()
arr[:] = values
return arr
class BOX(Structure):
_fields_ = [("x", c_float),
("y", c_float),
("w", c_float),
("h", c_float)]
class DETECTION(Structure):
_fields_ = [("bbox", BOX),
("classes", c_int),
("prob", POINTER(c_float)),
("mask", POINTER(c_float)),
("objectness", c_float),
("sort_class", c_int)]
class IMAGE(Structure):
_fields_ = [("w", c_int),
("h", c_int),
("c", c_int),
("data", POINTER(c_float))]
class METADATA(Structure):
_fields_ = [("classes", c_int),
("names", POINTER(c_char_p))]
#lib = CDLL("/home/pjreddie/documents/darknet/libdarknet.so", RTLD_GLOBAL)
#lib = CDLL("darknet.so", RTLD_GLOBAL)
hasGPU = True
if os.name == "nt":
cwd = os.path.dirname(__file__)
os.environ['PATH'] = cwd + ';' + os.environ['PATH']
winGPUdll = os.path.join(cwd, "yolo_cpp_dll.dll")
winNoGPUdll = os.path.join(cwd, "yolo_cpp_dll_nogpu.dll")
envKeys = list()
for k, v in os.environ.items():
envKeys.append(k)
try:
try:
tmp = os.environ["FORCE_CPU"].lower()
if tmp in ["1", "true", "yes", "on"]:
raise ValueError("ForceCPU")
else:
log.info("Flag value '"+tmp+"' not forcing CPU mode")
except KeyError:
# We never set the flag
if 'CUDA_VISIBLE_DEVICES' in envKeys:
if int(os.environ['CUDA_VISIBLE_DEVICES']) < 0:
raise ValueError("ForceCPU")
try:
global DARKNET_FORCE_CPU
if DARKNET_FORCE_CPU:
raise ValueError("ForceCPU")
except NameError:
pass
# log.info(os.environ.keys())
# log.warning("FORCE_CPU flag undefined, proceeding with GPU")
if not os.path.exists(winGPUdll):
raise ValueError("NoDLL")
lib = CDLL(winGPUdll, RTLD_GLOBAL)
except (KeyError, ValueError):
hasGPU = False
if os.path.exists(winNoGPUdll):
lib = CDLL(winNoGPUdll, RTLD_GLOBAL)
log.warning("Notice: CPU-only mode")
else:
# Try the other way, in case no_gpu was
# compiled but not renamed
lib = CDLL(winGPUdll, RTLD_GLOBAL)
log.warning("Environment variables indicated a CPU run, but we didn't find `" +
winNoGPUdll+"`. Trying a GPU run anyway.")
else:
lib = CDLL("../libdarknet/libdarknet.so", RTLD_GLOBAL)
lib.network_width.argtypes = [c_void_p]
lib.network_width.restype = c_int
lib.network_height.argtypes = [c_void_p]
lib.network_height.restype = c_int
predict = lib.network_predict
predict.argtypes = [c_void_p, POINTER(c_float)]
predict.restype = POINTER(c_float)
if hasGPU:
set_gpu = lib.cuda_set_device
set_gpu.argtypes = [c_int]
make_image = lib.make_image
make_image.argtypes = [c_int, c_int, c_int]
make_image.restype = IMAGE
get_network_boxes = lib.get_network_boxes
get_network_boxes.argtypes = [c_void_p, c_int, c_int, c_float, c_float, POINTER(
c_int), c_int, POINTER(c_int), c_int]
get_network_boxes.restype = POINTER(DETECTION)
make_network_boxes = lib.make_network_boxes
make_network_boxes.argtypes = [c_void_p]
make_network_boxes.restype = POINTER(DETECTION)
free_detections = lib.free_detections
free_detections.argtypes = [POINTER(DETECTION), c_int]
free_ptrs = lib.free_ptrs
free_ptrs.argtypes = [POINTER(c_void_p), c_int]
network_predict = lib.network_predict
network_predict.argtypes = [c_void_p, POINTER(c_float)]
reset_rnn = lib.reset_rnn
reset_rnn.argtypes = [c_void_p]
load_net = lib.load_network
load_net.argtypes = [c_char_p, c_char_p, c_int]
load_net.restype = c_void_p
load_net_custom = lib.load_network_custom
load_net_custom.argtypes = [c_char_p, c_char_p, c_int, c_int]
load_net_custom.restype = c_void_p
do_nms_obj = lib.do_nms_obj
do_nms_obj.argtypes = [POINTER(DETECTION), c_int, c_int, c_float]
do_nms_sort = lib.do_nms_sort
do_nms_sort.argtypes = [POINTER(DETECTION), c_int, c_int, c_float]
free_image = lib.free_image
free_image.argtypes = [IMAGE]
letterbox_image = lib.letterbox_image
letterbox_image.argtypes = [IMAGE, c_int, c_int]
letterbox_image.restype = IMAGE
load_meta = lib.get_metadata
lib.get_metadata.argtypes = [c_char_p]
lib.get_metadata.restype = METADATA
load_image = lib.load_image_color
load_image.argtypes = [c_char_p, c_int, c_int]
load_image.restype = IMAGE
rgbgr_image = lib.rgbgr_image
rgbgr_image.argtypes = [IMAGE]
predict_image = lib.network_predict_image
predict_image.argtypes = [c_void_p, IMAGE]
predict_image.restype = POINTER(c_float)
def array_to_image(arr):
import numpy as np
# need to return old values to avoid python freeing memory
arr = arr.transpose(2, 0, 1)
c = arr.shape[0]
h = arr.shape[1]
w = arr.shape[2]
arr = np.ascontiguousarray(arr.flat, dtype=np.float32) / 255.0
data = arr.ctypes.data_as(POINTER(c_float))
im = IMAGE(w, h, c, data)
return im, arr
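# classify runs the network on a prepared IMAGE and returns (name, score)
# pairs sorted by descending score.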
def classify(net, meta, im):
out = predict_image(net, im)
res = []
for i in range(meta.classes):
if altNames is None:
name_tag = meta.names[i]
else:
name_tag = altNames[i]
res.append((name_tag, out[i]))
res = sorted(res, key=lambda x: -x[1])
return res
def detect(net, meta, image, thresh=.5, hier_thresh=.5, nms=.45, debug=False):
"""
Performs the detection
"""
custom_image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
custom_image = cv2.resize(custom_image, (lib.network_width(
net), lib.network_height(net)), interpolation=cv2.INTER_LINEAR)
im, arr = array_to_image(custom_image)
num = c_int(0)
pnum = pointer(num)
predict_image(net, im)
dets = get_network_boxes(
net, image.shape[1], image.shape[0], thresh, hier_thresh, None, 0, pnum, 0)
num = pnum[0]
if nms:
do_nms_sort(dets, num, meta.classes, nms)
res = []
if debug:
log.debug("about to range")
for j in range(num):
for i in range(meta.classes):
if dets[j].prob[i] > 0:
b = dets[j].bbox
if altNames is None:
name_tag = meta.names[i]
else:
name_tag = altNames[i]
res.append((name_tag, dets[j].prob[i], (b.x, b.y, b.w, b.h), i))
res = sorted(res, key=lambda x: -x[1])
free_detections(dets, num)
return res
netMain = None
metaMain = None
altNames = None
def get_object_depth(depth, bounds):
'''
Calculates the median x, y, z position of top slice(area_div) of point cloud
in camera frame.
Arguments:
depth: Point cloud data of whole frame.
bounds: Bounding box for object in pixels.
bounds[0]: x-center
bounds[1]: y-center
bounds[2]: width of bounding box.
bounds[3]: height of bounding box.
Return:
x, y, z: Location of object in meters.
'''
area_div = 2
x_vect = []
y_vect = []
z_vect = []
for j in range(int(bounds[0] - area_div), int(bounds[0] + area_div)):
for i in range(int(bounds[1] - area_div), int(bounds[1] + area_div)):
z = depth[i, j, 2]
if not np.isnan(z) and not np.isinf(z):
x_vect.append(depth[i, j, 0])
y_vect.append(depth[i, j, 1])
z_vect.append(z)
try:
x_median = statistics.median(x_vect)
y_median = statistics.median(y_vect)
z_median = statistics.median(z_vect)
except Exception:
x_median = -1
y_median = -1
z_median = -1
pass
return x_median, y_median, z_median
def generate_color(meta_path):
'''
Generate random colors for the number of classes mentioned in data file.
Arguments:
meta_path: Path to .data file.
Return:
color_array: RGB color codes for each class.
'''
random.seed(42)
with open(meta_path, 'r') as f:
content = f.readlines()
class_num = int(content[0].split("=")[1])
color_array = []
for x in range(0, class_num):
color_array.append((randint(0, 255), randint(0, 255), randint(0, 255)))
return color_array
def main(argv):
thresh = 0.25
darknet_path="../libdarknet/"
config_path = darknet_path + "cfg/yolov3-tiny.cfg"
weight_path = "yolov3-tiny.weights"
meta_path = "coco.data"
svo_path = None
zed_id = 0
help_str = 'darknet_zed.py -c <config> -w <weight> -m <meta> -t <threshold> -s <svo_file> -z <zed_id>'
try:
opts, args = getopt.getopt(
argv, "hc:w:m:t:s:z:", ["config=", "weight=", "meta=", "threshold=", "svo_file=", "zed_id="])
except getopt.GetoptError:
log.exception(help_str)
sys.exit(2)
for opt, arg in opts:
if opt == '-h':
log.info(help_str)
sys.exit()
elif opt in ("-c", "--config"):
config_path = arg
elif opt in ("-w", "--weight"):
weight_path = arg
elif opt in ("-m", "--meta"):
meta_path = arg
elif opt in ("-t", "--threshold"):
thresh = float(arg)
elif opt in ("-s", "--svo_file"):
svo_path = arg
elif opt in ("-z", "--zed_id"):
zed_id = int(arg)
input_type = sl.InputType()
if svo_path is not None:
log.info("SVO file : " + svo_path)
input_type.set_from_svo_file(svo_path)
else:
# Launch camera by id
input_type.set_from_camera_id(zed_id)
init = sl.InitParameters(input_t=input_type)
init.coordinate_units = sl.UNIT.METER
cam = sl.Camera()
if not cam.is_opened():
log.info("Opening ZED Camera...")
status = cam.open(init)
if status != sl.ERROR_CODE.SUCCESS:
log.error(repr(status))
exit()
runtime = sl.RuntimeParameters()
# Use STANDARD sensing mode
runtime.sensing_mode = sl.SENSING_MODE.STANDARD
mat = sl.Mat()
point_cloud_mat = sl.Mat()
# Import the global variables. This lets us instantiate Darknet once,
# then keep calling detect() without re-instantiating the network
global metaMain, netMain, altNames # pylint: disable=W0603
assert 0 < thresh < 1, "Threshold should be a float between zero and one (non-inclusive)"
if not os.path.exists(config_path):
raise ValueError("Invalid config path `" +
os.path.abspath(config_path)+"`")
if not os.path.exists(weight_path):
raise ValueError("Invalid weight path `" +
os.path.abspath(weight_path)+"`")
if not os.path.exists(meta_path):
raise ValueError("Invalid data file path `" +
os.path.abspath(meta_path)+"`")
if netMain is None:
netMain = load_net_custom(config_path.encode(
"ascii"), weight_path.encode("ascii"), 0, 1) # batch size = 1
if metaMain is None:
metaMain = load_meta(meta_path.encode("ascii"))
if altNames is None:
# In Python 3, the metafile default access craps out on Windows (but not Linux)
# Read the names file and create a list to feed to detect
try:
with open(meta_path) as meta_fh:
meta_contents = meta_fh.read()
import re
match = re.search("names *= *(.*)$", meta_contents,
re.IGNORECASE | re.MULTILINE)
if match:
result = match.group(1)
else:
result = None
try:
if os.path.exists(result):
with open(result) as names_fh:
names_list = names_fh.read().strip().split("\n")
altNames = [x.strip() for x in names_list]
except TypeError:
pass
except Exception:
pass
color_array = generate_color(meta_path)
log.info("Running...")
key = ''
while key != 113: # for 'q' key
start_time = time.time() # start time of the loop
err = cam.grab(runtime)
if err == sl.ERROR_CODE.SUCCESS:
cam.retrieve_image(mat, sl.VIEW.LEFT)
image = mat.get_data()
cam.retrieve_measure(
point_cloud_mat, sl.MEASURE.XYZRGBA)
depth = point_cloud_mat.get_data()
# Do the detection
detections = detect(netMain, metaMain, image, thresh)
log.info(chr(27) + "[2J"+"**** " + str(len(detections)) + " Results ****")
for detection in detections:
label = detection[0]
confidence = detection[1]
pstring = label+": "+str(np.rint(100 * confidence))+"%"
log.info(pstring)
bounds = detection[2]
y_extent = int(bounds[3])
x_extent = int(bounds[2])
# Coordinates are around the center
x_coord = int(bounds[0] - bounds[2]/2)
y_coord = int(bounds[1] - bounds[3]/2)
#boundingBox = [[x_coord, y_coord], [x_coord, y_coord + y_extent], [x_coord + x_extent, y_coord + y_extent], [x_coord + x_extent, y_coord]]
thickness = 1
x, y, z = get_object_depth(depth, bounds)
distance = math.sqrt(x * x + y * y + z * z)
distance = "{:.2f}".format(distance)
cv2.rectangle(image, (x_coord - thickness, y_coord - thickness),
(x_coord + x_extent + thickness, y_coord + (18 + thickness*4)),
color_array[detection[3]], -1)
cv2.putText(image, label + " " + (str(distance) + " m"),
(x_coord + (thickness * 4), y_coord + (10 + thickness * 4)),
cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 2)
cv2.rectangle(image, (x_coord - thickness, y_coord - thickness),
(x_coord + x_extent + thickness, y_coord + y_extent + thickness),
color_array[detection[3]], int(thickness*2))
cv2.imshow("ZED", image)
key = cv2.waitKey(5)
log.info("FPS: {}".format(1.0 / (time.time() - start_time)))
else:
key = cv2.waitKey(5)
cv2.destroyAllWindows()
cam.close()
log.info("\nFINISH")
if __name__ == "__main__":
main(sys.argv[1:])
|
[] |
[] |
[
"CUDA_VISIBLE_DEVICES",
"FORCE_CPU",
"PATH"
] |
[]
|
["CUDA_VISIBLE_DEVICES", "FORCE_CPU", "PATH"]
|
python
| 3 | 0 | |
cmd/fortifyExecuteScan_generated.go
|
// Code generated by piper's step-generator. DO NOT EDIT.
package cmd
import (
"fmt"
"os"
"path/filepath"
"time"
"github.com/SAP/jenkins-library/pkg/config"
"github.com/SAP/jenkins-library/pkg/log"
"github.com/SAP/jenkins-library/pkg/piperenv"
"github.com/SAP/jenkins-library/pkg/telemetry"
"github.com/spf13/cobra"
)
type fortifyExecuteScanOptions struct {
AdditionalScanParameters []string `json:"additionalScanParameters,omitempty"`
AuthToken string `json:"authToken,omitempty"`
CustomScanVersion string `json:"customScanVersion,omitempty"`
GithubToken string `json:"githubToken,omitempty"`
AutoCreate bool `json:"autoCreate,omitempty"`
ModulePath string `json:"modulePath,omitempty"`
PythonRequirementsFile string `json:"pythonRequirementsFile,omitempty"`
AutodetectClasspath bool `json:"autodetectClasspath,omitempty"`
MustAuditIssueGroups string `json:"mustAuditIssueGroups,omitempty"`
SpotAuditIssueGroups string `json:"spotAuditIssueGroups,omitempty"`
PythonRequirementsInstallSuffix string `json:"pythonRequirementsInstallSuffix,omitempty"`
PythonVersion string `json:"pythonVersion,omitempty"`
UploadResults bool `json:"uploadResults,omitempty"`
Version string `json:"version,omitempty"`
BuildDescriptorFile string `json:"buildDescriptorFile,omitempty"`
CommitID string `json:"commitId,omitempty"`
CommitMessage string `json:"commitMessage,omitempty"`
GithubAPIURL string `json:"githubApiUrl,omitempty"`
Owner string `json:"owner,omitempty"`
Repository string `json:"repository,omitempty"`
Memory string `json:"memory,omitempty"`
UpdateRulePack bool `json:"updateRulePack,omitempty"`
ReportDownloadEndpoint string `json:"reportDownloadEndpoint,omitempty"`
PollingMinutes int `json:"pollingMinutes,omitempty"`
QuickScan bool `json:"quickScan,omitempty"`
Translate string `json:"translate,omitempty"`
Src []string `json:"src,omitempty"`
Exclude []string `json:"exclude,omitempty"`
APIEndpoint string `json:"apiEndpoint,omitempty"`
ReportType string `json:"reportType,omitempty"`
PythonAdditionalPath []string `json:"pythonAdditionalPath,omitempty"`
ArtifactURL string `json:"artifactUrl,omitempty"`
ConsiderSuspicious bool `json:"considerSuspicious,omitempty"`
FprUploadEndpoint string `json:"fprUploadEndpoint,omitempty"`
ProjectName string `json:"projectName,omitempty"`
Reporting bool `json:"reporting,omitempty"`
ServerURL string `json:"serverUrl,omitempty"`
PullRequestMessageRegexGroup int `json:"pullRequestMessageRegexGroup,omitempty"`
DeltaMinutes int `json:"deltaMinutes,omitempty"`
SpotCheckMinimum int `json:"spotCheckMinimum,omitempty"`
FprDownloadEndpoint string `json:"fprDownloadEndpoint,omitempty"`
VersioningModel string `json:"versioningModel,omitempty"`
PythonInstallCommand string `json:"pythonInstallCommand,omitempty"`
ReportTemplateID int `json:"reportTemplateId,omitempty"`
FilterSetTitle string `json:"filterSetTitle,omitempty"`
PullRequestName string `json:"pullRequestName,omitempty"`
PullRequestMessageRegex string `json:"pullRequestMessageRegex,omitempty"`
BuildTool string `json:"buildTool,omitempty"`
ProjectSettingsFile string `json:"projectSettingsFile,omitempty"`
GlobalSettingsFile string `json:"globalSettingsFile,omitempty"`
M2Path string `json:"m2Path,omitempty"`
VerifyOnly bool `json:"verifyOnly,omitempty"`
InstallArtifacts bool `json:"installArtifacts,omitempty"`
}
type fortifyExecuteScanInflux struct {
fortify_data struct {
fields struct {
projectName string
projectVersion string
violations int
corporateTotal int
corporateAudited int
auditAllTotal int
auditAllAudited int
spotChecksTotal int
spotChecksAudited int
spotChecksGap int
suspicious int
exploitable int
suppressed int
}
tags struct {
}
}
}
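// persist writes each influx field below to the resource directory via
// piperenv.SetResourceParameter; individual failures are logged and any
// failure makes the step exit fatally.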
func (i *fortifyExecuteScanInflux) persist(path, resourceName string) {
measurementContent := []struct {
measurement string
valType string
name string
value interface{}
}{
{valType: config.InfluxField, measurement: "fortify_data", name: "projectName", value: i.fortify_data.fields.projectName},
{valType: config.InfluxField, measurement: "fortify_data", name: "projectVersion", value: i.fortify_data.fields.projectVersion},
{valType: config.InfluxField, measurement: "fortify_data", name: "violations", value: i.fortify_data.fields.violations},
{valType: config.InfluxField, measurement: "fortify_data", name: "corporateTotal", value: i.fortify_data.fields.corporateTotal},
{valType: config.InfluxField, measurement: "fortify_data", name: "corporateAudited", value: i.fortify_data.fields.corporateAudited},
{valType: config.InfluxField, measurement: "fortify_data", name: "auditAllTotal", value: i.fortify_data.fields.auditAllTotal},
{valType: config.InfluxField, measurement: "fortify_data", name: "auditAllAudited", value: i.fortify_data.fields.auditAllAudited},
{valType: config.InfluxField, measurement: "fortify_data", name: "spotChecksTotal", value: i.fortify_data.fields.spotChecksTotal},
{valType: config.InfluxField, measurement: "fortify_data", name: "spotChecksAudited", value: i.fortify_data.fields.spotChecksAudited},
{valType: config.InfluxField, measurement: "fortify_data", name: "spotChecksGap", value: i.fortify_data.fields.spotChecksGap},
{valType: config.InfluxField, measurement: "fortify_data", name: "suspicious", value: i.fortify_data.fields.suspicious},
{valType: config.InfluxField, measurement: "fortify_data", name: "exploitable", value: i.fortify_data.fields.exploitable},
{valType: config.InfluxField, measurement: "fortify_data", name: "suppressed", value: i.fortify_data.fields.suppressed},
}
errCount := 0
for _, metric := range measurementContent {
err := piperenv.SetResourceParameter(path, resourceName, filepath.Join(metric.measurement, fmt.Sprintf("%vs", metric.valType), metric.name), metric.value)
if err != nil {
log.Entry().WithError(err).Error("Error persisting influx environment.")
errCount++
}
}
if errCount > 0 {
log.Entry().Fatal("failed to persist Influx environment")
}
}
// FortifyExecuteScanCommand This step executes a Fortify scan on the specified project to perform static code analysis and check the source code for security flaws.
func FortifyExecuteScanCommand() *cobra.Command {
const STEP_NAME = "fortifyExecuteScan"
metadata := fortifyExecuteScanMetadata()
var stepConfig fortifyExecuteScanOptions
var startTime time.Time
var influx fortifyExecuteScanInflux
var createFortifyExecuteScanCmd = &cobra.Command{
Use: STEP_NAME,
Short: "This step executes a Fortify scan on the specified project to perform static code analysis and check the source code for security flaws.",
Long: `This step executes a Fortify scan on the specified project to perform static code analysis and check the source code for security flaws.
The Fortify step triggers a scan locally on your Jenkins within a docker container so finally you have to supply a docker image with a Fortify SCA
and Java plus Maven or alternatively Python installed into it for being able to perform any scans.`,
PreRunE: func(cmd *cobra.Command, _ []string) error {
startTime = time.Now()
log.SetStepName(STEP_NAME)
log.SetVerbose(GeneralConfig.Verbose)
path, _ := os.Getwd()
fatalHook := &log.FatalHook{CorrelationID: GeneralConfig.CorrelationID, Path: path}
log.RegisterHook(fatalHook)
err := PrepareConfig(cmd, &metadata, STEP_NAME, &stepConfig, config.OpenPiperFile)
if err != nil {
log.SetErrorCategory(log.ErrorConfiguration)
return err
}
log.RegisterSecret(stepConfig.AuthToken)
log.RegisterSecret(stepConfig.GithubToken)
if len(GeneralConfig.HookConfig.SentryConfig.Dsn) > 0 {
sentryHook := log.NewSentryHook(GeneralConfig.HookConfig.SentryConfig.Dsn, GeneralConfig.CorrelationID)
log.RegisterHook(&sentryHook)
}
return nil
},
Run: func(_ *cobra.Command, _ []string) {
telemetryData := telemetry.CustomData{}
telemetryData.ErrorCode = "1"
handler := func() {
config.RemoveVaultSecretFiles()
influx.persist(GeneralConfig.EnvRootPath, "influx")
telemetryData.Duration = fmt.Sprintf("%v", time.Since(startTime).Milliseconds())
telemetryData.ErrorCategory = log.GetErrorCategory().String()
telemetry.Send(&telemetryData)
}
log.DeferExitHandler(handler)
defer handler()
telemetry.Initialize(GeneralConfig.NoTelemetry, STEP_NAME)
fortifyExecuteScan(stepConfig, &telemetryData, &influx)
telemetryData.ErrorCode = "0"
log.Entry().Info("SUCCESS")
},
}
addFortifyExecuteScanFlags(createFortifyExecuteScanCmd, &stepConfig)
return createFortifyExecuteScanCmd
}
func addFortifyExecuteScanFlags(cmd *cobra.Command, stepConfig *fortifyExecuteScanOptions) {
cmd.Flags().StringSliceVar(&stepConfig.AdditionalScanParameters, "additionalScanParameters", []string{}, "List of additional scan parameters to be used for Fortify sourceanalyzer command execution.")
cmd.Flags().StringVar(&stepConfig.AuthToken, "authToken", os.Getenv("PIPER_authToken"), "The FortifyToken to use for authentication")
cmd.Flags().StringVar(&stepConfig.CustomScanVersion, "customScanVersion", os.Getenv("PIPER_customScanVersion"), "Custom version of the Fortify project used as source.")
cmd.Flags().StringVar(&stepConfig.GithubToken, "githubToken", os.Getenv("PIPER_githubToken"), "GitHub personal access token as per https://help.github.com/en/github/authenticating-to-github/creating-a-personal-access-token-for-the-command-line")
cmd.Flags().BoolVar(&stepConfig.AutoCreate, "autoCreate", false, "Whether Fortify project and project version shall be implicitly auto created in case they cannot be found in the backend")
cmd.Flags().StringVar(&stepConfig.ModulePath, "modulePath", `./`, "Allows providing the path for the module to scan")
cmd.Flags().StringVar(&stepConfig.PythonRequirementsFile, "pythonRequirementsFile", os.Getenv("PIPER_pythonRequirementsFile"), "The requirements file used in `buildTool: 'pip'` to populate the build environment with the necessary dependencies")
cmd.Flags().BoolVar(&stepConfig.AutodetectClasspath, "autodetectClasspath", true, "Whether the classpath is automatically determined via build tool i.e. maven or pip or not at all")
cmd.Flags().StringVar(&stepConfig.MustAuditIssueGroups, "mustAuditIssueGroups", `Corporate Security Requirements, Audit All`, "Comma separated list of issue groups that must be audited completely")
cmd.Flags().StringVar(&stepConfig.SpotAuditIssueGroups, "spotAuditIssueGroups", `Spot Checks of Each Category`, "Comma separated list of issue groups that are spot checked and for which `spotCheckMinimum` audited issues are enforced")
cmd.Flags().StringVar(&stepConfig.PythonRequirementsInstallSuffix, "pythonRequirementsInstallSuffix", os.Getenv("PIPER_pythonRequirementsInstallSuffix"), "The suffix for the command used to install the requirements file in `buildTool: 'pip'` to populate the build environment with the necessary dependencies")
cmd.Flags().StringVar(&stepConfig.PythonVersion, "pythonVersion", `python3`, "Python version to be used in `buildTool: 'pip'`")
cmd.Flags().BoolVar(&stepConfig.UploadResults, "uploadResults", true, "Whether results shall be uploaded or not")
cmd.Flags().StringVar(&stepConfig.Version, "version", os.Getenv("PIPER_version"), "Version used in conjunction with [`versioningModel`](#versioningModel) to identify the Fortify project to be created and used for results aggregation.")
cmd.Flags().StringVar(&stepConfig.BuildDescriptorFile, "buildDescriptorFile", `./pom.xml`, "Path to the build descriptor file addressing the module/folder to be scanned.")
cmd.Flags().StringVar(&stepConfig.CommitID, "commitId", os.Getenv("PIPER_commitId"), "Set the Git commit ID for identifying artifacts throughout the scan.")
cmd.Flags().StringVar(&stepConfig.CommitMessage, "commitMessage", os.Getenv("PIPER_commitMessage"), "Set the Git commit message for identifying pull request merges throughout the scan.")
cmd.Flags().StringVar(&stepConfig.GithubAPIURL, "githubApiUrl", `https://api.github.com`, "Set the GitHub API URL.")
cmd.Flags().StringVar(&stepConfig.Owner, "owner", os.Getenv("PIPER_owner"), "Set the GitHub organization.")
cmd.Flags().StringVar(&stepConfig.Repository, "repository", os.Getenv("PIPER_repository"), "Set the GitHub repository.")
cmd.Flags().StringVar(&stepConfig.Memory, "memory", `-Xmx4G -Xms512M`, "The amount of memory granted to the translate/scan executions")
cmd.Flags().BoolVar(&stepConfig.UpdateRulePack, "updateRulePack", true, "Whether the rule pack shall be updated and pulled from Fortify SSC before scanning or not")
cmd.Flags().StringVar(&stepConfig.ReportDownloadEndpoint, "reportDownloadEndpoint", `/transfer/reportDownload.html`, "Fortify SSC endpoint for Report downloads")
cmd.Flags().IntVar(&stepConfig.PollingMinutes, "pollingMinutes", 30, "The number of minutes for which an uploaded FPR artifact's status is being polled to finish queuing/processing, if exceeded polling will be stopped and an error will be thrown")
cmd.Flags().BoolVar(&stepConfig.QuickScan, "quickScan", false, "Whether a quick scan should be performed, please consult the related Fortify documentation on JAM on the impact of this setting")
cmd.Flags().StringVar(&stepConfig.Translate, "translate", os.Getenv("PIPER_translate"), "Options for translate phase of Fortify. Most likely, you do not need to set this parameter. See src, exclude. If `'src'` and `'exclude'` are set they are automatically used. Technical details: It has to be a JSON string of list of maps with required key `'src'`, and optional keys `'exclude'`, `'libDirs'`, `'aspnetcore'`, and `'dotNetCoreVersion'`")
cmd.Flags().StringSliceVar(&stepConfig.Src, "src", []string{}, "A list of source directories to scan. Wildcards can be used, e.g., `'src/main/java/**/*'`. If `'translate'` is set, this will be ignored. The default value for `buildTool: 'maven'` is `['**/*.xml', '**/*.html', '**/*.jsp', '**/*.js', '**/src/main/resources/**/*', '**/src/main/java/**/*']`, for `buildTool: 'pip'` it is `['./**/*']`.")
cmd.Flags().StringSliceVar(&stepConfig.Exclude, "exclude", []string{}, "A list of directories/files to be excluded from the scan. Wildcards can be used, e.g., `'**/Test.java'`. If `translate` is set, this will be ignored.")
cmd.Flags().StringVar(&stepConfig.APIEndpoint, "apiEndpoint", `/api/v1`, "Fortify SSC endpoint used for uploading the scan results and checking the audit state")
cmd.Flags().StringVar(&stepConfig.ReportType, "reportType", `PDF`, "The type of report to be generated")
cmd.Flags().StringSliceVar(&stepConfig.PythonAdditionalPath, "pythonAdditionalPath", []string{`./lib`, `.`}, "A list of additional paths which can be used in `buildTool: 'pip'` for customization purposes")
cmd.Flags().StringVar(&stepConfig.ArtifactURL, "artifactUrl", os.Getenv("PIPER_artifactUrl"), "Path/URL pointing to an additional artifact repository for resolution of additional artifacts during the build")
cmd.Flags().BoolVar(&stepConfig.ConsiderSuspicious, "considerSuspicious", true, "Whether suspicious issues should trigger the check to fail or not")
cmd.Flags().StringVar(&stepConfig.FprUploadEndpoint, "fprUploadEndpoint", `/upload/resultFileUpload.html`, "Fortify SSC endpoint for FPR uploads")
cmd.Flags().StringVar(&stepConfig.ProjectName, "projectName", `{{list .GroupID .ArtifactID | join "-" | trimAll "-"}}`, "The project used for reporting results in SSC")
cmd.Flags().BoolVar(&stepConfig.Reporting, "reporting", false, "Influences whether a report is generated or not")
cmd.Flags().StringVar(&stepConfig.ServerURL, "serverUrl", os.Getenv("PIPER_serverUrl"), "Fortify SSC Url to be used for accessing the APIs")
cmd.Flags().IntVar(&stepConfig.PullRequestMessageRegexGroup, "pullRequestMessageRegexGroup", 1, "The group number for extracting the pull request id in `'pullRequestMessageRegex'`")
cmd.Flags().IntVar(&stepConfig.DeltaMinutes, "deltaMinutes", 5, "The number of minutes for which an uploaded FPR artifact is considered to be recent and healthy, if exceeded an error will be thrown")
cmd.Flags().IntVar(&stepConfig.SpotCheckMinimum, "spotCheckMinimum", 1, "The minimum number of issues that must be audited per category in the `Spot Checks of each Category` folder to avoid an error being thrown")
cmd.Flags().StringVar(&stepConfig.FprDownloadEndpoint, "fprDownloadEndpoint", `/download/currentStateFprDownload.html`, "Fortify SSC endpoint for FPR downloads")
cmd.Flags().StringVar(&stepConfig.VersioningModel, "versioningModel", `major`, "The default project versioning model used for creating the version based on the build descriptor version to report results in SSC, can be one of `'major'`, `'major-minor'`, `'semantic'`, `'full'`")
cmd.Flags().StringVar(&stepConfig.PythonInstallCommand, "pythonInstallCommand", `{{.Pip}} install --user .`, "Additional install command that can be run when `buildTool: 'pip'` is used which allows further customizing the execution environment of the scan")
cmd.Flags().IntVar(&stepConfig.ReportTemplateID, "reportTemplateId", 18, "Report template ID to be used for generating the Fortify report")
cmd.Flags().StringVar(&stepConfig.FilterSetTitle, "filterSetTitle", `SAP`, "Title of the filter set to use for analysing the results")
cmd.Flags().StringVar(&stepConfig.PullRequestName, "pullRequestName", os.Getenv("PIPER_pullRequestName"), "The name of the pull request branch which will trigger creation of a new version in Fortify SSC based on the master branch version")
cmd.Flags().StringVar(&stepConfig.PullRequestMessageRegex, "pullRequestMessageRegex", `.*Merge pull request #(\\d+) from.*`, "Regex used to identify the PR-XXX reference within the merge commit message")
cmd.Flags().StringVar(&stepConfig.BuildTool, "buildTool", `maven`, "Scan type used for the step which can be `'maven'`, `'pip'`")
cmd.Flags().StringVar(&stepConfig.ProjectSettingsFile, "projectSettingsFile", os.Getenv("PIPER_projectSettingsFile"), "Path to the mvn settings file that should be used as project settings file.")
cmd.Flags().StringVar(&stepConfig.GlobalSettingsFile, "globalSettingsFile", os.Getenv("PIPER_globalSettingsFile"), "Path to the mvn settings file that should be used as global settings file.")
cmd.Flags().StringVar(&stepConfig.M2Path, "m2Path", os.Getenv("PIPER_m2Path"), "Path to the location of the local repository that should be used.")
cmd.Flags().BoolVar(&stepConfig.VerifyOnly, "verifyOnly", false, "Whether the step shall only apply verification checks or whether it does a full scan and check cycle")
cmd.Flags().BoolVar(&stepConfig.InstallArtifacts, "installArtifacts", false, "If enabled, it will install all artifacts to the local maven repository to make them available before running Fortify. This is required if any maven module has dependencies to other modules in the repository and they were not installed before.")
cmd.MarkFlagRequired("authToken")
cmd.MarkFlagRequired("serverUrl")
}
// retrieve step metadata
func fortifyExecuteScanMetadata() config.StepData {
var theMetaData = config.StepData{
Metadata: config.StepMetadata{
Name: "fortifyExecuteScan",
Aliases: []config.Alias{},
Description: "This step executes a Fortify scan on the specified project to perform static code analysis and check the source code for security flaws.",
},
Spec: config.StepSpec{
Inputs: config.StepInputs{
Resources: []config.StepResources{
{Name: "commonPipelineEnvironment"},
{Name: "buildDescriptor", Type: "stash"},
{Name: "deployDescriptor", Type: "stash"},
{Name: "tests", Type: "stash"},
{Name: "opensourceConfiguration", Type: "stash"},
},
Parameters: []config.StepParameters{
{
Name: "additionalScanParameters",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS"},
Type: "[]string",
Mandatory: false,
Aliases: []config.Alias{},
},
{
Name: "authToken",
ResourceRef: []config.ResourceReference{
{
Name: "fortifyCredentialsId",
Type: "secret",
},
{
Name: "",
Paths: []string{"$(vaultPath)/fortify", "$(vaultBasePath)/$(vaultPipelineName)/fortify", "$(vaultBasePath)/GROUP-SECRETS/fortify"},
Type: "vaultSecret",
},
},
Scope: []string{"PARAMETERS", "STAGES", "STEPS"},
Type: "string",
Mandatory: true,
Aliases: []config.Alias{},
},
{
Name: "customScanVersion",
ResourceRef: []config.ResourceReference{},
Scope: []string{"GENERAL", "PARAMETERS", "STAGES", "STEPS"},
Type: "string",
Mandatory: false,
Aliases: []config.Alias{},
},
{
Name: "githubToken",
ResourceRef: []config.ResourceReference{
{
Name: "githubTokenCredentialsId",
Type: "secret",
},
{
Name: "",
Paths: []string{"$(vaultPath)/github", "$(vaultBasePath)/$(vaultPipelineName)/github", "$(vaultBasePath)/GROUP-SECRETS/github"},
Type: "vaultSecret",
},
},
Scope: []string{"GENERAL", "PARAMETERS", "STAGES", "STEPS"},
Type: "string",
Mandatory: false,
Aliases: []config.Alias{{Name: "access_token"}},
},
{
Name: "autoCreate",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS"},
Type: "bool",
Mandatory: false,
Aliases: []config.Alias{},
},
{
Name: "modulePath",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS"},
Type: "string",
Mandatory: false,
Aliases: []config.Alias{},
},
{
Name: "pythonRequirementsFile",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS"},
Type: "string",
Mandatory: false,
Aliases: []config.Alias{},
},
{
Name: "autodetectClasspath",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS"},
Type: "bool",
Mandatory: false,
Aliases: []config.Alias{},
},
{
Name: "mustAuditIssueGroups",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS"},
Type: "string",
Mandatory: false,
Aliases: []config.Alias{},
},
{
Name: "spotAuditIssueGroups",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS"},
Type: "string",
Mandatory: false,
Aliases: []config.Alias{},
},
{
Name: "pythonRequirementsInstallSuffix",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS"},
Type: "string",
Mandatory: false,
Aliases: []config.Alias{},
},
{
Name: "pythonVersion",
ResourceRef: []config.ResourceReference{},
Scope: []string{"GENERAL", "PARAMETERS", "STAGES", "STEPS"},
Type: "string",
Mandatory: false,
Aliases: []config.Alias{},
},
{
Name: "uploadResults",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS"},
Type: "bool",
Mandatory: false,
Aliases: []config.Alias{},
},
{
Name: "version",
ResourceRef: []config.ResourceReference{
{
Name: "commonPipelineEnvironment",
Param: "artifactVersion",
},
},
Scope: []string{"GENERAL", "PARAMETERS", "STAGES", "STEPS"},
Type: "string",
Mandatory: false,
Aliases: []config.Alias{{Name: "fortifyProjectVersion"}},
},
{
Name: "buildDescriptorFile",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS"},
Type: "string",
Mandatory: false,
Aliases: []config.Alias{},
},
{
Name: "commitId",
ResourceRef: []config.ResourceReference{
{
Name: "commonPipelineEnvironment",
Param: "git/commitId",
},
},
Scope: []string{"PARAMETERS", "STAGES", "STEPS"},
Type: "string",
Mandatory: false,
Aliases: []config.Alias{},
},
{
Name: "commitMessage",
ResourceRef: []config.ResourceReference{
{
Name: "commonPipelineEnvironment",
Param: "git/commitMessage",
},
},
Scope: []string{"PARAMETERS", "STAGES", "STEPS"},
Type: "string",
Mandatory: false,
Aliases: []config.Alias{},
},
{
Name: "githubApiUrl",
ResourceRef: []config.ResourceReference{},
Scope: []string{"GENERAL", "PARAMETERS", "STAGES", "STEPS"},
Type: "string",
Mandatory: false,
Aliases: []config.Alias{},
},
{
Name: "owner",
ResourceRef: []config.ResourceReference{
{
Name: "commonPipelineEnvironment",
Param: "github/owner",
},
},
Scope: []string{"PARAMETERS", "STAGES", "STEPS"},
Type: "string",
Mandatory: false,
Aliases: []config.Alias{{Name: "githubOrg"}},
},
{
Name: "repository",
ResourceRef: []config.ResourceReference{
{
Name: "commonPipelineEnvironment",
Param: "github/repository",
},
},
Scope: []string{"PARAMETERS", "STAGES", "STEPS"},
Type: "string",
Mandatory: false,
Aliases: []config.Alias{{Name: "githubRepo"}},
},
{
Name: "memory",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS"},
Type: "string",
Mandatory: false,
Aliases: []config.Alias{},
},
{
Name: "updateRulePack",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS"},
Type: "bool",
Mandatory: false,
Aliases: []config.Alias{},
},
{
Name: "reportDownloadEndpoint",
ResourceRef: []config.ResourceReference{},
Scope: []string{"GENERAL", "PARAMETERS", "STAGES", "STEPS"},
Type: "string",
Mandatory: false,
Aliases: []config.Alias{{Name: "fortifyReportDownloadEndpoint"}},
},
{
Name: "pollingMinutes",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS"},
Type: "int",
Mandatory: false,
Aliases: []config.Alias{},
},
{
Name: "quickScan",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS"},
Type: "bool",
Mandatory: false,
Aliases: []config.Alias{},
},
{
Name: "translate",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS"},
Type: "string",
Mandatory: false,
Aliases: []config.Alias{},
},
{
Name: "src",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS"},
Type: "[]string",
Mandatory: false,
Aliases: []config.Alias{},
},
{
Name: "exclude",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS"},
Type: "[]string",
Mandatory: false,
Aliases: []config.Alias{},
},
{
Name: "apiEndpoint",
ResourceRef: []config.ResourceReference{},
Scope: []string{"GENERAL", "PARAMETERS", "STAGES", "STEPS"},
Type: "string",
Mandatory: false,
Aliases: []config.Alias{{Name: "fortifyApiEndpoint"}},
},
{
Name: "reportType",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS"},
Type: "string",
Mandatory: false,
Aliases: []config.Alias{},
},
{
Name: "pythonAdditionalPath",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS"},
Type: "[]string",
Mandatory: false,
Aliases: []config.Alias{},
},
{
Name: "artifactUrl",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS"},
Type: "string",
Mandatory: false,
Aliases: []config.Alias{},
},
{
Name: "considerSuspicious",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS"},
Type: "bool",
Mandatory: false,
Aliases: []config.Alias{},
},
{
Name: "fprUploadEndpoint",
ResourceRef: []config.ResourceReference{},
Scope: []string{"GENERAL", "PARAMETERS", "STAGES", "STEPS"},
Type: "string",
Mandatory: false,
Aliases: []config.Alias{{Name: "fortifyFprUploadEndpoint"}},
},
{
Name: "projectName",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS"},
Type: "string",
Mandatory: false,
Aliases: []config.Alias{{Name: "fortifyProjectName"}},
},
{
Name: "reporting",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS"},
Type: "bool",
Mandatory: false,
Aliases: []config.Alias{},
},
{
Name: "serverUrl",
ResourceRef: []config.ResourceReference{},
Scope: []string{"GENERAL", "PARAMETERS", "STAGES", "STEPS"},
Type: "string",
Mandatory: true,
Aliases: []config.Alias{{Name: "fortifyServerUrl"}, {Name: "sscUrl"}},
},
{
Name: "pullRequestMessageRegexGroup",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS"},
Type: "int",
Mandatory: false,
Aliases: []config.Alias{},
},
{
Name: "deltaMinutes",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS"},
Type: "int",
Mandatory: false,
Aliases: []config.Alias{},
},
{
Name: "spotCheckMinimum",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS"},
Type: "int",
Mandatory: false,
Aliases: []config.Alias{},
},
{
Name: "fprDownloadEndpoint",
ResourceRef: []config.ResourceReference{},
Scope: []string{"GENERAL", "PARAMETERS", "STAGES", "STEPS"},
Type: "string",
Mandatory: false,
Aliases: []config.Alias{{Name: "fortifyFprDownloadEndpoint"}},
},
{
Name: "versioningModel",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "GENERAL", "STAGES", "STEPS"},
Type: "string",
Mandatory: false,
Aliases: []config.Alias{{Name: "defaultVersioningModel"}},
},
{
Name: "pythonInstallCommand",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS"},
Type: "string",
Mandatory: false,
Aliases: []config.Alias{},
},
{
Name: "reportTemplateId",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS"},
Type: "int",
Mandatory: false,
Aliases: []config.Alias{},
},
{
Name: "filterSetTitle",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS"},
Type: "string",
Mandatory: false,
Aliases: []config.Alias{},
},
{
Name: "pullRequestName",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS"},
Type: "string",
Mandatory: false,
Aliases: []config.Alias{},
},
{
Name: "pullRequestMessageRegex",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS"},
Type: "string",
Mandatory: false,
Aliases: []config.Alias{},
},
{
Name: "buildTool",
ResourceRef: []config.ResourceReference{},
Scope: []string{"GENERAL", "PARAMETERS", "STAGES", "STEPS"},
Type: "string",
Mandatory: false,
Aliases: []config.Alias{},
},
{
Name: "projectSettingsFile",
ResourceRef: []config.ResourceReference{},
Scope: []string{"GENERAL", "STEPS", "STAGES", "PARAMETERS"},
Type: "string",
Mandatory: false,
Aliases: []config.Alias{{Name: "maven/projectSettingsFile"}},
},
{
Name: "globalSettingsFile",
ResourceRef: []config.ResourceReference{},
Scope: []string{"GENERAL", "STEPS", "STAGES", "PARAMETERS"},
Type: "string",
Mandatory: false,
Aliases: []config.Alias{{Name: "maven/globalSettingsFile"}},
},
{
Name: "m2Path",
ResourceRef: []config.ResourceReference{},
Scope: []string{"GENERAL", "STEPS", "STAGES", "PARAMETERS"},
Type: "string",
Mandatory: false,
Aliases: []config.Alias{{Name: "maven/m2Path"}},
},
{
Name: "verifyOnly",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS"},
Type: "bool",
Mandatory: false,
Aliases: []config.Alias{},
},
{
Name: "installArtifacts",
ResourceRef: []config.ResourceReference{},
Scope: []string{"GENERAL", "STEPS", "STAGES", "PARAMETERS"},
Type: "bool",
Mandatory: false,
Aliases: []config.Alias{},
},
},
},
Containers: []config.Container{
{},
},
Outputs: config.StepOutputs{
Resources: []config.StepResources{
{
Name: "influx",
Type: "influx",
Parameters: []map[string]interface{}{
{"Name": "fortify_data"}, {"fields": []map[string]string{{"name": "projectName"}, {"name": "projectVersion"}, {"name": "violations"}, {"name": "corporateTotal"}, {"name": "corporateAudited"}, {"name": "auditAllTotal"}, {"name": "auditAllAudited"}, {"name": "spotChecksTotal"}, {"name": "spotChecksAudited"}, {"name": "spotChecksGap"}, {"name": "suspicious"}, {"name": "exploitable"}, {"name": "suppressed"}}},
},
},
},
},
},
}
return theMetaData
}
|
[
"\"PIPER_authToken\"",
"\"PIPER_customScanVersion\"",
"\"PIPER_githubToken\"",
"\"PIPER_pythonRequirementsFile\"",
"\"PIPER_pythonRequirementsInstallSuffix\"",
"\"PIPER_version\"",
"\"PIPER_commitId\"",
"\"PIPER_commitMessage\"",
"\"PIPER_owner\"",
"\"PIPER_repository\"",
"\"PIPER_translate\"",
"\"PIPER_artifactUrl\"",
"\"PIPER_serverUrl\"",
"\"PIPER_pullRequestName\"",
"\"PIPER_projectSettingsFile\"",
"\"PIPER_globalSettingsFile\"",
"\"PIPER_m2Path\""
] |
[] |
[
"PIPER_commitId",
"PIPER_version",
"PIPER_githubToken",
"PIPER_translate",
"PIPER_pythonRequirementsFile",
"PIPER_globalSettingsFile",
"PIPER_commitMessage",
"PIPER_artifactUrl",
"PIPER_serverUrl",
"PIPER_projectSettingsFile",
"PIPER_pullRequestName",
"PIPER_m2Path",
"PIPER_customScanVersion",
"PIPER_authToken",
"PIPER_pythonRequirementsInstallSuffix",
"PIPER_repository",
"PIPER_owner"
] |
[]
|
["PIPER_commitId", "PIPER_version", "PIPER_githubToken", "PIPER_translate", "PIPER_pythonRequirementsFile", "PIPER_globalSettingsFile", "PIPER_commitMessage", "PIPER_artifactUrl", "PIPER_serverUrl", "PIPER_projectSettingsFile", "PIPER_pullRequestName", "PIPER_m2Path", "PIPER_customScanVersion", "PIPER_authToken", "PIPER_pythonRequirementsInstallSuffix", "PIPER_repository", "PIPER_owner"]
|
go
| 17 | 0 | |
distributed/nanny.py
|
import asyncio
import logging
from multiprocessing.queues import Empty
import os
import psutil
import shutil
import threading
import uuid
import warnings
import weakref
import dask
from dask.system import CPU_COUNT
from tornado.ioloop import IOLoop
from tornado import gen
from .comm import get_address_host, unparse_host_port
from .comm.addressing import address_from_user_args
from .core import RPCClosed, CommClosedError, coerce_to_address
from .metrics import time
from .node import ServerNode
from .process import AsyncProcess
from .proctitle import enable_proctitle_on_children
from .security import Security
from .utils import (
get_ip,
mp_context,
silence_logging,
json_load_robust,
PeriodicCallback,
parse_timedelta,
ignoring,
TimeoutError,
)
from .worker import run, parse_memory_limit, Worker
logger = logging.getLogger(__name__)
class Nanny(ServerNode):
""" A process to manage worker processes
    The nanny spins up Worker processes, watches them, and kills or restarts
    them as necessary. It is necessary if you want to use the
    ``Client.restart`` method, or to restart the worker automatically if
    it gets to the terminate fraction of its memory limit.
The parameters for the Nanny are mostly the same as those for the Worker.
See Also
--------
Worker
"""
_instances = weakref.WeakSet()
process = None
status = None
def __init__(
self,
scheduler_ip=None,
scheduler_port=None,
scheduler_file=None,
worker_port=0,
nthreads=None,
ncores=None,
loop=None,
local_dir=None,
local_directory="dask-worker-space",
services=None,
name=None,
memory_limit="auto",
reconnect=True,
validate=False,
quiet=False,
resources=None,
silence_logs=None,
death_timeout=None,
preload=None,
preload_argv=None,
security=None,
contact_address=None,
listen_address=None,
worker_class=None,
env=None,
interface=None,
host=None,
port=None,
protocol=None,
config=None,
**worker_kwargs
):
self._setup_logging(logger)
self.loop = loop or IOLoop.current()
self.security = security or Security()
assert isinstance(self.security, Security)
self.connection_args = self.security.get_connection_args("worker")
self.listen_args = self.security.get_listen_args("worker")
if scheduler_file:
cfg = json_load_robust(scheduler_file)
self.scheduler_addr = cfg["address"]
elif scheduler_ip is None and dask.config.get("scheduler-address"):
self.scheduler_addr = dask.config.get("scheduler-address")
elif scheduler_port is None:
self.scheduler_addr = coerce_to_address(scheduler_ip)
else:
self.scheduler_addr = coerce_to_address((scheduler_ip, scheduler_port))
if protocol is None:
protocol_address = self.scheduler_addr.split("://")
if len(protocol_address) == 2:
protocol = protocol_address[0]
if ncores is not None:
warnings.warn("the ncores= parameter has moved to nthreads=")
nthreads = ncores
self._given_worker_port = worker_port
self.nthreads = nthreads or CPU_COUNT
self.reconnect = reconnect
self.validate = validate
self.resources = resources
self.death_timeout = parse_timedelta(death_timeout)
self.preload = preload
if self.preload is None:
self.preload = dask.config.get("distributed.worker.preload")
self.preload_argv = preload_argv
if self.preload_argv is None:
self.preload_argv = dask.config.get("distributed.worker.preload-argv")
self.Worker = Worker if worker_class is None else worker_class
self.env = env or {}
self.config = config or {}
worker_kwargs.update(
{
"port": worker_port,
"interface": interface,
"protocol": protocol,
"host": host,
}
)
self.worker_kwargs = worker_kwargs
self.contact_address = contact_address
self.memory_terminate_fraction = dask.config.get(
"distributed.worker.memory.terminate"
)
if local_dir is not None:
warnings.warn("The local_dir keyword has moved to local_directory")
local_directory = local_dir
self.local_directory = local_directory
self.services = services
self.name = name
self.quiet = quiet
self.auto_restart = True
self.memory_limit = parse_memory_limit(memory_limit, self.nthreads)
if silence_logs:
silence_logging(level=silence_logs)
self.silence_logs = silence_logs
handlers = {
"instantiate": self.instantiate,
"kill": self.kill,
"restart": self.restart,
            # cannot name this handler 'close' because of a naming conflict on the RPC side
"get_logs": self.get_logs,
"terminate": self.close,
"close_gracefully": self.close_gracefully,
"run": self.run,
}
super(Nanny, self).__init__(
handlers=handlers, io_loop=self.loop, connection_args=self.connection_args
)
self.scheduler = self.rpc(self.scheduler_addr)
if self.memory_limit:
pc = PeriodicCallback(self.memory_monitor, 100, io_loop=self.loop)
self.periodic_callbacks["memory"] = pc
if (
not host
and not interface
and not self.scheduler_addr.startswith("inproc://")
):
host = get_ip(get_address_host(self.scheduler.address))
self._start_address = address_from_user_args(
host=host,
port=port,
interface=interface,
protocol=protocol,
security=security,
)
self._listen_address = listen_address
Nanny._instances.add(self)
self.status = "init"
def __repr__(self):
return "<Nanny: %s, threads: %d>" % (self.worker_address, self.nthreads)
async def _unregister(self, timeout=10):
if self.process is None:
return
worker_address = self.process.worker_address
if worker_address is None:
return
allowed_errors = (TimeoutError, CommClosedError, EnvironmentError, RPCClosed)
with ignoring(allowed_errors):
await asyncio.wait_for(
self.scheduler.unregister(address=self.worker_address), timeout
)
@property
def worker_address(self):
return None if self.process is None else self.process.worker_address
@property
def worker_dir(self):
return None if self.process is None else self.process.worker_dir
@property
def local_dir(self):
""" For API compatibility with Nanny """
warnings.warn("The local_dir attribute has moved to local_directory")
return self.local_directory
async def start(self):
""" Start nanny, start local process, start watching """
await self.listen(self._start_address, listen_args=self.listen_args)
self.ip = get_address_host(self.address)
logger.info(" Start Nanny at: %r", self.address)
response = await self.instantiate()
if response == "running":
assert self.worker_address
self.status = "running"
else:
await self.close()
self.start_periodic_callbacks()
return self
async def kill(self, comm=None, timeout=2):
""" Kill the local worker process
Blocks until both the process is down and the scheduler is properly
informed
"""
self.auto_restart = False
if self.process is None:
return "OK"
deadline = self.loop.time() + timeout
await self.process.kill(timeout=0.8 * (deadline - self.loop.time()))
async def instantiate(self, comm=None):
""" Start a local worker process
Blocks until the process is up and the scheduler is properly informed
"""
if self._listen_address:
start_arg = self._listen_address
else:
host = self.listener.bound_address[0]
start_arg = self.listener.prefix + unparse_host_port(
host, self._given_worker_port
)
if self.process is None:
worker_kwargs = dict(
scheduler_ip=self.scheduler_addr,
nthreads=self.nthreads,
local_directory=self.local_directory,
services=self.services,
nanny=self.address,
name=self.name,
memory_limit=self.memory_limit,
reconnect=self.reconnect,
resources=self.resources,
validate=self.validate,
silence_logs=self.silence_logs,
death_timeout=self.death_timeout,
preload=self.preload,
preload_argv=self.preload_argv,
security=self.security,
contact_address=self.contact_address,
)
worker_kwargs.update(self.worker_kwargs)
self.process = WorkerProcess(
worker_kwargs=worker_kwargs,
worker_start_args=(start_arg,),
silence_logs=self.silence_logs,
on_exit=self._on_exit_sync,
worker=self.Worker,
env=self.env,
config=self.config,
)
self.auto_restart = True
if self.death_timeout:
try:
result = await asyncio.wait_for(
self.process.start(), self.death_timeout
)
except TimeoutError:
await self.close(timeout=self.death_timeout)
logger.error(
"Timed out connecting Nanny '%s' to scheduler '%s'",
self,
self.scheduler_addr,
)
raise
else:
result = await self.process.start()
return result
async def restart(self, comm=None, timeout=2, executor_wait=True):
start = time()
async def _():
if self.process is not None:
await self.kill()
await self.instantiate()
try:
await asyncio.wait_for(_(), timeout)
except TimeoutError:
logger.error("Restart timed out, returning before finished")
return "timed out"
else:
return "OK"
@property
def _psutil_process(self):
pid = self.process.process.pid
try:
proc = self._psutil_process_obj
except AttributeError:
self._psutil_process_obj = psutil.Process(pid)
if self._psutil_process_obj.pid != pid:
self._psutil_process_obj = psutil.Process(pid)
return self._psutil_process_obj
def memory_monitor(self):
""" Track worker's memory. Restart if it goes above terminate fraction """
if self.status != "running":
return
process = self.process.process
if process is None:
return
try:
proc = self._psutil_process
memory = proc.memory_info().rss
except (ProcessLookupError, psutil.NoSuchProcess, psutil.AccessDenied):
return
frac = memory / self.memory_limit
if self.memory_terminate_fraction and frac > self.memory_terminate_fraction:
logger.warning(
"Worker exceeded %d%% memory budget. Restarting",
100 * self.memory_terminate_fraction,
)
process.terminate()
def is_alive(self):
return self.process is not None and self.process.is_alive()
def run(self, *args, **kwargs):
return run(self, *args, **kwargs)
def _on_exit_sync(self, exitcode):
self.loop.add_callback(self._on_exit, exitcode)
async def _on_exit(self, exitcode):
if self.status not in ("closing", "closed"):
try:
await self.scheduler.unregister(address=self.worker_address)
except (EnvironmentError, CommClosedError):
if not self.reconnect:
await self.close()
return
try:
if self.status not in ("closing", "closed", "closing-gracefully"):
if self.auto_restart:
logger.warning("Restarting worker")
await self.instantiate()
elif self.status == "closing-gracefully":
await self.close()
except Exception:
logger.error(
"Failed to restart worker after its process exited", exc_info=True
)
@property
def pid(self):
return self.process and self.process.pid
def _close(self, *args, **kwargs):
warnings.warn("Worker._close has moved to Worker.close", stacklevel=2)
return self.close(*args, **kwargs)
def close_gracefully(self, comm=None):
"""
A signal that we shouldn't try to restart workers if they go away
This is used as part of the cluster shutdown process.
"""
self.status = "closing-gracefully"
async def close(self, comm=None, timeout=5, report=None):
"""
Close the worker process, stop all comms.
"""
if self.status == "closing":
await self.finished()
assert self.status == "closed"
if self.status == "closed":
return "OK"
self.status = "closing"
logger.info("Closing Nanny at %r", self.address)
self.stop()
try:
if self.process is not None:
await self.kill(timeout=timeout)
except Exception:
pass
self.process = None
await self.rpc.close()
self.status = "closed"
if comm:
await comm.write("OK")
await ServerNode.close(self)
class WorkerProcess:
def __init__(
self,
worker_kwargs,
worker_start_args,
silence_logs,
on_exit,
worker,
env,
config,
):
self.status = "init"
self.silence_logs = silence_logs
self.worker_kwargs = worker_kwargs
self.worker_start_args = worker_start_args
self.on_exit = on_exit
self.process = None
self.Worker = worker
self.env = env
self.config = config
# Initialized when worker is ready
self.worker_dir = None
self.worker_address = None
async def start(self):
"""
Ensure the worker process is started.
"""
enable_proctitle_on_children()
if self.status == "running":
return self.status
if self.status == "starting":
await self.running.wait()
return self.status
self.init_result_q = init_q = mp_context.Queue()
self.child_stop_q = mp_context.Queue()
uid = uuid.uuid4().hex
self.process = AsyncProcess(
target=self._run,
name="Dask Worker process (from Nanny)",
kwargs=dict(
worker_kwargs=self.worker_kwargs,
worker_start_args=self.worker_start_args,
silence_logs=self.silence_logs,
init_result_q=self.init_result_q,
child_stop_q=self.child_stop_q,
uid=uid,
Worker=self.Worker,
env=self.env,
config=self.config,
),
)
self.process.daemon = dask.config.get("distributed.worker.daemon", default=True)
self.process.set_exit_callback(self._on_exit)
self.running = asyncio.Event()
self.stopped = asyncio.Event()
self.status = "starting"
try:
await self.process.start()
except OSError:
logger.exception("Nanny failed to start process", exc_info=True)
self.process.terminate()
return
msg = await self._wait_until_connected(uid)
if not msg:
return self.status
self.worker_address = msg["address"]
self.worker_dir = msg["dir"]
assert self.worker_address
self.status = "running"
self.running.set()
init_q.close()
return self.status
def _on_exit(self, proc):
if proc is not self.process:
# Ignore exit of old process instance
return
self.mark_stopped()
def _death_message(self, pid, exitcode):
assert exitcode is not None
if exitcode == 255:
return "Worker process %d was killed by unknown signal" % (pid,)
elif exitcode >= 0:
return "Worker process %d exited with status %d" % (pid, exitcode)
else:
return "Worker process %d was killed by signal %d" % (pid, -exitcode)
def is_alive(self):
return self.process is not None and self.process.is_alive()
@property
def pid(self):
return self.process.pid if self.process and self.process.is_alive() else None
def mark_stopped(self):
if self.status != "stopped":
r = self.process.exitcode
assert r is not None
if r != 0:
msg = self._death_message(self.process.pid, r)
logger.info(msg)
self.status = "stopped"
self.stopped.set()
# Release resources
self.process.close()
self.init_result_q = None
self.child_stop_q = None
self.process = None
# Best effort to clean up worker directory
if self.worker_dir and os.path.exists(self.worker_dir):
shutil.rmtree(self.worker_dir, ignore_errors=True)
self.worker_dir = None
# User hook
if self.on_exit is not None:
self.on_exit(r)
async def kill(self, timeout=2, executor_wait=True):
"""
Ensure the worker process is stopped, waiting at most
*timeout* seconds before terminating it abruptly.
"""
loop = IOLoop.current()
deadline = loop.time() + timeout
if self.status == "stopped":
return
if self.status == "stopping":
await self.stopped.wait()
return
assert self.status in ("starting", "running")
self.status = "stopping"
process = self.process
self.child_stop_q.put(
{
"op": "stop",
"timeout": max(0, deadline - loop.time()) * 0.8,
"executor_wait": executor_wait,
}
)
await asyncio.sleep(0) # otherwise we get broken pipe errors
self.child_stop_q.close()
while process.is_alive() and loop.time() < deadline:
await asyncio.sleep(0.05)
if process.is_alive():
logger.warning(
"Worker process still alive after %d seconds, killing", timeout
)
try:
await process.terminate()
except Exception as e:
logger.error("Failed to kill worker process: %s", e)
async def _wait_until_connected(self, uid):
delay = 0.05
while True:
if self.status != "starting":
return
try:
msg = self.init_result_q.get_nowait()
except Empty:
await asyncio.sleep(delay)
continue
if msg["uid"] != uid: # ensure that we didn't cross queues
continue
if "exception" in msg:
logger.error(
"Failed while trying to start worker process: %s", msg["exception"]
)
await self.process.join()
                raise msg["exception"]
else:
return msg
@classmethod
def _run(
cls,
worker_kwargs,
worker_start_args,
silence_logs,
init_result_q,
child_stop_q,
uid,
env,
config,
Worker,
): # pragma: no cover
os.environ.update(env)
dask.config.set(config)
try:
from dask.multiprocessing import initialize_worker_process
except ImportError: # old Dask version
pass
else:
initialize_worker_process()
if silence_logs:
logger.setLevel(silence_logs)
IOLoop.clear_instance()
loop = IOLoop()
loop.make_current()
worker = Worker(**worker_kwargs)
async def do_stop(timeout=5, executor_wait=True):
try:
await worker.close(
report=False,
nanny=False,
executor_wait=executor_wait,
timeout=timeout,
)
finally:
loop.stop()
def watch_stop_q():
"""
Wait for an incoming stop message and then stop the
worker cleanly.
"""
while True:
try:
msg = child_stop_q.get(timeout=1000)
except Empty:
pass
else:
child_stop_q.close()
assert msg.pop("op") == "stop"
loop.add_callback(do_stop, **msg)
break
t = threading.Thread(target=watch_stop_q, name="Nanny stop queue watch")
t.daemon = True
t.start()
async def run():
"""
Try to start worker and inform parent of outcome.
"""
try:
await worker
except Exception as e:
logger.exception("Failed to start worker")
init_result_q.put({"uid": uid, "exception": e})
init_result_q.close()
else:
try:
assert worker.address
except ValueError:
pass
else:
init_result_q.put(
{
"address": worker.address,
"dir": worker.local_directory,
"uid": uid,
}
)
init_result_q.close()
await worker.finished()
logger.info("Worker closed")
try:
loop.run_sync(run)
except (TimeoutError, gen.TimeoutError):
# Loop was stopped before wait_until_closed() returned, ignore
pass
except KeyboardInterrupt:
pass
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
config.py
|
"""Define configuration variables in experiment, model and training levels.
Quick Setup
===========
Change the values in the dictionary `SETUP` for a quick setup.
Documentation is provided right after each key.
Configuration
=============
More configuration options are provided. Four dictionaries `EXP_CONFIG`,
`DATA_CONFIG`, `MODEL_CONFIG` and `TRAIN_CONFIG` define experiment-, data-,
model- and training-related configuration variables, respectively.
Note that the automatically-determined experiment name is based only on the
values defined in the dictionary `SETUP`, so remember to provide the experiment
name manually if you have changed the configuration so that you won't overwrite
existing experiment directories.
"""
import os
import shutil
import distutils.dir_util
import importlib
import numpy as np
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
# Quick setup
SETUP = {
'exp_name': 'chorales',
# The experiment name. Also the name of the folder that will be created
# in './exp/' and all the experiment-related files are saved in that
# folder. None to determine automatically. The automatically-
# determined experiment name is based only on the values defined in the
# dictionary `SETUP`, so remember to provide the experiment name manually
# (so that you won't overwrite a trained model).
'training_data': 'fourtracks.npy',
    # Filename of the training data. The training data can be loaded from an
    # npy file on the hard disk or from shared memory using the SharedArray
    # package. Note that the data will be reshaped to (-1, num_bar,
    # num_timestep, num_pitch, num_track), so remember to set these variables
    # to proper values, which are defined in `MODEL_CONFIG`.
'training_data_location': 'hd',
    # Location of the training data. 'hd' to load from an npy file stored on the
    # hard disk. 'sa' to load from a shared array using the SharedArray package.
'gpu': '0',
# The GPU index in os.environ['CUDA_VISIBLE_DEVICES'] to use.
'prefix': 'chorales',
# Prefix for the experiment name. Useful when training with different
# training data to avoid replacing the previous experiment outputs.
'mode': 'phrase',
# {'bar', 'phrase', None}
# Use the two common modes which come with several presets and
# pretrained models or set to None and setup `MODEL_CONFIG['num_bar']`
# to define the number of bars to output.
'sample_along_training': True,
    # True to generate samples during the training process. False to disable.
    'evaluate_along_training': True,
    # True to run evaluation during the training process. False to disable.
    'verbose': True,
    # True to print details of each batch to stdout. False to print once per epoch.
'two_stage_training': True,
# True to train the model in a two-stage training setting. False to
# train the model in an end-to-end manner.
'training_phase': 'train',
# {'train', 'pretrain'}
# The training phase in a two-stage training setting. Only effective
# when `two_stage_training` is True.
'joint_training': True,
# True to train the generator and the refiner jointly. Only effective
# when `two_stage_training` is True and `training_phase` is 'train'.
'pretrained_dir': None,
# The directory containing the pretrained model. None to retrain the
# model from scratch.
'first_stage_dir': 'exp/chorales/checkpoints/',
# The directory containing the pretrained first-stage model. None to
# determine automatically (assuming default `exp_name`). Only effective
# when `two_stage_training` is True and `training_phase` is 'train'.
'preset_g': 'proposed',
# {'proposed', 'proposed_small', None}
# Use a preset network architecture for the generator or set to None and
# setup `MODEL_CONFIG['net_g']` to define the network architecture.
'preset_d': 'proposed',
# {'proposed', 'proposed_small', 'ablated', 'baseline', None}
# Use a preset network architecture for the discriminator or set to None
# and setup `MODEL_CONFIG['net_d']` to define the network architecture.
'preset_r': 'proposed_round',
# {'proposed_round', 'proposed_bernoulli'}
# Use a preset network architecture for the refiner or set to None and
# setup `MODEL_CONFIG['net_r']` to define the network architecture.
}
#===============================================================================
#=========================== TensorFlow Configuration ==========================
#===============================================================================
os.environ['CUDA_VISIBLE_DEVICES'] = SETUP['gpu']
TF_CONFIG = tf.ConfigProto()
TF_CONFIG.gpu_options.allow_growth = True
#===============================================================================
#========================== Experiment Configuration ===========================
#===============================================================================
EXP_CONFIG = {
'exp_name': None,
'two_stage_training': None,
'pretrained_dir': None,
'first_stage_dir': None,
}
if EXP_CONFIG['two_stage_training'] is None:
EXP_CONFIG['two_stage_training'] = SETUP['two_stage_training']
if EXP_CONFIG['pretrained_dir'] is None:
EXP_CONFIG['pretrained_dir'] = SETUP['pretrained_dir']
# Set default experiment name
if EXP_CONFIG['exp_name'] is None:
if SETUP['exp_name'] is not None:
EXP_CONFIG['exp_name'] = SETUP['exp_name']
elif not SETUP['two_stage_training']:
EXP_CONFIG['exp_name'] = '_'.join(
(SETUP['prefix'], 'end2end', 'g', SETUP['preset_g'], 'd',
SETUP['preset_d'], 'r', SETUP['preset_r'])
)
elif SETUP['training_phase'] == 'pretrain':
EXP_CONFIG['exp_name'] = '_'.join(
(SETUP['prefix'], SETUP['training_phase'], 'g', SETUP['preset_g'],
'd', SETUP['preset_d'])
)
elif SETUP['training_phase'] == 'train':
if SETUP['joint_training']:
EXP_CONFIG['exp_name'] = '_'.join(
(SETUP['prefix'], SETUP['training_phase'], 'joint', 'g',
SETUP['preset_g'], 'd', SETUP['preset_d'], 'r',
SETUP['preset_r'])
)
else:
EXP_CONFIG['exp_name'] = '_'.join(
(SETUP['prefix'], SETUP['training_phase'], 'g',
SETUP['preset_g'], 'd', SETUP['preset_d'], 'r',
SETUP['preset_r'])
)
# Set default pretrained model directory
if EXP_CONFIG['first_stage_dir'] is None:
if SETUP['first_stage_dir'] is not None:
EXP_CONFIG['first_stage_dir'] = SETUP['first_stage_dir']
else:
EXP_CONFIG['first_stage_dir'] = os.path.join(
os.path.dirname(os.path.realpath(__file__)), 'exp',
'_'.join((SETUP['prefix'], 'pretrain', 'g', SETUP['preset_g'],
'd', SETUP['preset_d'])), 'checkpoints'
)
#===============================================================================
#============================= Data Configuration ==============================
#===============================================================================
DATA_CONFIG = {
'training_data': None,
'training_data_location': None,
}
if DATA_CONFIG['training_data'] is None:
DATA_CONFIG['training_data'] = SETUP['training_data']
if DATA_CONFIG['training_data_location'] is None:
DATA_CONFIG['training_data_location'] = SETUP['training_data_location']
#===============================================================================
#=========================== Training Configuration ============================
#===============================================================================
TRAIN_CONFIG = {
'sample_along_training': None,
'evaluate_along_training': None,
'verbose': None,
'two_stage_training': None,
'training_phase': None,
'num_epoch': 20,
'slope_annealing_rate': 1.1,
}
if TRAIN_CONFIG['sample_along_training'] is None:
TRAIN_CONFIG['sample_along_training'] = SETUP['sample_along_training']
if TRAIN_CONFIG['evaluate_along_training'] is None:
TRAIN_CONFIG['evaluate_along_training'] = SETUP['evaluate_along_training']
if TRAIN_CONFIG['training_phase'] is None:
TRAIN_CONFIG['training_phase'] = SETUP['training_phase']
if TRAIN_CONFIG['verbose'] is None:
TRAIN_CONFIG['verbose'] = SETUP['verbose']
#===============================================================================
#============================= Model Configuration =============================
#===============================================================================
MODEL_CONFIG = {
# Models
'joint_training': None,
# Parameters
'batch_size': 16, # Note: tf.layers.conv3d_transpose requires a fixed batch
# size in TensorFlow < 1.6
'gan': {
'type': 'wgan-gp', # 'gan', 'wgan'
'clip_value': .01,
'gp_coefficient': 10.
},
'optimizer': {
# Parameters for Adam optimizers
'lr': .002,
'beta1': .5,
'beta2': .9,
'epsilon': 1e-8
},
# Data
'num_bar': None,
'num_beat': 4,
'num_pitch': 84,
'num_track': 4,
'num_timestep': 96,
'beat_resolution': 24,
'lowest_pitch': 24, # MIDI note number of the lowest pitch in data tensors
# Tracks
'track_names': (
'Soprano', 'Alto', 'Tenor', 'Bass'
),
'programs': (55, 55, 55, 55),
'is_drums': (False, False, False, False),
# Network architectures (define them here if not using the presets)
'net_g': None,
'net_d': None,
'net_r': None,
# Playback
'pause_between_samples': 48,
'tempo': 70.,
# Samples
'num_sample': 16,
'sample_grid': (2, 8),
# Metrics
'metric_map': np.array([
# indices of tracks for the metrics to compute
[True] * 8, # empty bar rate
[True] * 8, # number of pitch used
[False] + [True] * 7, # qualified note rate
[False] + [True] * 7, # polyphonicity
[False] + [True] * 7, # in scale rate
[True] + [False] * 7, # in drum pattern rate
[False] + [True] * 7 # number of chroma used
], dtype=bool),
'tonal_distance_pairs': [(1, 2)], # pairs to compute the tonal distance
'scale_mask': list(map(bool, [1, 0, 1, 0, 1, 1, 0, 1, 0, 1, 0, 1])),
'drum_filter': np.tile([1., .1, 0., 0., 0., .1], 16),
'tonal_matrix_coefficient': (1., 1., .5),
# Directories
'checkpoint_dir': None,
'sample_dir': None,
'eval_dir': None,
'log_dir': None,
'src_dir': None,
}
if MODEL_CONFIG['joint_training'] is None:
MODEL_CONFIG['joint_training'] = SETUP['joint_training']
# Set mode
if MODEL_CONFIG['num_bar'] is None:
if SETUP['mode'] == 'bar':
MODEL_CONFIG['num_bar'] = 1
elif SETUP['mode'] == 'phrase':
MODEL_CONFIG['num_bar'] = 4
# Import preset network architectures
if MODEL_CONFIG['net_g'] is None:
IMPORTED = importlib.import_module('.'.join((
'musegan.bmusegan.presets', SETUP['mode'], 'generator',
SETUP['preset_g']
)))
MODEL_CONFIG['net_g'] = IMPORTED.NET_G
if MODEL_CONFIG['net_d'] is None:
IMPORTED = importlib.import_module('.'.join((
'musegan.bmusegan.presets', SETUP['mode'], 'discriminator',
SETUP['preset_d']
)))
MODEL_CONFIG['net_d'] = IMPORTED.NET_D
if MODEL_CONFIG['net_r'] is None:
IMPORTED = importlib.import_module('.'.join((
'musegan.bmusegan.presets', SETUP['mode'], 'refiner', SETUP['preset_r']
)))
MODEL_CONFIG['net_r'] = IMPORTED.NET_R
# Set default directories
if MODEL_CONFIG['checkpoint_dir'] is None:
MODEL_CONFIG['checkpoint_dir'] = os.path.join(
os.path.dirname(os.path.realpath(__file__)), 'exp',
EXP_CONFIG['exp_name'], 'checkpoints'
)
if MODEL_CONFIG['sample_dir'] is None:
MODEL_CONFIG['sample_dir'] = os.path.join(
os.path.dirname(os.path.realpath(__file__)), 'exp',
EXP_CONFIG['exp_name'], 'samples'
)
if MODEL_CONFIG['eval_dir'] is None:
MODEL_CONFIG['eval_dir'] = os.path.join(
os.path.dirname(os.path.realpath(__file__)), 'exp',
EXP_CONFIG['exp_name'], 'eval'
)
if MODEL_CONFIG['log_dir'] is None:
MODEL_CONFIG['log_dir'] = os.path.join(
os.path.dirname(os.path.realpath(__file__)), 'exp',
EXP_CONFIG['exp_name'], 'logs'
)
if MODEL_CONFIG['src_dir'] is None:
MODEL_CONFIG['src_dir'] = os.path.join(
os.path.dirname(os.path.realpath(__file__)), 'exp',
EXP_CONFIG['exp_name'], 'src'
)
#===============================================================================
#=================== Make directories & Backup source code =====================
#===============================================================================
# Make sure directories exist
for path in (MODEL_CONFIG['checkpoint_dir'], MODEL_CONFIG['sample_dir'],
MODEL_CONFIG['eval_dir'], MODEL_CONFIG['log_dir'],
MODEL_CONFIG['src_dir']):
if not os.path.exists(path):
os.makedirs(path)
# Backup source code
for path in os.listdir(os.path.dirname(os.path.realpath(__file__))):
if os.path.isfile(path):
if path.endswith('.py'):
shutil.copyfile(
os.path.basename(path),
os.path.join(MODEL_CONFIG['src_dir'], os.path.basename(path))
)
distutils.dir_util.copy_tree(
os.path.join(os.path.dirname(os.path.realpath(__file__)), 'musegan'),
os.path.join(MODEL_CONFIG['src_dir'], 'musegan')
)
|
[] |
[] |
[
"CUDA_VISIBLE_DEVICES"
] |
[]
|
["CUDA_VISIBLE_DEVICES"]
|
python
| 1 | 0 | |
src/main/java/yokohama/unit/DocyC.java
|
package yokohama.unit;
import java.io.File;
import java.io.IOException;
import java.io.InputStream;
import java.io.PrintStream;
import java.io.PrintWriter;
import java.net.URI;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.Optional;
import java.util.stream.Collectors;
import java.util.stream.Stream;
import lombok.AllArgsConstructor;
import org.apache.commons.cli.BasicParser;
import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.HelpFormatter;
import org.apache.commons.cli.Option;
import org.apache.commons.cli.OptionBuilder;
import org.apache.commons.cli.Options;
import org.apache.commons.cli.ParseException;
import org.apache.commons.cli.UnrecognizedOptionException;
import org.apache.commons.io.FilenameUtils;
import org.apache.commons.lang3.StringUtils;
import org.springframework.beans.factory.BeanFactory;
import yokohama.unit.position.ErrorMessage;
import yokohama.unit.position.Span;
import yokohama.unit.translator.CombinationStrategy;
import yokohama.unit.translator.DocyCompiler;
@AllArgsConstructor
public class DocyC implements Command {
BeanFactory context;
private final DocyCompiler compiler;
FileInputStreamFactory fileInputStreamFactory;
static Options constructOptions() {
Options options = new Options();
options.addOption(OptionBuilder
.withDescription("Generate no warnings")
.create("nowarn"));
options.addOption(OptionBuilder
.withDescription("Output messages about what the compiler is doing")
.create("verbose"));
options.addOption(OptionBuilder
.withDescription("Print a synopsis of standard options")
.create("help"));
options.addOption(OptionBuilder
.withDescription("Generate code to check @Invariant annotation")
.create("contract"));
options.addOption(OptionBuilder
.hasArg()
.withArgName("base-packages")
.withDescription("Base packages where converter classes are located")
.create("converter"));
options.addOption(OptionBuilder
.hasArg()
.withArgName("strategy")
.withDescription("Choose combination test strategy")
.create("combination"));
options.addOption(OptionBuilder
.withDescription("Emit Java code")
.create("j"));
options.addOption(OptionBuilder
.hasArg()
.withArgName("path")
.withDescription("Specify where to find user class files and annotation processors")
.create("classpath"));
options.addOption(OptionBuilder
.hasArg()
.withArgName("path")
.withDescription("Specify where to find user class files and annotation processors")
.create("cp"));
options.addOption(OptionBuilder
.hasArg()
.withArgName("encoding")
.withDescription("Specify character encoding used by source files")
.create("encoding"));
options.addOption(OptionBuilder
.hasArg()
.withArgName("directory")
.withDescription("Specify where to place generated class files")
.create("d"));
options.addOption(OptionBuilder
.hasArg()
.withArgName("release")
.withDescription("Generate class files for specific VM version")
.create("target"));
options.addOption(OptionBuilder
.hasArg()
.withArgName("directory")
.withDescription("Base directory for docy files")
.create("basedir"));
return options;
}
static List<String> extractOptions(List<Option> options, List<String> extractedOptions) {
return options.stream()
.filter(option -> extractedOptions.contains(option.getOpt()) )
.flatMap(option ->
option.hasArg() ? Stream.of("-" + option.getOpt(), option.getValue())
: Stream.of("-" + option.getOpt()))
.collect(Collectors.toList());
}
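    // Illustrative example (hypothetical command line, not from the sources):
    // if the user passed "-verbose -target 1.8 -d out" and extractedOptions is
    // ["nowarn", "verbose", "target"], this returns ["-verbose", "-target", "1.8"]:
    // only the whitelisted javac options survive, re-prefixed with "-" and
    // followed by their argument when they take one.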
static List<String> getClassPath(CommandLine commandLine) {
String cp = commandLine.getOptionValue("cp");
String classpath = commandLine.getOptionValue("classpath");
if (cp != null) {
return Arrays.asList(cp.split(File.pathSeparator));
} else if (classpath != null) {
return Arrays.asList(classpath.split(File.pathSeparator));
} else {
String env = System.getenv("DOCY_CLASSPATH");
if (env != null) {
return Arrays.asList(System.getenv("DOCY_CLASSPATH").split(File.pathSeparator));
} else {
return Arrays.asList();
}
}
}
@Override
@SuppressWarnings("unchecked")
public int run(InputStream in, PrintStream out, PrintStream err, String... args) {
URI baseDir;
Optional<Path> dest;
boolean emitJava;
boolean checkContract;
CombinationStrategy combinationStrategy;
List<String> converterBasePackages;
List<String> classPath;
List<String> javacArgs;
List<String> files;
try {
Options options = constructOptions();
List<String> javacOptions =
Arrays.asList("nowarn", "verbose", "target");
CommandLine commandLine = new BasicParser().parse(options, args);
if (commandLine.hasOption("help")) {
PrintWriter pw = new PrintWriter(err);
new HelpFormatter().printHelp(
pw,
80,
"docyc <options> <source files>",
"",
options,
1,
1,
"",
true
);
pw.flush();
return Command.EXIT_SUCCESS;
}
baseDir = Paths.get(commandLine.getOptionValue("basedir"), "").toUri();
String d = commandLine.getOptionValue("d");
dest = d == null ? Optional.empty() : Optional.of(Paths.get(d));
emitJava = commandLine.hasOption('j');
checkContract = commandLine.hasOption("contract");
combinationStrategy = context.getBean(
commandLine.getOptionValue("combination", "product"),
CombinationStrategy.class);
            String converter = commandLine.getOptionValue("converter");
converterBasePackages = converter == null
? Collections.emptyList()
: Arrays.asList(converter.split(","));
classPath = getClassPath(commandLine);
javacArgs = extractOptions(
Arrays.asList(commandLine.getOptions()),
javacOptions);
files = commandLine.getArgList();
} catch (UnrecognizedOptionException e) {
err.println("docyc: invalid flag: " + e.getOption());
err.println("Usage: docyc <options> <source files>");
err.println("use -help for a list of possible options");
return Command.EXIT_FAILURE;
} catch (ParseException e) {
err.println("docyc: " + e.getMessage());
return Command.EXIT_FAILURE;
}
List<ErrorMessage> errors = files.stream().flatMap(file -> {
String className = FilenameUtils.getBaseName(file);
Path path = Paths.get(file).toAbsolutePath();
URI uri = path.toUri();
URI relativeUri = baseDir.relativize(uri).resolve(".");
            String packageName = StringUtils.removeEnd(relativeUri.toString(), "/").replace("/", ".");
InputStream ins;
try {
ins = fileInputStreamFactory.create(path);
} catch (IOException e) {
Span span = Span.of(path);
return Stream.of(new ErrorMessage(e.getMessage(), span));
}
return compiler.compile(
path,
ins,
className,
packageName,
classPath,
dest,
emitJava,
checkContract,
combinationStrategy,
converterBasePackages,
javacArgs)
.stream();
}).collect(Collectors.toList());
if (errors.isEmpty()) {
return Command.EXIT_SUCCESS;
} else {
for (ErrorMessage errorMessage : errors) {
err.println(errorMessage);
}
return Command.EXIT_FAILURE;
}
}
}
|
[
"\"DOCY_CLASSPATH\"",
"\"DOCY_CLASSPATH\""
] |
[] |
[
"DOCY_CLASSPATH"
] |
[]
|
["DOCY_CLASSPATH"]
|
java
| 1 | 0 | |
pkg/slvlib/slvlib.go
|
// Copyright (c) Puneeth Rao Lokapalli 2022. All rights reserved.
// Licensed under the Apache license. See LICENSE file in the project root for full license information.
package slvlib
import (
"errors"
"os"
"github.com/lprao/slv-go-lib/internal/pkg/transport"
"github.com/lprao/slv-go-lib/pkg/logger"
slvpb "github.com/lprao/slv-proto"
)
// slvClient struct to manage the client side artifacts.
type slvClient struct {
// Grpc pkg object
grpc *transport.Grpc
}
var (
client *slvClient
log logger.Log
)
const (
SLV_SVC_CA_CERT = "certs/ca/ca.crt"
)
// Send the user requested operation to slv-svc
func (s *slvClient) execOp(slvVar *slvpb.SlvVar, op slvpb.Operation, accessToken string) (*slvpb.SlvVar, error) {
req := slvpb.ExecOpReq{
Operation: op,
Var: slvVar,
}
resp, err := s.grpc.DoGrpc(req, accessToken)
if err != nil {
		return &slvpb.SlvVar{}, err
}
return resp.Var, nil
}
// Initial setup and validations.
func init() {
log.InitLogger()
ep := os.Getenv("SLV_SVC_ENDPOINT")
if ep == "" {
log.Fatalf("SLV_SVC_ENDPOINT environment variable not set")
return
}
	// Fall back to insecure mode only when no CA cert is available.
	insecure := false
	_, err := os.Stat(SLV_SVC_CA_CERT)
	if err != nil && errors.Is(err, os.ErrNotExist) {
		log.Warnf("CA cert for slv-svc not provided, using insecure mode")
		insecure = true
	}
	client = &slvClient{grpc: transport.NewGrpcClient(ep, insecure, SLV_SVC_CA_CERT)}
}
|
[
"\"SLV_SVC_ENDPOINT\""
] |
[] |
[
"SLV_SVC_ENDPOINT"
] |
[]
|
["SLV_SVC_ENDPOINT"]
|
go
| 1 | 0 | |
run_pretraining.py
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Run masked LM/next sentence masked_lm pre-training for BERT."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import json
import modeling
import optimization
import tensorflow as tf
flags = tf.flags
FLAGS = flags.FLAGS
## Required parameters
flags.DEFINE_string(
"bert_config_file",
"./bert_config_file",
"The config json file corresponding to the pre-trained BERT model. "
"This specifies the model architecture.",
)
flags.DEFINE_string(
"input_file", None, "Input TF example files (can be a glob or comma separated)."
)
flags.DEFINE_string(
"output_dir",
None,
"The output directory where the model checkpoints will be written.",
)
## Other parameters
flags.DEFINE_string(
"init_checkpoint",
None,
"Initial checkpoint (usually from a pre-trained BERT model).",
)
flags.DEFINE_integer(
"max_seq_length",
128,
"The maximum total input sequence length after WordPiece tokenization. "
"Sequences longer than this will be truncated, and sequences shorter "
"than this will be padded. Must match data generation.",
)
flags.DEFINE_integer(
"max_predictions_per_seq",
20,
"Maximum number of masked LM predictions per sequence. "
"Must match data generation.",
)
flags.DEFINE_bool("do_train", True, "Whether to run training.")
flags.DEFINE_bool("do_eval", True, "Whether to run eval on the dev set.")
flags.DEFINE_integer("train_batch_size", 32, "Total batch size for training.")
flags.DEFINE_integer("eval_batch_size", 8, "Total batch size for eval.")
flags.DEFINE_float("learning_rate", 5e-5, "The initial learning rate for Adam.")
flags.DEFINE_integer("num_train_steps", 100, "Number of training steps.")
flags.DEFINE_integer("num_warmup_steps", 10, "Number of warmup steps.")
flags.DEFINE_integer(
"save_checkpoints_steps", 5, "How often to save the model checkpoint."
)
flags.DEFINE_integer(
"iterations_per_loop", 1000, "How many steps to make in each estimator call."
)
flags.DEFINE_integer("max_eval_steps", 100, "Maximum number of eval steps.")
flags.DEFINE_bool("use_tpu", False, "Whether to use TPU or GPU/CPU.")
tf.flags.DEFINE_string(
"tpu_name",
None,
"The Cloud TPU to use for training. This should be either the name "
"used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 "
"url.",
)
tf.flags.DEFINE_string(
"tpu_zone",
None,
"[Optional] GCE zone where the Cloud TPU is located in. If not "
"specified, we will attempt to automatically detect the GCE project from "
"metadata.",
)
tf.flags.DEFINE_string(
"gcp_project",
None,
"[Optional] Project name for the Cloud TPU-enabled project. If not "
"specified, we will attempt to automatically detect the GCE project from "
"metadata.",
)
tf.flags.DEFINE_string("master", None, "[Optional] TensorFlow master URL.")
flags.DEFINE_integer(
"num_tpu_cores",
8,
"Only used if `use_tpu` is True. Total number of TPU cores to use.",
)
flags.DEFINE_integer("num_gpus", 2, "Total number of GPUs to use.")
flags.DEFINE_bool("multi_worker", True, "Multi-worker training.")
# My additional flags
tf.app.flags.DEFINE_boolean("use_original_ckpt", True, "use original ckpt")
flags.DEFINE_integer("task_index", 0, "task_index")
flags.DEFINE_string(
"worker", "localhost:3000,localhost:3001", "specify workers in the cluster"
)
worker = FLAGS.worker.split(",")
task_index = FLAGS.task_index
os.environ["CUDA_VISIBLE_DEVICES"] = str(task_index)
if not FLAGS.use_original_ckpt:
tf.train.TFTunerContext.init_context(len(worker), task_index)
def model_fn_builder(
bert_config,
init_checkpoint,
learning_rate,
num_train_steps,
num_warmup_steps,
use_tpu,
use_one_hot_embeddings,
):
"""Returns `model_fn` closure for TPUEstimator."""
def model_fn(features, labels, mode, params): # pylint: disable=unused-argument
"""The `model_fn` for TPUEstimator."""
tf.logging.info("*** Features ***")
for name in sorted(features.keys()):
tf.logging.info(" name = %s, shape = %s" % (name, features[name].shape))
input_ids = features["input_ids"]
input_mask = features["input_mask"]
segment_ids = features["segment_ids"]
masked_lm_positions = features["masked_lm_positions"]
masked_lm_ids = features["masked_lm_ids"]
masked_lm_weights = features["masked_lm_weights"]
next_sentence_labels = features["next_sentence_labels"]
is_training = mode == tf.estimator.ModeKeys.TRAIN
model = modeling.BertModel(
config=bert_config,
is_training=is_training,
input_ids=input_ids,
input_mask=input_mask,
token_type_ids=segment_ids,
use_one_hot_embeddings=use_one_hot_embeddings,
)
(
masked_lm_loss,
masked_lm_example_loss,
masked_lm_log_probs,
) = get_masked_lm_output(
bert_config,
model.get_sequence_output(),
model.get_embedding_table(),
masked_lm_positions,
masked_lm_ids,
masked_lm_weights,
)
(
next_sentence_loss,
next_sentence_example_loss,
next_sentence_log_probs,
) = get_next_sentence_output(
bert_config, model.get_pooled_output(), next_sentence_labels
)
total_loss = masked_lm_loss + next_sentence_loss
tvars = tf.trainable_variables()
initialized_variable_names = {}
scaffold_fn = None
if init_checkpoint:
(
assignment_map,
initialized_variable_names,
) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)
if use_tpu:
def tpu_scaffold():
tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
return tf.train.Scaffold()
scaffold_fn = tpu_scaffold
else:
tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
tf.logging.info("**** Trainable Variables ****")
for var in tvars:
init_string = ""
if var.name in initialized_variable_names:
init_string = ", *INIT_FROM_CKPT*"
tf.logging.info(
" name = %s, shape = %s%s", var.name, var.shape, init_string
)
output_spec = None
if mode == tf.estimator.ModeKeys.TRAIN:
train_op = optimization.create_optimizer(
total_loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu
)
if FLAGS.use_tpu:
output_spec = tf.contrib.tpu.TPUEstimatorSpec(
mode=mode,
loss=total_loss,
train_op=train_op,
scaffold_fn=scaffold_fn,
)
else:
output_spec = tf.estimator.EstimatorSpec(
mode=mode, loss=total_loss, train_op=train_op, scaffold=None
)
elif mode == tf.estimator.ModeKeys.EVAL:
def metric_fn(
masked_lm_example_loss,
masked_lm_log_probs,
masked_lm_ids,
masked_lm_weights,
next_sentence_example_loss,
next_sentence_log_probs,
next_sentence_labels,
):
"""Computes the loss and accuracy of the model."""
masked_lm_log_probs = tf.reshape(
masked_lm_log_probs, [-1, masked_lm_log_probs.shape[-1]]
)
masked_lm_predictions = tf.argmax(
masked_lm_log_probs, axis=-1, output_type=tf.int32
)
masked_lm_example_loss = tf.reshape(masked_lm_example_loss, [-1])
masked_lm_ids = tf.reshape(masked_lm_ids, [-1])
masked_lm_weights = tf.reshape(masked_lm_weights, [-1])
masked_lm_accuracy = tf.metrics.accuracy(
labels=masked_lm_ids,
predictions=masked_lm_predictions,
weights=masked_lm_weights,
)
masked_lm_mean_loss = tf.metrics.mean(
values=masked_lm_example_loss, weights=masked_lm_weights
)
next_sentence_log_probs = tf.reshape(
next_sentence_log_probs, [-1, next_sentence_log_probs.shape[-1]]
)
next_sentence_predictions = tf.argmax(
next_sentence_log_probs, axis=-1, output_type=tf.int32
)
next_sentence_labels = tf.reshape(next_sentence_labels, [-1])
next_sentence_accuracy = tf.metrics.accuracy(
labels=next_sentence_labels, predictions=next_sentence_predictions
)
next_sentence_mean_loss = tf.metrics.mean(
values=next_sentence_example_loss
)
return {
"masked_lm_accuracy": masked_lm_accuracy,
"masked_lm_loss": masked_lm_mean_loss,
"next_sentence_accuracy": next_sentence_accuracy,
"next_sentence_loss": next_sentence_mean_loss,
}
eval_metrics = (
metric_fn,
[
masked_lm_example_loss,
masked_lm_log_probs,
masked_lm_ids,
masked_lm_weights,
next_sentence_example_loss,
next_sentence_log_probs,
next_sentence_labels,
],
)
if FLAGS.use_tpu:
output_spec = tf.contrib.tpu.TPUEstimatorSpec(
mode=mode,
loss=total_loss,
eval_metrics=eval_metrics,
scaffold_fn=scaffold_fn,
)
else:
output_spec = tf.estimator.EstimatorSpec(mode=mode, loss=total_loss)
else:
raise ValueError("Only TRAIN and EVAL modes are supported: %s" % (mode))
return output_spec
return model_fn
def get_masked_lm_output(
bert_config, input_tensor, output_weights, positions, label_ids, label_weights
):
"""Get loss and log probs for the masked LM."""
input_tensor = gather_indexes(input_tensor, positions)
with tf.variable_scope("cls/predictions"):
# We apply one more non-linear transformation before the output layer.
# This matrix is not used after pre-training.
with tf.variable_scope("transform"):
input_tensor = tf.layers.dense(
input_tensor,
units=bert_config.hidden_size,
activation=modeling.get_activation(bert_config.hidden_act),
kernel_initializer=modeling.create_initializer(
bert_config.initializer_range
),
)
input_tensor = modeling.layer_norm(input_tensor)
# The output weights are the same as the input embeddings, but there is
# an output-only bias for each token.
output_bias = tf.get_variable(
"output_bias",
shape=[bert_config.vocab_size],
initializer=tf.zeros_initializer(),
)
logits = tf.matmul(input_tensor, output_weights, transpose_b=True)
logits = tf.nn.bias_add(logits, output_bias)
log_probs = tf.nn.log_softmax(logits, axis=-1)
label_ids = tf.reshape(label_ids, [-1])
label_weights = tf.reshape(label_weights, [-1])
one_hot_labels = tf.one_hot(
label_ids, depth=bert_config.vocab_size, dtype=tf.float32
)
# The `positions` tensor might be zero-padded (if the sequence is too
# short to have the maximum number of predictions). The `label_weights`
# tensor has a value of 1.0 for every real prediction and 0.0 for the
# padding predictions.
per_example_loss = -tf.reduce_sum(log_probs * one_hot_labels, axis=[-1])
numerator = tf.reduce_sum(label_weights * per_example_loss)
denominator = tf.reduce_sum(label_weights) + 1e-5
loss = numerator / denominator
return (loss, per_example_loss, log_probs)
def get_next_sentence_output(bert_config, input_tensor, labels):
"""Get loss and log probs for the next sentence prediction."""
# Simple binary classification. Note that 0 is "next sentence" and 1 is
# "random sentence". This weight matrix is not used after pre-training.
with tf.variable_scope("cls/seq_relationship"):
output_weights = tf.get_variable(
"output_weights",
shape=[2, bert_config.hidden_size],
initializer=modeling.create_initializer(bert_config.initializer_range),
)
output_bias = tf.get_variable(
"output_bias", shape=[2], initializer=tf.zeros_initializer()
)
logits = tf.matmul(input_tensor, output_weights, transpose_b=True)
logits = tf.nn.bias_add(logits, output_bias)
log_probs = tf.nn.log_softmax(logits, axis=-1)
labels = tf.reshape(labels, [-1])
one_hot_labels = tf.one_hot(labels, depth=2, dtype=tf.float32)
per_example_loss = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1)
loss = tf.reduce_mean(per_example_loss)
return (loss, per_example_loss, log_probs)
def gather_indexes(sequence_tensor, positions):
"""Gathers the vectors at the specific positions over a minibatch."""
sequence_shape = modeling.get_shape_list(sequence_tensor, expected_rank=3)
batch_size = sequence_shape[0]
seq_length = sequence_shape[1]
width = sequence_shape[2]
flat_offsets = tf.reshape(
tf.range(0, batch_size, dtype=tf.int32) * seq_length, [-1, 1]
)
flat_positions = tf.reshape(positions + flat_offsets, [-1])
flat_sequence_tensor = tf.reshape(sequence_tensor, [batch_size * seq_length, width])
output_tensor = tf.gather(flat_sequence_tensor, flat_positions)
return output_tensor
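# Worked example for gather_indexes (toy numbers, not taken from real data):
# with batch_size=2 and seq_length=3, flat_offsets is [[0], [3]], so
# positions=[[0, 2], [1, 1]] becomes flat_positions=[0, 2, 4, 4]; i.e. each
# example's positions are shifted by its row offset before the single flat gather.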
def input_fn_builder(
input_files, max_seq_length, max_predictions_per_seq, is_training, num_cpu_threads=4
):
"""Creates an `input_fn` closure to be passed to TPUEstimator."""
def input_fn(params):
"""The actual input function."""
batch_size = params["batch_size"]
name_to_features = {
"input_ids": tf.FixedLenFeature([max_seq_length], tf.int64),
"input_mask": tf.FixedLenFeature([max_seq_length], tf.int64),
"segment_ids": tf.FixedLenFeature([max_seq_length], tf.int64),
"masked_lm_positions": tf.FixedLenFeature(
[max_predictions_per_seq], tf.int64
),
"masked_lm_ids": tf.FixedLenFeature([max_predictions_per_seq], tf.int64),
"masked_lm_weights": tf.FixedLenFeature(
[max_predictions_per_seq], tf.float32
),
"next_sentence_labels": tf.FixedLenFeature([1], tf.int64),
}
# For training, we want a lot of parallel reading and shuffling.
# For eval, we want no shuffling and parallel reading doesn't matter.
if is_training:
d = tf.data.Dataset.from_tensor_slices(tf.constant(input_files))
d = d.repeat()
d = d.shuffle(buffer_size=len(input_files))
# `cycle_length` is the number of parallel files that get read.
cycle_length = min(num_cpu_threads, len(input_files))
# `sloppy` mode means that the interleaving is not exact. This adds
# even more randomness to the training pipeline.
d = d.apply(
tf.contrib.data.parallel_interleave(
tf.data.TFRecordDataset,
sloppy=is_training,
cycle_length=cycle_length,
)
)
d = d.shuffle(buffer_size=100)
else:
d = tf.data.TFRecordDataset(input_files)
# Since we evaluate for a fixed number of steps we don't want to encounter
# out-of-range exceptions.
d = d.repeat()
# We must `drop_remainder` on training because the TPU requires fixed
# size dimensions. For eval, we assume we are evaluating on the CPU or GPU
        # and we *don't* want to drop the remainder, otherwise we won't cover
# every sample.
d = d.apply(
tf.contrib.data.map_and_batch(
lambda record: _decode_record(record, name_to_features),
batch_size=batch_size,
num_parallel_batches=num_cpu_threads,
drop_remainder=True,
)
)
return d
return input_fn
def _decode_record(record, name_to_features):
"""Decodes a record to a TensorFlow example."""
example = tf.parse_single_example(record, name_to_features)
# tf.Example only supports tf.int64, but the TPU only supports tf.int32.
# So cast all int64 to int32.
for name in list(example.keys()):
t = example[name]
if t.dtype == tf.int64:
t = tf.to_int32(t)
example[name] = t
return example
def main(_):
tf.logging.set_verbosity(tf.logging.INFO)
if not FLAGS.do_train and not FLAGS.do_eval:
raise ValueError("At least one of `do_train` or `do_eval` must be True.")
bert_config = modeling.BertConfig.from_json_file(FLAGS.bert_config_file)
tf.gfile.MakeDirs(FLAGS.output_dir)
input_files = []
for input_pattern in FLAGS.input_file.split(","):
input_files.extend(tf.gfile.Glob(input_pattern))
tf.logging.info("*** Input Files ***")
for input_file in input_files:
tf.logging.info(" %s" % input_file)
tpu_cluster_resolver = None
if FLAGS.use_tpu and FLAGS.tpu_name:
tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver(
FLAGS.tpu_name, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project
)
is_per_host = tf.contrib.tpu.InputPipelineConfig.PER_HOST_V2
if FLAGS.use_tpu:
run_config = tf.contrib.tpu.RunConfig(
cluster=tpu_cluster_resolver,
master=FLAGS.master,
model_dir=FLAGS.output_dir,
save_checkpoints_steps=FLAGS.save_checkpoints_steps,
tpu_config=tf.contrib.tpu.TPUConfig(
iterations_per_loop=FLAGS.iterations_per_loop,
num_shards=FLAGS.num_tpu_cores,
per_host_input_for_training=is_per_host,
),
)
else:
if FLAGS.multi_worker:
# distribution = tf.contrib.distribute.CollectiveAllReduceStrategy(num_gpus_per_worker=1)
# run_config = tf.estimator.RunConfig(
# experimental_distribute=tf.contrib.distribute.DistributeConfig(
# train_distribute=distribution,
# remote_cluster={
# 'worker': ['localhost:5000', 'localhost:5001'],
# },
# )
# )
os.environ["TF_CONFIG"] = json.dumps(
{
"cluster": {"worker": worker},
"task": {"type": "worker", "index": task_index},
}
)
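            # With the default flags above (worker="localhost:3000,localhost:3001",
            # task_index=0), the serialized TF_CONFIG is, illustratively:
            #   {"cluster": {"worker": ["localhost:3000", "localhost:3001"]},
            #    "task": {"type": "worker", "index": 0}}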
strategy = tf.distribute.experimental.MultiWorkerMirroredStrategy()
run_config = tf.estimator.RunConfig(
save_summary_steps=1,
train_distribute=strategy,
model_dir=FLAGS.output_dir,
                save_checkpoints_steps=FLAGS.save_checkpoints_steps,
log_step_count_steps=1,
)
else:
distribution = tf.contrib.distribute.MirroredStrategy(
num_gpus=FLAGS.num_gpus
)
run_config = tf.estimator.RunConfig(train_distribute=distribution)
model_fn = model_fn_builder(
bert_config=bert_config,
init_checkpoint=FLAGS.init_checkpoint,
learning_rate=FLAGS.learning_rate,
num_train_steps=FLAGS.num_train_steps,
num_warmup_steps=FLAGS.num_warmup_steps,
use_tpu=FLAGS.use_tpu,
use_one_hot_embeddings=True,
)
# If TPU is not available, this will fall back to normal Estimator on CPU
# or GPU.
if FLAGS.use_tpu:
estimator = tf.contrib.tpu.TPUEstimator(
use_tpu=FLAGS.use_tpu,
model_fn=model_fn,
config=run_config,
train_batch_size=FLAGS.train_batch_size,
eval_batch_size=FLAGS.eval_batch_size,
)
else:
estimator = tf.estimator.Estimator(
model_fn=model_fn,
config=run_config,
params={
"batch_size": FLAGS.train_batch_size
if FLAGS.do_train
else FLAGS.eval_batch_size,
},
)
if FLAGS.do_train and FLAGS.do_eval:
tf.logging.info("***** Running training *****")
tf.logging.info(" Training batch size = %d", FLAGS.train_batch_size)
train_input_fn = input_fn_builder(
input_files=input_files,
max_seq_length=FLAGS.max_seq_length,
max_predictions_per_seq=FLAGS.max_predictions_per_seq,
is_training=True,
)
eval_input_fn = input_fn_builder(
input_files=input_files,
max_seq_length=FLAGS.max_seq_length,
max_predictions_per_seq=FLAGS.max_predictions_per_seq,
is_training=False,
)
tf.estimator.train_and_evaluate(
estimator,
train_spec=tf.estimator.TrainSpec(input_fn=train_input_fn, max_steps=100),
eval_spec=tf.estimator.EvalSpec(input_fn=eval_input_fn, steps=10),
)
# if FLAGS.do_train:
# tf.logging.info("***** Running training *****")
# tf.logging.info(" Batch size = %d", FLAGS.train_batch_size)
# train_input_fn = input_fn_builder(
# input_files=input_files,
# max_seq_length=FLAGS.max_seq_length,
# max_predictions_per_seq=FLAGS.max_predictions_per_seq,
# is_training=True)
# estimator.train(input_fn=train_input_fn, max_steps=FLAGS.num_train_steps)
# if FLAGS.do_eval:
# tf.logging.info("***** Running evaluation *****")
# tf.logging.info(" Batch size = %d", FLAGS.eval_batch_size)
# eval_input_fn = input_fn_builder(
# input_files=input_files,
# max_seq_length=FLAGS.max_seq_length,
# max_predictions_per_seq=FLAGS.max_predictions_per_seq,
# is_training=False)
# result = estimator.evaluate(
# input_fn=eval_input_fn, steps=FLAGS.max_eval_steps)
# output_eval_file = os.path.join(FLAGS.output_dir, "eval_results.txt")
# with tf.gfile.GFile(output_eval_file, "w") as writer:
# tf.logging.info("***** Eval results *****")
# for key in sorted(result.keys()):
# tf.logging.info(" %s = %s", key, str(result[key]))
# writer.write("%s = %s\n" % (key, str(result[key])))
if __name__ == "__main__":
flags.mark_flag_as_required("input_file")
flags.mark_flag_as_required("bert_config_file")
flags.mark_flag_as_required("output_dir")
tf.app.run()
|
[] |
[] |
[
"CUDA_VISIBLE_DEVICES",
"TF_CONFIG"
] |
[]
|
["CUDA_VISIBLE_DEVICES", "TF_CONFIG"]
|
python
| 2 | 0 | |
internal/output/colorstyle/console_windows.go
|
package colorstyle
/*
Sourced from: https://github.com/daviddengcn/go-colortext
See license.txt in same directory for license information
*/
import (
"io"
"os"
"syscall"
"unsafe"
"github.com/ActiveState/cli/internal/logging"
)
var consoleStyleMap = map[Style]uint16{
Black: 0,
Red: consoleRed,
Green: consoleGreen,
Yellow: consoleRed | consoleGreen,
Blue: consoleBlue,
Magenta: consoleRed | consoleBlue,
Cyan: consoleGreen | consoleBlue,
White: consoleRed | consoleGreen | consoleBlue}
const (
consoleBlue = uint16(0x0001)
consoleGreen = uint16(0x0002)
consoleRed = uint16(0x0004)
consoleIntensity = uint16(0x0008)
consoleColorMask = consoleBlue | consoleGreen | consoleRed | consoleIntensity
)
const (
stdOutHandle = uint32(-11 & 0xFFFFFFFF)
)
type consoleBufferDimensions struct {
X, Y int16
}
type consoleBuffer struct {
DwSize consoleBufferDimensions
DwCursorPosition consoleBufferDimensions
WAttributes uint16
SrWindow struct {
Left, Top, Right, Bottom int16
}
DwMaximumWindowSize consoleBufferDimensions
}
var (
kernel32 = syscall.NewLazyDLL("kernel32.dll")
procGetStdHandle = kernel32.NewProc("GetStdHandle")
procSetconsoleTextAttribute = kernel32.NewProc("SetConsoleTextAttribute")
procGetconsoleScreenBufferInfo = kernel32.NewProc("GetConsoleScreenBufferInfo")
hStdout uintptr
bufferInfo *consoleBuffer
)
func init() {
kernel32 := syscall.NewLazyDLL("kernel32.dll")
procGetStdHandle = kernel32.NewProc("GetStdHandle")
hStdout, _, _ = procGetStdHandle.Call(uintptr(stdOutHandle))
bufferInfo = getConsoleScreenBufferInfo(hStdout)
syscall.LoadDLL("")
}
type Styler struct {
}
func New(writer io.Writer) *Styler {
return &Styler{}
}
func (st *Styler) SetStyle(s Style, bright bool) {
if bufferInfo == nil {
return
}
defer func() {
if r := recover(); r != nil {
if os.Getenv("CI") == "" {
logging.Errorf("colorstyle.SetStyle failed with: %v", r)
}
}
}()
if s == Bold || s == Underline {
return // underline/bold is not supported on windows
}
attr := uint16(0)
if s == Default || s == Reset {
attr = bufferInfo.WAttributes
} else if s == Dim {
attr = attr & ^consoleColorMask | consoleStyleMap[Black]
bright = true
} else {
if style, ok := consoleStyleMap[s]; ok {
attr = attr & ^consoleColorMask | style
}
}
if bright {
attr |= consoleIntensity
}
setConsoleTextAttribute(hStdout, attr)
}
func setConsoleTextAttribute(hconsoleOutput uintptr, wAttributes uint16) bool {
ret, _, _ := procSetconsoleTextAttribute.Call(
hconsoleOutput,
uintptr(wAttributes))
return ret != 0
}
func getConsoleScreenBufferInfo(hconsoleOutput uintptr) *consoleBuffer {
var csbi consoleBuffer
if ret, _, _ := procGetconsoleScreenBufferInfo.Call(hconsoleOutput, uintptr(unsafe.Pointer(&csbi))); ret == 0 {
return nil
}
return &csbi
}
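// exampleUsage is an illustrative sketch, not part of the original file: it shows how a
// caller might drive the Styler defined above. New currently ignores its writer argument,
// so os.Stdout is passed purely for documentation; Red and Default are assumed to be
// Style values defined elsewhere in this package.
func exampleUsage() {
	st := New(os.Stdout)
	st.SetStyle(Red, true) // bright red console text
	os.Stdout.WriteString("error: something went wrong\n")
	st.SetStyle(Default, false) // restore the attributes captured at init time
}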
| ["\"CI\""] | [] | ["CI"] | [] | ["CI"] | go | 1 | 0 | |
docker/main.go
|
package main
import (
"fmt"
kuhnuri "github.com/kuhnuri/go-worker"
"io/ioutil"
"log"
"net/url"
"os"
"os/exec"
"path/filepath"
)
type Args struct {
src *url.URL
dst *url.URL
tmp string
out string
}
func readArgs() *Args {
input := os.Getenv("input")
if input == "" {
log.Fatalf("Input environment variable not set")
}
output := os.Getenv("output")
if output == "" {
log.Fatalf("Output environment variable not set")
}
src, err := url.Parse(input)
if err != nil {
log.Fatalf("Failed to parse input argument %s: %v", input, err)
}
dst, err := url.Parse(output)
if err != nil {
log.Fatalf("Failed to parse output argument %s: %v", output, err)
}
tmp, err := ioutil.TempDir("", "tmp")
if err != nil {
log.Fatalf("Failed to create temporary directory: %v", err)
}
out, err := ioutil.TempDir("", "out")
if err != nil {
log.Fatalf("Failed to create temporary directory: %v", err)
}
return &Args{src, dst, tmp, out}
}
func convert(srcDir string, dstDir string) error {
	// Return the error from filepath.Walk instead of silently discarding it.
	return filepath.Walk(srcDir, func(src string, info os.FileInfo, err error) error {
		if err != nil {
			return err
		}
		if filepath.Ext(src) == ".html" || filepath.Ext(src) == ".fo" {
rel, err := filepath.Rel(srcDir, src)
if err != nil {
return fmt.Errorf("Failed to relativize source file path: %v", err)
}
dst := kuhnuri.WithExt(filepath.Join(dstDir, rel), ".pdf")
dir := filepath.Dir(dst)
if err := kuhnuri.MkDirs(dir); err != nil {
return err
}
fmt.Printf("INFO: Convert %s %s\n", src, dst)
cmd := exec.Command("/AHFormatter/bin/AHFCmd",
"-d", src,
"-o", dst,
"-x", "4")
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
if err := cmd.Run(); err != nil {
return fmt.Errorf("Failed to convert: %v", err)
}
}
return nil
})
}
func main() {
args := readArgs()
if _, err := kuhnuri.DownloadFile(args.src, args.tmp); err != nil {
log.Fatalf("Failed to download %s: %v", args.src, err)
}
if err := convert(args.tmp, args.out); err != nil {
log.Fatalf("Failed to convert %s: %v", args.tmp, err)
}
if err := kuhnuri.UploadFile(args.out, args.dst); err != nil {
log.Fatalf("Failed to upload %s: %v", args.dst, err)
}
}
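// exampleLocalRun is an illustrative sketch, not part of the original worker: it shows the
// two environment variables readArgs expects before main runs. The URLs are hypothetical;
// any scheme understood by kuhnuri.DownloadFile and kuhnuri.UploadFile would work.
func exampleLocalRun() {
	os.Setenv("input", "s3://example-bucket/job-123/input.zip")
	os.Setenv("output", "s3://example-bucket/job-123/output.zip")
	args := readArgs()
	fmt.Printf("INFO: src=%s dst=%s tmp=%s out=%s\n", args.src, args.dst, args.tmp, args.out)
}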
| ["\"input\"", "\"output\""] | [] | ["output", "input"] | [] | ["output", "input"] | go | 2 | 0 | |
amplify/backend/function/tempsearch/src/index.py
|
import string
import json
import springer_api
import elsevier_api
import ieee_api
from enum import Enum
import os
import boto3
def handler(event, context):
"""
Takes the search given by event and returns 20 records per API,
or if page = 0 saves the search.
ExampleEvent = {
"event":
{
"sub": "xxxxxxxx-xxxxxxxx" # must be set when saving
"query": {
"condition": "AND",
"rules": [
{
"field": "article_title",
"id": "article_title",
"input": "text",
"operator": "contains",
"type": "string",
"value": "systematic review"
},
{
"field": "openaccess",
"id": "openaccess",
"input": "select",
"operator": "equal",
"type": "integer",
"value": 1
}
],
"valid": True
},
"page": 1, # 0 to save
"name": "name of search" # must be set when saving
"databases": [
{
"name": "ieee"
},
{
"name": "springer"
}
]
}
}
Parameters
----------
event : json
Json with infos about the user and the requested search (look ExampleEvent)
context : context
Needed by lambda
"""
# read query from event
query = event["event"]["query"]
# results json
results = {}
# set booleans for api decision
springer, elsevier, ieee = False, False, False
for api in event["event"]["databases"]:
if api['name'] == 'springer':
springer = True
elif api['name'] == 'elsevier':
elsevier = True
elif api['name'] == 'ieee':
ieee = True
# read terms from query
terms = read_terms(query)
# check if search should be saved and fetch all
if event["event"]["page"] == 0:
# save results and terms to db
print(event)
boto3_client = boto3.client('lambda', region_name='eu-central-1')
boto3_client.invoke(FunctionName='savesearch-dustindev', InvocationType='Event', Payload=json.dumps(event))
return {
"statusCode": 200,
'message': 'save initiated'
}
# if a page is requested fetch results and return them
# springer results
if springer:
results = springer_api.fetch_springer(
event["event"]["query"], os.environ["SPRINGER"], event["event"]["page"])
# add total results for springer (total results already set)
results.update({"springer_total": int(results["total"])})
else:
results = {
'records': [],
'total': 0
}
# elsevier results
if elsevier:
elsevier_results = elsevier_api.fetch_elsevier(
query, os.environ["ELSEVIER"], event["event"]["page"])
for record in elsevier_results['records']:
results['records'].append(record)
# add total results for elsevier and update total results
results["elsevier_total"] = elsevier_results["total"]
results.update({"total": int(results["total"]) + int(elsevier_results["total"])})
# print(json.dumps(results, indent=4))
# ieee results
if ieee:
ieee_results = ieee_api.fetch_ieee(query, os.environ["IEEE"], event["event"]["page"])
for record in ieee_results['records']:
results['records'].append(record)
# add total results for ieee and update total results
results["ieee_total"] = int(ieee_results["total"])
results.update({"total": int(results["total"]) + int(ieee_results["total"])})
# remove duplicates
results = removeDuplicates(results)
# create and return json
response = {
"statusCode": 200,
"body": results
}
return response
def removeDuplicates(results):
"""
Removes duplicates from results json
Parameters
----------
results : json
Json with all fetched results
"""
dois = []
clean_results = {"records": []}
for result in results['records']:
if 'doi' not in result:
clean_results['records'].append(result)
elif (result['doi'] not in dois):
dois.append(result['doi'])
else:
results['total'] = int(results['total']) - 1
for result in results['records']:
if ('doi' in result and result['doi'] in dois):
clean_results['records'].append(result)
dois.remove(result['doi'])
clean_results.update({"total": results['total']})
if 'springer_total' in results:
clean_results.update({"springer_total": results['springer_total']})
if 'elsevier_total' in results:
clean_results.update({"elsevier_total": results['elsevier_total']})
if 'ieee_total' in results:
clean_results.update({"ieee_total": results['ieee_total']})
return clean_results
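# Illustrative sketch (not part of the original handler): removeDuplicates keeps the first
# record seen for each DOI, always keeps records without a DOI, and decrements the running
# total for every duplicate it drops. The records below are invented to show the shape.
def _example_remove_duplicates():
    sample = {
        "total": 3,
        "records": [
            {"doi": "10.1000/a", "title": "first copy"},
            {"doi": "10.1000/a", "title": "duplicate copy"},
            {"title": "record without a doi"},
        ],
    }
    # returns {"records": [<no-doi record>, <first copy>], "total": 2}
    return removeDuplicates(sample)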
class Types(Enum):
Chapter = 1
ConferencePaper = 2
Article = 3
ReferenceWorkEntry = 4
Book = 5
ConferenceProceedings = 6
VideoSegment = 7
BookSeries = 8
Journal = 9
Video = 10
ReferenceWork = 11
Protocol = 12
def filterDate(results, givenDate):
for record in results["records"]:
date = record["date"]
if int(date[0:4]) < givenDate:
results["records"].remove(record)
results = filterDate(results, givenDate)
break
return results
def filterOpenAccess(results):
for record in results["records"]:
if record["openaccess"] == False:
results["records"].remove(record)
results = filterOpenAccess(results)
break
return results
def filterType(results, givenType):
for record in results["records"]:
if record["type"] != givenType:
results["records"].remove(record)
results = filterType(results, givenType)
break
return results
def calculateTFIDF(results, searchQuerys):
queryString = ""
for searchQuery in searchQuerys["rules"]:
if searchQuery["field"] == "title" and searchQuery["input"] == "text" and searchQuery["operator"] == "contains":
queryString = queryString + " " + searchQuery["value"]
searchString = queryString.strip(string.punctuation)
searchStrings = searchString.split()
print(searchStrings)
for record in results["records"]:
words = record["abstract"].split(" ")
numberOfWords = len(words)
countMatches = 1
for word in words:
cleanedWord = word.strip(string.punctuation)
for searchString in searchStrings:
if cleanedWord.lower() == searchString.lower():
countMatches += 1
record.update({"tfidf": int(round((10000*countMatches)/numberOfWords))})
return results
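# Illustrative sketch (not part of the original handler): calculateTFIDF counts how often
# the words from title rules appear in each abstract and scales by abstract length. The
# query and record below are invented; note the function only inspects rules whose field
# is exactly "title".
def _example_tfidf_score():
    query = {"rules": [{"field": "title", "input": "text", "operator": "contains",
                        "value": "chaos theory"}]}
    results = {"records": [{"abstract": "An introduction to chaos theory and chaotic maps."}]}
    scored = calculateTFIDF(results, query)
    # 8 words, 2 matches + 1 -> int(round(10000 * 3 / 8)) == 3750
    return scored["records"][0]["tfidf"]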
def read_terms(query):
'''Read the terms and put them to a list
Parameters
----------
query : json
Json with the query
Returns
-------
list
List with terms
'''
terms = []
terms = get_terms(query, terms)
return terms
def get_terms(query, terms):
'''Helper function to get terms
Parameters
----------
query : json
Json with the query
terms :list
Empty list
Returns
-------
list
List with terms
'''
for rule in query['rules']:
if "condition" in rule:
terms = get_terms(rule, terms)
else:
terms.append(handleSingleRule(rule, terms))
return terms
def handleSingleRule(rule, terms):
'''Helper function to get a term from a querybuilder rule
Parameters
----------
rule : json
Json with the rule of the query
terms :list
terms list to add a term
Returns
-------
list
List with terms
'''
if rule['field'] == 'doi':
term = {
"type": "DOI",
"term": rule['value'],
"description": ""
}
return term
elif rule['field'] == 'keyword':
term = {
"type": "Keyword",
"term": rule['value'],
"description": ""
}
return term
elif rule['field'] == 'publication_title' and rule['operator'] == 'equals':
term = {
"type": "Publication Title",
"term": rule['value'],
"description": ""
}
return term
elif rule['field'] == 'publication_year':
term = {
"type": "Publication Year",
"term": rule['value'],
"description": ""
}
return term
elif rule['field'] == 'isbn':
term = {
"type": "ISBN",
"term": rule['value'],
"description": ""
}
return term
elif rule['field'] == 'issn':
term = {
"type": "ISSN",
"term": rule['value'],
"description": ""
}
return term
elif rule['field'] == 'openaccess' and rule['value'] == 1:
term = {
"type": "Open Access",
"term": "True",
"description": ""
}
return term
elif rule['field'] == 'openaccess' and rule['value'] == 0:
term = {
"type": "Open Access",
"term": "False",
"description": ""
}
return term
elif rule['field'] == 'article_title':
term = {
"type": "Article Title",
"term": rule['value'],
"description": ""
}
return term
elif rule['field'] == 'publication_title' and rule['operator'] == 'contains':
term = {
"type": "Publication Title",
"term": rule['value'],
"description": ""
}
return term
elif rule['field'] == 'author':
term = {
"type": "Author",
"term": rule['value'],
"description": ""
}
return term
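# Illustrative sketch (not part of the original handler): what read_terms produces for a
# minimal query. The rule below is invented and only exercises the 'doi' branch of
# handleSingleRule above.
def _example_read_terms():
    query = {"condition": "AND",
             "rules": [{"field": "doi", "operator": "equal", "value": "10.1000/xyz123"}]}
    # -> [{"type": "DOI", "term": "10.1000/xyz123", "description": ""}]
    return read_terms(query)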
if __name__ == "__main__":
event = {
"event":
{
"query": {
"condition": "AND",
"rules": [
{
"field": "article_title",
"id": "article_title",
"input": "text",
"operator": "contains",
"type": "string",
"value": "chaos theory"
},
{
"field": "openaccess",
"id": "openaccess",
"input": "select",
"operator": "equal",
"type": "integer",
"value": 1
}
],
"valid": True
},
"page": 0,
"sub": "",
"databases": [
{
"name": "ieee"
},
{
"name": "springer"
},
{
"name": "elsevier"
}
]
}
}
ExampleEvent = {
"event":
{
# "sub": "149ec720-ccbc-43c3-a854-e70665546ea6",
"query": {
"condition": "AND",
"rules": [
{
"field": "article_title",
"id": "article_title",
"input": "text",
"operator": "contains",
"type": "string",
"value": "chaos theory"
},
{
"field": "openaccess",
"id": "openaccess",
"input": "select",
"operator": "equal",
"type": "integer",
"value": 1
}
],
"valid": True
},
"page": 0,
"name": "hmmmm hmmmmmsssss", # must be set when saving
"sub": "149ec720-ccbc-43c3-a854-e70665546ea6",
"review_id": "5f0dc32d9424f7348d240293",
"databases": [
{
"name": "springer"
},
{
"name": "elsevier"
},
{
"name": "ieee"
}
]
}
}
# print()
print(handler(ExampleEvent, 0))
terms_test_query = {
"condition": "AND",
"rules": [
{
"id": "openaccess",
"field": "openaccess",
"type": "integer",
"input": "number",
"operator": "less",
"value": 1
},
{
"condition": "OR",
"rules": [
{
"id": "category",
"field": "article_title",
"type": "integer",
"input": "select",
"operator": "equal",
"value": "title 1"
},
{
"id": "category",
"field": "article_title",
"type": "integer",
"input": "select",
"operator": "equal",
"value": "title 2"
}
]
}
]
}
terms = []
# print(read_terms(terms_test_query))
# with open('results.json', 'r') as f:
# results = json.load(f)
# results = removeDuplicates(results)
doi_example = {
"condition": "AND",
"rules": [
{
"field": "article_title",
"id": "article_title",
"input": "text",
"operator": "contains",
"type": "string",
"value": "chaos theory"
},
{
"field": "publication_year",
"id": "publication_year",
"input": "number",
"operator": "equal",
"type": "integer",
"value": 2017
}
]
}
| [] | [] | ["ELSEVIER", "SPRINGER", "IEEE"] | [] | ["ELSEVIER", "SPRINGER", "IEEE"] | python | 3 | 0 | |
atomsci/ddm/pipeline/model_pipeline.py
|
#!/usr/bin/env python
"""
Contains class ModelPipeline, which loads in a dataset, splits it, trains a model, and generates predictions and output
metrics for that model. Works for a variety of featurizers, splitters and other parameters on a generic dataset
"""
import json
import logging
import os
import io
import sys
import time
import uuid
import tempfile
import tarfile
import deepchem as dc
import numpy as np
import time
import pandas as pd
import scipy as sp
from sklearn.metrics import pairwise_distances
import pdb
import copy
from atomsci.ddm.utils import datastore_functions as dsf
import atomsci.ddm.utils.model_version_utils as mu
import pkg_resources
if ('site-packages' in dsf.__file__) or ('dist-packages' in dsf.__file__): # install_dev.sh points to github directory
import subprocess
import json
data = subprocess.check_output(["pip", "list", "--format", "json"])
parsed_results = json.loads(data)
ampl_version=next(item for item in parsed_results if item["name"] == "atomsci-ampl")['version']
else:
try:
VERSION_fn = os.path.join(
os.path.dirname(pkg_resources.resource_filename('atomsci', '')),
'VERSION')
except:
VERSION_fn = dsf.__file__.rsplit('/', maxsplit=4)[0]+'/VERSION'
f=open(VERSION_fn, 'r')
ampl_version = f.read().strip()
f.close()
from atomsci.ddm.pipeline import model_datasets as model_datasets
from atomsci.ddm.pipeline import model_wrapper as model_wrapper
from atomsci.ddm.pipeline import featurization as feat
from atomsci.ddm.pipeline import parameter_parser as parse
from atomsci.ddm.pipeline import model_tracker as trkr
from atomsci.ddm.pipeline import transformations as trans
logging.basicConfig(format='%(asctime)-15s %(message)s')
# ---------------------------------------------
def calc_AD_kmean_dist(train_dset, pred_dset, k, train_dset_pair_distance=None, dist_metric="euclidean"):
"""
Calculate the probability that the prediction dataset falls within the domain of the training set, using the distance to the k nearest neighbours (Euclidean by default).
train_dset and pred_dset should be in 2D numpy array format where each row is a compound.
"""
if train_dset_pair_distance is None:
# calculate the pairwise distances within the training set
train_dset_pair_distance = pairwise_distances(X=train_dset, metric=dist_metric)
train_kmean_dis = []
for i in range(len(train_dset_pair_distance)):
kn_idx = np.argpartition(train_dset_pair_distance[i], k+1)
dis = np.mean(train_dset_pair_distance[i][kn_idx[:k+1]])
train_kmean_dis.append(dis)
train_dset_distribution = sp.stats.norm.fit(train_kmean_dis)
# pairwise distance between train and pred set
pred_size = len(pred_dset)
train_pred_dis = pairwise_distances(X=pred_dset, Y=train_dset, metric=dist_metric)
pred_kmean_dis_score = np.zeros(pred_size)
for i in range(pred_size):
pred_km_dis = np.mean(np.sort(train_pred_dis[i])[:k])
train_dset_std = train_dset_distribution[1] if train_dset_distribution[1] != 0 else 1e-6
pred_kmean_dis_score[i] = max(1e-6, (pred_km_dis - train_dset_distribution[0]) / train_dset_std)
return pred_kmean_dis_score
# ---------------------------------------------
def calc_AD_kmean_local_density(train_dset, pred_dset, k, train_dset_pair_distance=None, dist_metric="euclidean"):
"""
Evaluate the AD of the prediction data by comparing the distance between the unseen object and its k nearest neighbors in the training set to the distance between those k nearest neighbors and their own k nearest neighbors in the training set. Returns the distance ratio; a value greater than 1 means the prediction data is far from the domain.
"""
if train_dset_pair_distance is None:
# calculate the pairwise distances within the training set
train_dset_pair_distance = pairwise_distances(X=train_dset, metric=dist_metric)
# pairwise distance between train and pred set
pred_size = len(pred_dset)
train_pred_dis = pairwise_distances(X=pred_dset, Y=train_dset, metric=dist_metric)
pred_kmean_dis_local_density = np.zeros(pred_size)
for i in range(pred_size):
# find the index of k nearest neighbour of each prediction data
kn_idx = np.argpartition(train_pred_dis[i], k)
pred_km_dis = np.mean(train_pred_dis[i][kn_idx[:k]])
# find the neighbours of each neighbour and calculate the distance
neighbor_dis = []
for nei_ix in kn_idx[:k]:
nei_kn_idx = np.argpartition(train_dset_pair_distance[nei_ix], k)
neighbor_dis.append(np.mean(train_dset_pair_distance[nei_ix][nei_kn_idx[:k]]))
ave_nei_dis = np.mean(neighbor_dis)
if ave_nei_dis == 0:
ave_nei_dis = 1e-6
pred_kmean_dis_local_density[i] = pred_km_dis / ave_nei_dis
return pred_kmean_dis_local_density
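# ---------------------------------------------
# Illustrative sketch (not part of the pipeline): the two AD helpers above operate on plain
# 2-D feature matrices, so they can be exercised with random data. The shapes and k value
# below are arbitrary.
def _example_ad_scores():
    rng = np.random.default_rng(0)
    train_feats = rng.normal(size=(100, 16))   # 100 training compounds, 16 features
    pred_feats = rng.normal(size=(5, 16))      # 5 compounds to score
    z_scores = calc_AD_kmean_dist(train_feats, pred_feats, k=5)
    densities = calc_AD_kmean_local_density(train_feats, pred_feats, k=5)
    return z_scores, densities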
# ---------------------------------------------
def build_tarball_name(dataset_name, model_uuid, result_dir=''):
""" format for building model tarball names
Creates the file name for a model tarball from dataset key and model_uuid
with optional result_dir.
Args:
dataset_name (str): The dataset_name used to train this model
model_uuid (str): The model_uuid assigned to this model
result_dir (str): Optional directory for this model
Returns:
The path or filename of the tarball for this model
"""
model_tarball_path = os.path.join(str(result_dir), "{}_model_{}.tar.gz".format(dataset_name, model_uuid))
return model_tarball_path
# ---------------------------------------------
def build_dataset_name(dataset_key):
""" Returns dataset_name when given dataset_key
Returns the dataset_name when given a dataset_key. Assumes that the dataset_name is a path
and ends with an extension
Args:
dataset_key (str): A dataset_key
Returns:
The dataset_name which is the base name stripped of extensions
"""
return os.path.splitext(os.path.basename(dataset_key))[0]
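# ---------------------------------------------
# Illustrative sketch (not part of the pipeline): how the two naming helpers above fit
# together. The dataset key and UUID are made-up values.
def _example_tarball_naming():
    dataset_key = "/data/projects/solubility/solubility_curated.csv"
    dataset_name = build_dataset_name(dataset_key)  # -> "solubility_curated"
    # -> "/tmp/results/solubility_curated_model_1234-abcd.tar.gz"
    return build_tarball_name(dataset_name, "1234-abcd", result_dir="/tmp/results")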
# ******************************************************************************************************************************
class ModelPipeline:
"""Contains methods to load in a dataset, split and featurize the data, fit a model to the train dataset,
generate predictions for an input dataset, and generate performance metrics for these predictions.
Attributes:
Set in __init__:
params (argparse.Namespace): The argparse.Namespace parameter object
log (log): The logger
run_mode (str): A flag determining the mode of the model pipeline (e.g. training or prediction)
params.dataset_name (argparse.Namespace): The dataset_name parameter of the dataset
ds_client (ac.DatastoreClient): the datastore api token to interact with the datastore
perf_dict (dict): The performance dictionary
output_dir (str): The parent path of the model directory
mlmt_client: The mlmt service client
metric_type (str): Defines the type of metric (e.g. roc_auc_score, r2_score)
set in train_model or run_predictions:
run_mode (str): The mode to run the pipeline, set to training
featurization (Featurization object): The featurization argument or the featurization created from the
input parameters
model_wrapper (ModelWrapper object): A model wrapper created from the parameters and featurization object.
set in create_model_metadata:
model_metadata (dict): The model metadata dictionary that stores the model metrics and metadata
Set in load_featurize_data
data (ModelDataset object): A data object that featurizes and splits the dataset
"""
def __init__(self, params, ds_client=None, mlmt_client=None):
"""Initializes ModelPipeline object.
Args:
params (Namespace object): contains all parameter information.
ds_client: datastore client.
mlmt_client: model tracker client.
Side effects:
Sets the following ModelPipeline attributes:
params (argparse.Namespace): The argparse.Namespace parameter object
log (log): The logger
run_mode (str): A flag determining the mode of the model pipeline (e.g. training or prediction)
params.dataset_name (argparse.Namespace): The dataset_name parameter of the dataset
ds_client (ac.DatastoreClient): the datastore api token to interact with the datastore
perf_dict (dict): The performance dictionary
output_dir (str): The parent path of the model directory.
mlmt_client: The mlmt service
metric_type (str): Defines the type of metric (e.g. roc_auc_score, r2_score)
"""
self.params = params
self.log = logging.getLogger('ATOM')
self.run_mode = 'training' # default, can be overridden later
self.start_time = time.time()
# if model is NN, set the uncertainty to False.
# https://github.com/deepchem/deepchem/issues/2422
if self.params.model_type == 'NN':
self.params.uncertainty = False
# Default dataset_name parameter from dataset_key
if params.dataset_name is None:
self.params.dataset_name = build_dataset_name(self.params.dataset_key)
self.ds_client = None
if params.datastore:
if ds_client is None:
self.ds_client = dsf.config_client()
else:
self.ds_client = ds_client
# Check consistency of task parameters
if type(params.response_cols) == str:
params.response_cols = [params.response_cols]
if params.num_model_tasks != len(params.response_cols):
raise ValueError("num_model_tasks parameter is inconsistent with response_cols")
if self.params.model_uuid is None:
self.params.model_uuid = str(uuid.uuid4())
if self.params.save_results:
self.mlmt_client = dsf.initialize_model_tracker()
self.perf_dict = {}
if self.params.prediction_type == 'regression':
if self.params.num_model_tasks > 1:
self.metric_type = 'mean-r2_score'
else:
self.metric_type = 'r2_score'
else:
if self.params.num_model_tasks > 1:
self.metric_type = 'mean-roc_auc_score'
else:
self.metric_type = 'roc_auc_score'
if self.params.output_dir is None:
self.params.output_dir = os.path.join(self.params.result_dir, self.params.dataset_name, '%s_%s_%s_%s' %
(
self.params.model_type,
self.params.featurizer,
self.params.splitter, self.params.prediction_type),
self.params.model_uuid)
if not os.path.isdir(self.params.output_dir):
os.makedirs(self.params.output_dir, exist_ok=True)
self.output_dir = self.params.output_dir
if self.params.model_tarball_path is None:
self.params.model_tarball_path = build_tarball_name(self.params.dataset_name, self.params.model_uuid, self.params.result_dir)
# ****************************************************************************************
def load_featurize_data(self):
"""Loads the dataset from the datastore or the file system and featurizes it. If we are training
a new model, split the dataset into training, validation and test sets.
The data is also split into training, validation, and test sets and saved to the filesystem or datastore.
Assumes a ModelWrapper object has already been created.
Side effects:
Sets the following attributes of the ModelPipeline
data (ModelDataset object): A data object that featurizes and splits the dataset
data.dataset(dc.DiskDataset): The transformed, featurized, and split dataset
"""
self.data = model_datasets.create_model_dataset(self.params, self.featurization, self.ds_client)
self.data.get_featurized_data()
if self.run_mode == 'training':
if not (self.params.previously_split and self.data.load_presplit_dataset()):
self.data.split_dataset()
self.data.save_split_dataset()
# We now create transformers after splitting, to allow for the case where the transformer
# is fitted to the training data only. The transformers are then applied to the training,
# validation and test sets separately.
if not self.params.split_only:
self.model_wrapper.create_transformers(self.data)
else:
self.run_mode = ''
if self.run_mode == 'training':
for i, (train, valid) in enumerate(self.data.train_valid_dsets):
train = self.model_wrapper.transform_dataset(train)
valid = self.model_wrapper.transform_dataset(valid)
self.data.train_valid_dsets[i] = (train, valid)
self.data.test_dset = self.model_wrapper.transform_dataset(self.data.test_dset)
# ****************************************************************************************
def create_model_metadata(self):
"""Initializes a data structure describing the current model, to be saved in the model zoo.
This should include everything necessary to reproduce a model run.
Side effect:
Sets self.model_metadata (dictionary): A dictionary of the model metadata required to recreate the model.
Also contains metadata about the generating dataset.
"""
if self.params.datastore:
dataset_metadata = dsf.get_keyval(dataset_key=self.params.dataset_key, bucket=self.params.bucket)
else:
dataset_metadata = {}
if 'dataset_hash' not in self.params:
self.params.dataset_hash=None
train_dset_data = dict(
datastore=self.params.datastore,
dataset_key=self.params.dataset_key,
bucket=self.params.bucket,
dataset_oid=self.data.dataset_oid,
dataset_hash=self.params.dataset_hash,
id_col=self.params.id_col,
smiles_col=self.params.smiles_col,
response_cols=self.params.response_cols,
feature_transform_type=self.params.feature_transform_type,
response_transform_type=self.params.response_transform_type,
external_export_parameters=dict(
result_dir=self.params.result_dir),
dataset_metadata=dataset_metadata
)
model_params = dict(
model_bucket=self.params.model_bucket,
system=self.params.system,
model_type=self.params.model_type,
featurizer=self.params.featurizer,
prediction_type=self.params.prediction_type,
model_choice_score_type=self.params.model_choice_score_type,
num_model_tasks=self.params.num_model_tasks,
transformers=self.params.transformers,
transformer_key=self.params.transformer_key,
transformer_bucket=self.params.transformer_bucket,
transformer_oid=self.params.transformer_oid,
uncertainty=self.params.uncertainty,
time_generated=time.time(),
save_results=self.params.save_results,
hyperparam_uuid=self.params.hyperparam_uuid,
ampl_version=ampl_version
)
splitting_metadata = self.data.get_split_metadata()
model_metadata = dict(
model_uuid=self.params.model_uuid,
time_built=time.time(),
model_parameters=model_params,
training_dataset=train_dset_data,
splitting_parameters=splitting_metadata
)
model_spec_metadata = self.model_wrapper.get_model_specific_metadata()
for key, data in model_spec_metadata.items():
model_metadata[key] = data
feature_specific_metadata = self.data.featurization.get_feature_specific_metadata(self.params)
for key, data in feature_specific_metadata.items():
model_metadata[key] = data
for key, data in trans.get_transformer_specific_metadata(self.params).items():
model_metadata[key] = data
self.model_metadata = model_metadata
# ****************************************************************************************
def save_model_metadata(self, retries=5, sleep_sec=60):
"""
Saves the data needed to reload the model in the model tracker DB or in a local tarball file.
Inserts the model metadata into the model tracker DB, if self.params.save_results is True.
Otherwise, saves the model metadata to a local .json file. Generates a gzipped tar archive
containing the metadata file, the transformer parameters and the model checkpoint files, and
saves it in the datastore or the filesystem according to the value of save_results.
Args:
retries (int): Number of times to retry saving to model tracker DB.
sleep_sec (int): Number of seconds to sleep between retries, if saving to model tracker DB.
Side effects:
Saves the model metadata and parameters into the model tracker DB or a local tarball file.
"""
# Dump the model parameters and metadata to a JSON file
out_file = os.path.join(self.output_dir, 'model_metadata.json')
with open(out_file, 'w') as out:
json.dump(self.model_metadata, out, sort_keys=True, indent=4, separators=(',', ': '))
out.write("\n")
if self.params.save_results:
# Model tracker saves the model state and metadata in the datastore as well as saving the metadata
# in the model zoo.
retry = True
i = 0
while retry:
if i < retries:
# TODO: Try to distinguish unrecoverable exceptions (e.g., model tracker is down) from ones for
# which retrying is worthwhile.
try:
trkr.save_model(self, collection_name=self.params.collection_name)
# Best model needs to be reloaded for predictions, so does not work to remove best_model_dir
retry = False
except:
raise
#self.log.warning("Need to sleep and retry saving model")
#time.sleep(sleep_sec)
#i += 1
else:
retry = False
else:
# If not using the model tracker, save the model state and metadata in a tarball in the filesystem
trkr.save_model_tarball(self.output_dir, self.params.model_tarball_path)
self.model_wrapper._clean_up_excess_files(self.model_wrapper.model_dir)
# ****************************************************************************************
def create_prediction_metadata(self, prediction_results):
"""Initializes a data structure to hold performance metrics from a model run on a new dataset,
to be stored in the model tracker DB. Note that this isn't used
for the training run metadata; the training_metrics section is created by the train_model() function.
Returns:
prediction_metadata (dict): A dictionary of the metadata for a model run on a new dataset.
"""
if self.params.datastore:
dataset_metadata = dsf.get_keyval(dataset_key=self.params.dataset_key, bucket=self.params.bucket)
else:
dataset_metadata = {}
prediction_metadata = dict(
metrics_type='prediction',
model_uuid=self.params.model_uuid,
time_run=time.time(),
dataset_key=self.params.dataset_key,
bucket=self.params.bucket,
dataset_oid=self.data.dataset_oid,
id_col=self.params.id_col,
smiles_col=self.params.smiles_col,
response_cols=self.params.response_cols,
prediction_results=prediction_results,
dataset_metadata=dataset_metadata
)
return prediction_metadata
# ****************************************************************************************
def get_metrics(self):
"""Retrieve the model performance metrics from any previous training and prediction runs
from the model tracker
"""
if self.params.save_results:
return list(trkr.get_metrics(self, collection_name=self.params.collection_name))
metrics = self.mlmt_client.get_model_metrics(collection_name=self.params.collection_name,
model_uuid=self.params.model_uuid).result()
return metrics
else:
# TODO: Eventually, may want to allow reading metrics from the JSON files saved by
# save_metrics(), in order to support installations without the model tracker.
self.log.warning("ModelPipeline.get_metrics() requires params.save_results = True")
return None
# ****************************************************************************************
def save_metrics(self, model_metrics, prefix=None, retries=5, sleep_sec=60):
"""Saves the given model_metrics dictionary to a JSON file on disk, and also to the model tracker
database if we're using it.
If writing to disk, outputs to a JSON file <prefix>_model_metrics.json in the current output directory.
Args:
model_metrics (dict or list): Either a dictionary containing the model performance metrics, or a
list of dictionaries with metrics for each training label and subset.
prefix (str): An optional prefix to include in the JSON filename
retries (int): Number of retries to save to model tracker DB, if save_results is True.
sleep_sec (int): Number of seconds to sleep between retries.
Side effects:
Saves the model_metrics dictionary to the model tracker database, or writes out a .json file
"""
# First save the metrics to disk
if prefix is None:
out_file = os.path.join(self.output_dir, 'model_metrics.json')
else:
out_file = os.path.join(self.output_dir, '%s_model_metrics.json' % prefix)
with open(out_file, 'w') as out:
json.dump(model_metrics, out, sort_keys=True, indent=4, separators=(',', ': '))
out.write("\n")
if self.params.save_results:
if type(model_metrics) != list:
model_metrics = [model_metrics]
for metrics in model_metrics:
retry = True
i = 0
while retry:
if i < retries:
try:
self.mlmt_client.save_metrics(collection_name=self.params.collection_name,
model_uuid=metrics['model_uuid'],
model_metrics=metrics)
retry = False
except:
raise
# TODO: uncomment when debugged
# TODO: Need to distinguish between "temporary" exceptions that justify
# retries and longer-term exceptions indicating that the model tracker server
# is down.
#self.log.warning("Need to sleep and retry saving metrics")
#time.sleep(sleep_sec)
#i += 1
else:
retry = False
# ****************************************************************************************
def split_dataset(self, featurization=None):
"""
Load, featurize and split the dataset according to the current model parameter settings,
but don't actually train a model. Returns the split_uuid for the dataset split.
Args:
featurization (Featurization object): An optional featurization object.
Return:
split_uuid (str): The unique identifier for the dataset split.
"""
self.run_mode = 'training'
self.params.split_only = True
self.params.previously_split = False
if featurization is None:
featurization = feat.create_featurization(self.params)
self.featurization = featurization
self.load_featurize_data()
return self.data.split_uuid
# ****************************************************************************************
def train_model(self, featurization=None):
"""Build model described by self.params on the training dataset described by self.params.
Generate predictions for the training, validation, and test datasets, and save the predictions and
performance metrics in the model results DB or in a JSON file.
Args:
featurization (Featurization object): An optional featurization object for creating models on a
prefeaturized dataset
Side effects:
Sets the following attributes of the ModelPipeline object
run_mode (str): The mode to run the pipeline, set to training
featurization (Featurization object): The featurization argument or the featurization created from the
input parameters
model_wrapper (ModelWrapper object): A model wrapper created from the parameters and featurization object.
model_metadata (dict): The model metadata dictionary that stores the model metrics and metadata
"""
self.run_mode = 'training'
if self.params.model_type == "hybrid":
if self.params.featurizer in ["graphconv"]:
raise Exception("Hybrid model doesn't support GraphConv featurizer now.")
if len(self.params.response_cols) < 2:
raise Exception("The dataset of a hybrid model should have two response columns, one for activities, one for concentrations.")
if featurization is None:
featurization = feat.create_featurization(self.params)
self.featurization = featurization
## create model wrapper if not split_only
if not self.params.split_only:
self.model_wrapper = model_wrapper.create_model_wrapper(self.params, self.featurization, self.ds_client)
self.model_wrapper.setup_model_dirs()
self.load_featurize_data()
## return if split only
if self.params.split_only:
return
self.model_wrapper.train(self)
# Create the metadata for the trained model
self.create_model_metadata()
# Save the performance metrics for each training data subset, for the best epoch
training_metrics = []
for label in ['best']:
for subset in ['train', 'valid', 'test']:
training_dict = dict(
metrics_type='training',
label=label,
subset=subset)
training_dict['prediction_results'] = self.model_wrapper.get_pred_results(subset, label)
training_metrics.append(training_dict)
# Save the model metrics separately
for training_dict in training_metrics:
training_dict['model_uuid'] = self.params.model_uuid
training_dict['time_run'] = time.time()
training_dict['input_dataset'] = self.model_metadata['training_dataset']
self.save_metrics(training_metrics)
# Save the model metadata in the model tracker or the filesystem
self.model_metadata['training_metrics'] = training_metrics
self.save_model_metadata()
# ****************************************************************************************
def run_predictions(self, featurization=None):
"""Instantiate a previously trained model, and use it to run predictions on a new dataset.
Generate predictions for a specified dataset, and save the predictions and performance
metrics in the model results DB or in a JSON file.
Args:
featurization (Featurization object): An optional featurization object for creating the model wrapper
Side effects:
Sets the following attributes of ModelPipeline:
run_mode (str): The mode to run the pipeline, set to prediction
featurization (Featurization object): The featurization argument or the featurization created from the
input parameters
model_wrapper (ModelWrapper object): A model wrapper created from the parameters and featurization object.
"""
self.run_mode = 'prediction'
if featurization is None:
featurization = feat.create_featurization(self.params)
self.featurization = featurization
# Load the dataset to run predictions on and featurize it
self.load_featurize_data()
# Run predictions on the full dataset
pred_results = self.model_wrapper.get_full_dataset_pred_results(self.data)
# Map the predictions, and metrics if requested, to the dictionary format used by
# the model tracker
prediction_metadata = self.create_prediction_metadata(pred_results)
# Get the metrics from previous prediction runs, if any, and append the new results to them
# in the model tracker DB
model_metrics = dict(
model_uuid=self.params.model_uuid,
metrics_type='prediction'
)
model_metrics.update(prediction_metadata)
self.save_metrics(model_metrics, 'prediction_%s' % self.params.dataset_name)
# ****************************************************************************************
def calc_train_dset_pair_dis(self, metric="euclidean"):
"""
Calculate the pairwise distance for training set compound feature vectors, needed for AD calculation.
"""
self.featurization = self.model_wrapper.featurization
self.load_featurize_data()
if len(self.data.train_valid_dsets) > 1:
# combine train and valid set for k-fold cv models
train_data = np.concatenate((self.data.train_valid_dsets[0][0].X, self.data.train_valid_dsets[0][1].X))
else:
train_data = self.data.train_valid_dsets[0][0].X
self.train_pair_dis = pairwise_distances(X=train_data, metric=metric)
self.train_pair_dis_metric = metric
# ****************************************************************************************
def predict_on_dataframe(self, dset_df, is_featurized=False, contains_responses=False, AD_method=None, k=5, dist_metric="euclidean"):
"""DEPRECATED
Call predict_full_dataset instead.
"""
self.log.warning("predict_on_dataframe is deprecated. Please call predict_full_dataset instead.")
result_df = self.predict_full_dataset(dset_df, is_featurized=is_featurized,
contains_responses=contains_responses, AD_method=AD_method, k=k,
dist_metric=dist_metric)
# Inside predict_full_dataset, prediction columns are generated using something like:
# for i, colname in enumerate(self.params.response_cols):
# result_df['%s_pred'%colname] = preds[:,i,0]
# predict_on_dataframe was only meant to handle single task models and so output
# columns were not prefixed with the response_col. Thus we need to remove the prefix
# for backwards compatibility
if len(self.params.response_cols)==1:
# currently the only columns that could have a response_col prefix
suffixes = ['pred', 'std', 'actual', 'prob']
rename_map = {}
colname = self.params.response_cols[0]
for suff in suffixes:
for c in result_df.columns:
if c.startswith('%s_%s'%(colname, suff)):
rename_map[c] = c[len(colname+'_'):] # chop off response_col_ prefix
# rename columns for backwards compatibility
result_df.rename(columns=rename_map, inplace=True)
return result_df
# ****************************************************************************************
def predict_on_smiles(self, smiles, verbose=False, AD_method=None, k=5, dist_metric="euclidean"):
"""Compute predicted responses from a pretrained model on a set of compounds given as a list of SMILES strings.
Args:
smiles (list): A list containing valid SMILES strings
verbose (boolean): A switch for disabling informational messages
AD_method (str): By default the applicability domain (AD) index is not calculated; set this to
z_score or local_density to choose the method used to calculate the AD index.
k (int): Number of nearest neighbors used to evaluate the AD index; default is 5.
dist_metric (str): Distance metric; valid values are 'cityblock', 'cosine', 'euclidean', 'jaccard', 'manhattan'.
Returns:
res (DataFrame): Data frame indexed by compound IDs containing a column of SMILES
strings, with additional columns containing the predicted values for each response variable.
If the model was trained to predict uncertainties, the returned data frame will also
include standard deviation columns (named <response_col>_std) for each response variable.
The result data frame may not include all the compounds in the input dataset, because
the featurizer may not be able to featurize all of them.
"""
if not verbose:
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '1'
logger = logging.getLogger('ATOM')
logger.setLevel(logging.CRITICAL)
sys.stdout = io.StringIO()
import warnings
warnings.simplefilter("ignore")
if len(self.params.response_cols) > 1:
raise Exception('Currently only single task models supported')
else:
task = self.params.response_cols[0]
df = pd.DataFrame({'compound_id': np.linspace(0, len(smiles) - 1, len(smiles), dtype=int),
self.params.smiles_col: smiles,
task: np.zeros(len(smiles))})
res = self.predict_on_dataframe(df, AD_method=AD_method, k=k, dist_metric=dist_metric)
sys.stdout = sys.__stdout__
return res
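        # Illustrative usage sketch (not part of the pipeline), assuming a pretrained
        # single-task ModelPipeline instance `pipe` has already been loaded:
        #   preds = pipe.predict_on_smiles(["CCO", "c1ccccc1O"], verbose=True)
        #   print(preds[["pred"]])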
# ****************************************************************************************
def predict_full_dataset(self, dset_df, is_featurized=False, contains_responses=False, dset_params=None, AD_method=None, k=5, dist_metric="euclidean"):
"""
Compute predicted responses from a pretrained model on a set of compounds listed in
a data frame. The data frame should contain, at minimum, a column of compound IDs; if
SMILES strings are needed to compute features, they should be provided as well. Feature
columns may be provided as well. If response columns are included in the input, they will
be included in the output as well to facilitate performance metric calculations.
This function is similar to predict_on_dataframe, except that it supports multitask models,
and includes class probabilities in the output for classifier models.
Args:
dset_df (DataFrame): A data frame containing compound IDs (if the compounds are to be
featurized using descriptors) and/or SMILES strings (if the compounds are to be
featurized using ECFP fingerprints or graph convolution) and/or precomputed features.
The column names for the compound ID and SMILES columns should match id_col and smiles_col,
respectively, in the model parameters.
is_featurized (bool): True if dset_df contains precomputed feature columns. If so,
dset_df must contain *all* of the feature columns defined by the featurizer that was
used when the model was trained.
contains_responses (bool): True if dataframe contains response values
dset_params (Namespace): Parameters used to interpret dataset, including id_col, smiles_col,
and optionally, response_cols. If not provided, id_col, smiles_col and response_cols are
assumed to be same as in the pretrained model.
AD_method (str): By default the applicability domain (AD) index is not calculated; set this to
z_score or local_density to choose the method used to calculate the AD index.
k (int): Number of nearest neighbors used to evaluate the AD index; default is 5.
dist_metric (str): Distance metric; valid values are 'cityblock', 'cosine', 'euclidean', 'jaccard', 'manhattan'.
Returns:
result_df (DataFrame): Data frame indexed by compound IDs containing a column of SMILES
strings, with additional columns containing the predicted values for each response variable.
If the model was trained to predict uncertainties, the returned data frame will also
include standard deviation columns (named <response_col>_std) for each response variable.
The result data frame may not include all the compounds in the input dataset, because
the featurizer may not be able to featurize all of them.
"""
self.run_mode = 'prediction'
self.featurization = self.model_wrapper.featurization
# Change the dataset ID, SMILES and response columns to match the ones in the current model
dset_df = dset_df.copy()
if dset_params is not None:
coldict = {
dset_params.id_col: self.params.id_col,
dset_params.smiles_col: self.params.smiles_col}
if contains_responses and (set(dset_params.response_cols) != set(self.params.response_cols)):
for i, col in enumerate(dset_params.response_cols):
coldict[col] = self.params.response_cols[i]
dset_df = dset_df.rename(columns=coldict)
# assign unique ids to each row
old_ids = dset_df[self.params.id_col].values
new_ids = list(range(len(dset_df)))
id_map = dict([(i, id) for i, id in zip(new_ids, old_ids)])
dset_df[self.params.id_col] = new_ids
self.data = model_datasets.create_minimal_dataset(self.params, self.featurization, contains_responses)
if not self.data.get_dataset_tasks(dset_df):
# Shouldn't happen
raise Exception("response_cols missing from model params")
self.data.get_featurized_data(dset_df, is_featurized)
self.data.dataset = self.model_wrapper.transform_dataset(self.data.dataset)
# Get the predictions and standard deviations, if calculated, as numpy arrays
preds, stds = self.model_wrapper.generate_predictions(self.data.dataset)
result_df = pd.DataFrame({self.params.id_col: self.data.attr.index.values,
self.params.smiles_col: self.data.attr[self.params.smiles_col].values})
if self.params.model_type != "hybrid":
if contains_responses:
for i, colname in enumerate(self.params.response_cols):
result_df["%s_actual" % colname] = self.data.vals[:,i]
for i, colname in enumerate(self.params.response_cols):
if self.params.prediction_type == 'regression':
result_df["%s_pred" % colname] = preds[:,i,0]
else:
class_probs = preds[:,i,:]
nclass = preds.shape[2]
if nclass == 2:
result_df["%s_prob" % colname] = class_probs[:,1]
else:
for k in range(nclass):
result_df["%s_prob_%d" % (colname, k)] = class_probs[:,k]
result_df["%s_pred" % colname] = np.argmax(class_probs, axis=1)
if self.params.uncertainty and self.params.prediction_type == 'regression':
for i, colname in enumerate(self.params.response_cols):
std_colname = '%s_std' % colname
result_df[std_colname] = stds[:,i,0]
else:
# hybrid models are handled differently
if contains_responses:
result_df["actual_activity"] = self.data.vals[:, 0]
result_df["concentration"] = self.data.vals[:, 1]
result_df["pred"] = preds[:, 0]
if AD_method is not None:
if self.featurization.feat_type != "graphconv":
pred_data = copy.deepcopy(self.data.dataset.X)
self.run_mode = 'training'
try:
print("Featurizing training data for AD calculation.")
self.load_featurize_data()
print("Calculating AD index.")
if len(self.data.train_valid_dsets) > 1:
# combine train and valid set for k-fold CV models
train_data = np.concatenate((self.data.train_valid_dsets[0][0].X, self.data.train_valid_dsets[0][1].X))
else:
train_data = self.data.train_valid_dsets[0][0].X
if not hasattr(self, "train_pair_dis") or not hasattr(self, "train_pair_dis_metric") or self.train_pair_dis_metric != dist_metric:
self.calc_train_dset_pair_dis(metric=dist_metric)
if AD_method == "local_density":
result_df["AD_index"] = calc_AD_kmean_local_density(train_data, pred_data, k, train_dset_pair_distance=self.train_pair_dis, dist_metric=dist_metric)
else:
result_df["AD_index"] = calc_AD_kmean_dist(train_data, pred_data, k, train_dset_pair_distance=self.train_pair_dis, dist_metric=dist_metric)
except:
print("Cannot find original training data, AD not calculated")
else:
self.log.warning("GraphConv features are not plain vectors, AD index cannot be calculated.")
# insert any missing ids
missing_ids = set(new_ids).difference(result_df[self.params.id_col])
        for mi in missing_ids:
            # DataFrame.append returns a new frame, so the result must be reassigned
            result_df = result_df.append({self.params.id_col: mi}, ignore_index=True)
# sort in ascending order, recovering the original order
result_df.sort_values(by=[self.params.id_col], ascending=True, inplace=True)
# map back to original id values
result_df[self.params.id_col] = result_df[self.params.id_col].map(id_map)
return result_df
# ****************************************************************************************
def run_models(params, shared_featurization=None, generator=False):
"""Query the model tracker for models matching the criteria in params.model_filter. Run
predictions with each model using the dataset specified by the remaining parameters.
Args:
params (Namespace): Parsed parameters
shared_featurization (Featurization): Object to map compounds to features, shared across models.
User is responsible for ensuring that shared_featurization is compatible with all matching models.
generator (bool): True if run as a generator
"""
mlmt_client = dsf.initialize_model_tracker()
ds_client = dsf.config_client()
exclude_fields = [
"training_metrics",
"time_built",
"training_dataset.dataset_metadata"
]
query_params = {
'match_metadata': params.model_filter
}
metadata_iter = mlmt_client.get_models(
collection_name=params.collection_name,
query_params=query_params,
exclude_fields=exclude_fields,
count=True
)
model_count = next(metadata_iter)
if not model_count:
print("No matching models returned")
return
for metadata_dict in metadata_iter:
model_uuid = metadata_dict['model_uuid']
print("Got metadata for model UUID %s" % model_uuid)
# Parse the saved model metadata to obtain the parameters used to train the model
model_params = parse.wrapper(metadata_dict)
# Override selected model training data parameters with parameters for current dataset
model_params.model_uuid = model_uuid
model_params.collection_name = params.collection_name
model_params.datastore = True
model_params.save_results = True
model_params.dataset_key = params.dataset_key
model_params.bucket = params.bucket
model_params.dataset_oid = params.dataset_oid
model_params.system = params.system
model_params.id_col = params.id_col
model_params.smiles_col = params.smiles_col
model_params.result_dir = params.result_dir
model_params.model_filter = params.model_filter
# Create a separate output_dir under model_params.result_dir for each model. For lack of a better idea, use the model UUID
# to name the output dir, to ensure uniqueness.
model_params.output_dir = os.path.join(params.result_dir, model_uuid)
# Allow descriptor featurizer to use a different descriptor table than was used for the training data.
# This could be needed e.g. when a model was trained with GSK compounds and tested with ChEMBL data.
model_params.descriptor_key = params.descriptor_key
model_params.descriptor_bucket = params.descriptor_bucket
model_params.descriptor_oid = params.descriptor_oid
# If there is no shared featurization object, create one for this model
if shared_featurization is None:
featurization = feat.create_featurization(model_params)
else:
featurization = shared_featurization
# Create a ModelPipeline object
pipeline = ModelPipeline(model_params, ds_client, mlmt_client)
# Create the ModelWrapper object.
pipeline.model_wrapper = model_wrapper.create_model_wrapper(pipeline.params, featurization,
pipeline.ds_client)
# Get the tarball containing the saved model from the datastore, and extract it into model_dir.
model_dataset_oid = metadata_dict['model_parameters']['model_dataset_oid']
# TODO: Should we catch exceptions from retrieve_dataset_by_dataset_oid, or let them propagate?
model_dir = dsf.retrieve_dataset_by_dataset_oid(model_dataset_oid, client=ds_client, return_metadata=False,
nrows=None, print_metadata=False, sep=False,
tarpath=pipeline.model_wrapper.model_dir)
pipeline.log.info("Extracted model tarball to %s" % model_dir)
# If that worked, reload the saved model training state
pipeline.model_wrapper.reload_model(pipeline.model_wrapper.model_dir)
# Run predictions on the specified dataset
pipeline.run_predictions(featurization)
# Return the pipeline to the calling function, if run as a generator
if generator:
yield pipeline
# ****************************************************************************************
def regenerate_results(result_dir, params=None, metadata_dict=None, shared_featurization=None, system='twintron-blue'):
"""Query the model tracker for models matching the criteria in params.model_filter. Run
predictions with each model using the dataset specified by the remaining parameters.
Args:
result_dir (str): Parent of directory where result files will be written
params (Namespace): Parsed parameters
metadata_dict (dict): Model metadata
shared_featurization (Featurization): Object to map compounds to features, shared across models.
User is responsible for ensuring that shared_featurization is compatible with all matching models.
system (str): System name
Returns:
result_dict (dict): Results from predictions
"""
mlmt_client = dsf.initialize_model_tracker()
ds_client = dsf.config_client()
if metadata_dict is None:
if params is None:
print("Must either provide params or metadata_dict")
return
metadata_dict = trkr.get_metadata_by_uuid(params.model_uuid,
collection_name=params.collection_name)
if metadata_dict is None:
print("No matching models returned")
return
# Parse the saved model metadata to obtain the parameters used to train the model
model_params = parse.wrapper(metadata_dict)
model_params.model_uuid = metadata_dict['model_uuid']
model_params.datastore = True
dset_df = model_datasets.create_split_dataset_from_metadata(model_params, ds_client)
test_df = dset_df[dset_df.subset == 'test']
model_uuid = model_params.model_uuid
print("Got metadata for model UUID %s" % model_uuid)
model_params.result_dir = result_dir
# Create a separate output_dir under model_params.result_dir for each model. For lack of a better idea, use the model UUID
# to name the output dir, to ensure uniqueness.
model_params.output_dir = os.path.join(model_params.result_dir, model_uuid)
# Allow descriptor featurizer to use a different descriptor table than was used for the training data.
# This could be needed e.g. when a model was trained with GSK compounds and tested with ChEMBL data, or
# when running a model that was trained on LC on a non-LC system.
model_params.system = system
# Create a ModelPipeline object
pipeline = ModelPipeline(model_params, ds_client, mlmt_client)
# If there is no shared featurization object, create one for this model
if shared_featurization is None:
featurization = feat.create_featurization(model_params)
else:
featurization = shared_featurization
print("Featurization = %s" % str(featurization))
# Create the ModelWrapper object.
pipeline.model_wrapper = model_wrapper.create_model_wrapper(pipeline.params, featurization,
pipeline.ds_client)
# Get the tarball containing the saved model from the datastore, and extract it into model_dir (old format)
# or output_dir (new format) according to the format of the tarball contents.
extract_dir = trkr.extract_datastore_model_tarball(model_uuid, model_params.model_bucket, model_params.output_dir,
pipeline.model_wrapper.model_dir)
# If that worked, reload the saved model training state
pipeline.model_wrapper.reload_model(pipeline.model_wrapper.model_dir)
# Run predictions on the specified dataset
result_dict = pipeline.predict_on_dataframe(test_df, contains_responses=True)
result_dict['model_type'] = model_params.model_type
result_dict['featurizer'] = model_params.featurizer
result_dict['splitter'] = model_params.splitter
if 'descriptor_type' in model_params:
result_dict['descriptor_type'] = model_params.descriptor_type
return result_dict
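# Hypothetical usage sketch for regenerate_results(); the UUID, collection name and result directory
# are placeholders, and the params namespace is assumed to be built with parse.wrapper as above:
#     pparams = parse.wrapper(dict(model_uuid='<model_uuid>', collection_name='my_collection'))
#     result_dict = regenerate_results('/tmp/regen_results', params=pparams)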
# ****************************************************************************************
def create_prediction_pipeline(params, model_uuid, collection_name=None, featurization=None, alt_bucket='CRADA'):
"""Create a ModelPipeline object to be used for running blind predictions on datasets
where the ground truth is not known, given a pretrained model in the model tracker database.
Args:
params (Namespace or dict): A parsed parameters namespace, containing parameters describing how input
datasets should be processed. If a dictionary is passed, it will be parsed to fill in default values
and convert it to a Namespace object.
model_uuid (str): The UUID of a trained model.
collection_name (str): The collection where the model is stored in the model tracker DB.
featurization (Featurization): An optional featurization object to be used for featurizing the input data.
If none is provided, one will be created based on the stored model parameters.
alt_bucket (str): Alternative bucket to search for model tarball and transformer files, if
original bucket no longer exists.
Returns:
pipeline (ModelPipeline): A pipeline object to be used for making predictions.
"""
mlmt_client = dsf.initialize_model_tracker()
ds_client = dsf.config_client()
if collection_name is None:
collection_name = trkr.get_model_collection_by_uuid(model_uuid, mlmt_client)
if type(params) == dict:
params = parse.wrapper(params)
metadata_dict = trkr.get_metadata_by_uuid(model_uuid, collection_name=collection_name)
if not metadata_dict:
raise Exception("No model found with UUID %s in collection %s" % (model_uuid, collection_name))
print("Got metadata for model UUID %s" % model_uuid)
# Parse the saved model metadata to obtain the parameters used to train the model
model_params = parse.wrapper(metadata_dict)
# Override selected model training data parameters with parameters for current dataset
model_params.model_uuid = model_uuid
model_params.save_results = True
model_params.id_col = params.id_col
model_params.smiles_col = params.smiles_col
model_params.result_dir = params.result_dir
model_params.system = params.system
# Check that buckets where model tarball and transformers were saved still exist. If not, try alt_bucket.
model_bucket_meta = ds_client.ds_buckets.get_buckets(buckets=[model_params.model_bucket]).result()
if len(model_bucket_meta) == 0:
model_params.model_bucket = alt_bucket
if (model_params.transformer_bucket != model_params.model_bucket):
trans_bucket_meta = ds_client.ds_buckets.get_buckets(buckets=[model_params.transformer_bucket]).result()
if len(trans_bucket_meta) == 0:
model_params.transformer_bucket = alt_bucket
else:
if len(model_bucket_meta) == 0:
model_params.transformer_bucket = alt_bucket
# Create a separate output_dir under model_params.result_dir for each model. For lack of a better idea, use the model UUID
# to name the output dir, to ensure uniqueness.
model_params.output_dir = os.path.join(params.result_dir, model_uuid)
# Allow using computed_descriptors featurizer for a model trained with the descriptors featurizer, and vice versa
if (model_params.featurizer == 'descriptors' and params.featurizer == 'computed_descriptors') or (
model_params.featurizer == 'computed_descriptors' and params.featurizer == 'descriptors'):
model_params.featurizer = params.featurizer
# Allow descriptor featurizer to use a different descriptor table than was used for the training data.
# This could be needed e.g. when a model was trained with GSK compounds and tested with ChEMBL data.
model_params.descriptor_key = params.descriptor_key
model_params.descriptor_bucket = params.descriptor_bucket
model_params.descriptor_oid = params.descriptor_oid
# If the caller didn't provide a featurization object, create one for this model
if featurization is None:
featurization = feat.create_featurization(model_params)
# Create a ModelPipeline object
pipeline = ModelPipeline(model_params, ds_client, mlmt_client)
# Create the ModelWrapper object.
pipeline.model_wrapper = model_wrapper.create_model_wrapper(pipeline.params, featurization,
pipeline.ds_client)
if params.verbose:
pipeline.log.setLevel(logging.DEBUG)
else:
pipeline.log.setLevel(logging.CRITICAL)
# Get the tarball containing the saved model from the datastore, and extract it into model_dir or output_dir,
# depending on what style of tarball it is (old or new respectively)
extract_dir = trkr.extract_datastore_model_tarball(model_uuid, model_params.model_bucket, model_params.output_dir,
pipeline.model_wrapper.model_dir)
if extract_dir == model_params.output_dir:
# Model came from new style tarball
pipeline.model_wrapper.model_dir = os.path.join(model_params.output_dir, 'best_model')
# Reload the saved model training state
pipeline.model_wrapper.reload_model(pipeline.model_wrapper.model_dir)
return pipeline
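# Hypothetical usage sketch for create_prediction_pipeline(); the column names, result directory and
# model UUID are placeholders; the params dict is parsed with parse.wrapper to fill in defaults:
#     pred_params = parse.wrapper(dict(id_col='compound_id', smiles_col='rdkit_smiles',
#                                      result_dir='/tmp/preds'))
#     pipe = create_prediction_pipeline(pred_params, model_uuid='<model_uuid>',
#                                       collection_name='my_collection')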
# ****************************************************************************************
def create_prediction_pipeline_from_file(params, reload_dir, model_path=None, model_type='best_model', featurization=None,
verbose=True):
"""
Create a ModelPipeline object to be used for running blind predictions on datasets, given a pretrained model stored
in the filesystem. The model may be stored either as a gzipped tar archive or as a directory.
Args:
params (Namespace): A parsed parameters namespace, containing parameters describing how input
datasets should be processed.
reload_dir (str): The path to the parent directory containing the various model subdirectories
(e.g.: '/home/cdsw/model/delaney-processed/delaney-processed/pxc50_NN_graphconv_scaffold_regression/').
If reload_dir is None, then model_path must be specified. If both are specified, then the tar archive given
by model_path will be unpacked into reload_dir, possibly overwriting existing files in that directory.
model_path (str): Path to a gzipped tar archive containing the saved model metadata and parameters. If specified,
the tar archive is unpacked into reload_dir if that directory is given, or to a temporary directory otherwise.
model_type (str): Name of the subdirectory in reload_dir or in the tar archive where the trained model state parameters
should be loaded from.
featurization (Featurization): An optional featurization object to be used for featurizing the input data.
If none is provided, one will be created based on the stored model parameters.
Returns:
pipeline (ModelPipeline): A pipeline object to be used for making predictions.
"""
# Unpack the model tar archive if one is specified
if model_path is not None:
        # Check that the saved model version is compatible with the current software version; raises an exception on mismatch
matched = mu.check_version_compatible(model_path, ignore_check=False)
if reload_dir is None:
# Create a temporary directory
reload_dir = tempfile.mkdtemp()
else:
os.makedirs(reload_dir, exist_ok=True)
model_fp = tarfile.open(model_path, mode='r:gz')
model_fp.extractall(path=reload_dir)
model_fp.close()
elif reload_dir is None:
raise ValueError("Either reload_dir or model_path must be specified.")
# Opens the model_metadata.json file containing the reloaded model parameters
config_file_path = os.path.join(reload_dir, 'model_metadata.json')
with open(config_file_path) as f:
config = json.loads(f.read())
# Set the transformer_key parameter to point to the transformer pickle file we just extracted
try:
has_transformers = config['model_parameters']['transformers']
if has_transformers:
config['model_parameters']['transformer_key'] = "%s/transformers.pkl" % reload_dir
except KeyError:
pass
# Parse the saved model metadata to obtain the parameters used to train the model
model_params = parse.wrapper(config)
#print("Featurizer = %s" % model_params.featurizer)
# Override selected model training data parameters with parameters for current dataset
model_params.save_results = False
model_params.output_dir = reload_dir
if params is not None:
model_params.id_col = params.id_col
model_params.smiles_col = params.smiles_col
model_params.result_dir = params.result_dir
model_params.system = params.system
verbose = params.verbose
# Allow using computed_descriptors featurizer for a model trained with the descriptors featurizer, and vice versa
if (model_params.featurizer == 'descriptors' and params.featurizer == 'computed_descriptors') or (
model_params.featurizer == 'computed_descriptors' and params.featurizer == 'descriptors'):
model_params.featurizer = params.featurizer
# Allow descriptor featurizer to use a different descriptor table than was used for the training data.
# This could be needed e.g. when a model was trained with GSK compounds and tested with ChEMBL data.
model_params.descriptor_key = params.descriptor_key
model_params.descriptor_bucket = params.descriptor_bucket
model_params.descriptor_oid = params.descriptor_oid
# If the caller didn't provide a featurization object, create one for this model
if featurization is None:
featurization = feat.create_featurization(model_params)
print("Featurization = %s" % str(featurization))
# Create a ModelPipeline object
pipeline = ModelPipeline(model_params)
# Create the ModelWrapper object.
pipeline.model_wrapper = model_wrapper.create_model_wrapper(pipeline.params, featurization)
if verbose:
pipeline.log.setLevel(logging.DEBUG)
else:
pipeline.log.setLevel(logging.CRITICAL)
# Reload the saved model training state
model_dir = os.path.join(reload_dir, model_type)
# If that worked, reload the saved model training state
pipeline.model_wrapper.reload_model(model_dir)
return pipeline
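# Hypothetical usage sketch for create_prediction_pipeline_from_file(); the tarball path is a
# placeholder. With reload_dir=None the archive is unpacked into a temporary directory; pass a
# params namespace instead of None to override id_col, smiles_col and result_dir for the new dataset:
#     pipe = create_prediction_pipeline_from_file(params=None, reload_dir=None,
#                                                 model_path='/path/to/model.tar.gz')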
# ****************************************************************************************
def load_from_tracker(model_uuid, collection_name=None, client=None, verbose=False, alt_bucket='CRADA'):
"""
DEPRECATED. Use the function create_prediction_pipeline() directly, or use the higher-level function
predict_from_model.predict_from_tracker_model().
Create a ModelPipeline object using the metadata in the model tracker.
Args:
model_uuid (str): The UUID of a trained model.
collection_name (str): The collection where the model is stored in the model tracker DB.
client : Ignored, for backward compatibility only
        verbose (bool): If False, suppress informational messages and warnings while loading the model.
alt_bucket (str): Alternative bucket to search for model tarball and transformer files, if
original bucket no longer exists.
Returns:
tuple of:
pipeline (ModelPipeline): A pipeline object to be used for making predictions.
pparams (Namespace): Parsed parameter namespace from the requested model.
"""
if not verbose:
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '1'
logger = logging.getLogger('ATOM')
logger.setLevel(logging.CRITICAL)
sys.stdout = io.StringIO()
import warnings
warnings.simplefilter("ignore")
if collection_name is None:
collection_name = trkr.get_model_collection_by_uuid(model_uuid)
metadata_dict = trkr.get_metadata_by_uuid(model_uuid, collection_name=collection_name)
if not metadata_dict:
raise Exception("No model found with UUID %s in collection %s" % (model_uuid, collection_name))
print("Got metadata for model UUID %s" % model_uuid)
# Parse the saved model metadata to obtain the parameters used to train the model
pparams = parse.wrapper(metadata_dict)
# pparams.uncertainty = False
pparams.verbose = verbose
    pparams.result_dir = tempfile.mkdtemp()  # Redirect the untarring of the model to a temporary directory
model = create_prediction_pipeline(pparams, model_uuid, collection_name, alt_bucket=alt_bucket)
# model.params.uncertainty = False
if not verbose:
sys.stdout = sys.__stdout__
return (model, pparams)
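# Hypothetical usage sketch for the deprecated load_from_tracker(); the UUID and collection name are
# placeholders, and the return value is a (pipeline, parsed parameter namespace) tuple:
#     pipe, pparams = load_from_tracker('<model_uuid>', collection_name='my_collection')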
# ****************************************************************************************
def ensemble_predict(model_uuids, collections, dset_df, labels=None, dset_params=None, splitters=None,
mt_client=None, aggregate="mean", contains_responses=False):
"""
Load a series of pretrained models and predict responses with each model; then aggregate
the predicted responses into one prediction per compound.
Args:
model_uuids (iterable of str): Sequence of UUIDs of trained models.
collections (str or iterable of str): The collection(s) where the models are stored in the
model tracker DB. If a single string, the same collection is assumed to contain all the models.
Otherwise, collections should be of the same length as model_uuids.
dset_df (DataFrame): Dataset to perform predictions on. Should contain compound IDs and
SMILES strings. May contain features.
labels (iterable of str): Optional suffixes for model-specific prediction column names.
If not provided, the columns are labeled 'pred_<uuid>' where <uuid> is the model UUID.
dset_params (Namespace): Parameters used to interpret dataset, including id_col and smiles_col.
If not provided, id_col and smiles_col are assumed to be same as in the pretrained model and
the same for all models.
        splitters (iterable of str): Optional list of splitter names, one per model, used to override
            the splitter recorded in each model's saved parameters.
        mt_client: Ignored, for backward compatibility only.
        aggregate (str): Method used to combine predictions: 'mean', 'median', 'max', 'min' or 'weighted'.
        contains_responses (bool): True if dset_df contains columns with the actual response values.
Returns:
pred_df (DataFrame): Table with predicted responses from each model, plus the ensemble prediction.
"""
# Get the singleton MLMTClient instance
mlmt_client = dsf.initialize_model_tracker()
pred_df = None
if type(collections) == str:
collections = [collections] * len(model_uuids)
if labels is None:
labels = model_uuids
ok_labels = []
for i, (model_uuid, collection_name, label) in enumerate(zip(model_uuids, collections, labels)):
print("Loading model %s from collection %s" % (model_uuid, collection_name))
metadata_dict = trkr.get_metadata_by_uuid(model_uuid, collection_name=collection_name)
if not metadata_dict:
raise Exception("No model found with UUID %s in collection %s" % (model_uuid, collection_name))
print("Got metadata for model UUID %s" % model_uuid)
# Parse the saved model metadata to obtain the parameters used to train the model
model_pparams = parse.wrapper(metadata_dict)
# Override selected parameters
model_pparams.result_dir = tempfile.mkdtemp()
if splitters is not None:
if model_pparams.splitter != splitters[i]:
print("Replacing %s splitter in stored model with %s" % (model_pparams.splitter, splitters[i]))
model_pparams.splitter = splitters[i]
if dset_params is not None:
model_pparams.id_col = dset_params.id_col
model_pparams.smiles_col = dset_params.smiles_col
if contains_responses:
model_pparams.response_cols = dset_params.response_cols
pipe = create_prediction_pipeline(model_pparams, model_uuid, collection_name)
if pred_df is None:
initial_cols = [model_pparams.id_col, model_pparams.smiles_col]
if contains_responses:
initial_cols.extend(model_pparams.response_cols)
pred_df = dset_df[initial_cols].copy()
if contains_responses:
# Assume singletask model for now
pred_df = pred_df.rename(columns={model_pparams.response_cols[0]: 'actual'})
pipe.run_mode = 'prediction'
pipe.featurization = pipe.model_wrapper.featurization
pipe.data = model_datasets.create_minimal_dataset(pipe.params, pipe.featurization, contains_responses)
if not pipe.data.get_dataset_tasks(dset_df):
# Shouldn't happen - response_cols should already be set in saved model parameters
raise Exception("response_cols missing from model params")
is_featurized = (len(set(pipe.featurization.get_feature_columns()) - set(dset_df.columns.values)) == 0)
pipe.data.get_featurized_data(dset_df, is_featurized)
pipe.data.dataset = pipe.model_wrapper.transform_dataset(pipe.data.dataset)
# Create a temporary data frame to hold the compound IDs and predictions. The model may not
# return predictions for all the requested compounds, so we have to outer join the predictions
# to the existing data frame.
result_df = pd.DataFrame({model_pparams.id_col: pipe.data.attr.index.values})
# Get the predictions and standard deviations, if calculated, as numpy arrays
try:
preds, stds = pipe.model_wrapper.generate_predictions(pipe.data.dataset)
except ValueError:
print("\n***** Prediction failed for model %s %s\n" % (label, model_uuid))
continue
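        # Index over the task dimension of the prediction array; assume a singletask model for now,
        # consistent with the 'actual' column renaming above.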
i = 0
if pipe.params.prediction_type == 'regression':
result_df["pred_%s" % label] = preds[:, i, 0]
else:
# Assume binary classifier for now. We're going to aggregate the probabilities for class 1.
result_df["pred_%s" % label] = preds[:, i, 1]
if pipe.params.uncertainty and pipe.params.prediction_type == 'regression':
std_colname = 'std_%s' % label
result_df[std_colname] = stds[:, i, 0]
pred_df = pred_df.merge(result_df, how='left', on=model_pparams.id_col)
ok_labels.append(label)
# Aggregate the ensemble of predictions
pred_cols = ["pred_%s" % label for label in ok_labels]
pred_vals = pred_df[pred_cols].values
if aggregate == 'mean':
agg_pred = np.nanmean(pred_vals, axis=1)
elif aggregate == 'median':
agg_pred = np.nanmedian(pred_vals, axis=1)
elif aggregate == 'max':
agg_pred = np.nanmax(pred_vals, axis=1)
elif aggregate == 'min':
agg_pred = np.nanmin(pred_vals, axis=1)
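    # The 'weighted' option combines models by inverse uncertainty: each prediction is weighted by
    # 1/std, i.e. agg = sum(pred/std) / sum(1/std), so every model must report nonzero std values.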
elif aggregate == 'weighted':
        std_cols = ["std_%s" % label for label in ok_labels]
        # Check that the uncertainty columns exist before indexing, so a missing column gives a clear error
        if len(set(std_cols) - set(pred_df.columns.values)) > 0:
            raise Exception("Weighted ensemble needs uncertainties for all component models.")
        std_vals = pred_df[std_cols].values
        if np.any(std_vals == 0.0):
            raise Exception("Can't compute weighted ensemble because some standard deviations are zero")
agg_pred = np.nansum(pred_vals / std_vals, axis=1) / np.nansum(1.0 / std_vals, axis=1)
else:
raise ValueError("Unknown aggregate value %s" % aggregate)
if pipe.params.prediction_type == 'regression':
pred_df["ensemble_pred"] = agg_pred
else:
pred_df["ensemble_class_prob"] = agg_pred
pred_df["ensemble_pred"] = [int(p >= 0.5) for p in agg_pred]
print("Done with ensemble prediction")
return pred_df
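# Hypothetical usage sketch for ensemble_predict(); the UUIDs, collection name and input file are
# placeholders; dset_df must contain the models' compound ID and SMILES columns:
#     dset_df = pd.read_csv('compounds.csv')
#     pred_df = ensemble_predict(['<uuid_1>', '<uuid_2>'], 'my_collection', dset_df,
#                                labels=['nn', 'rf'], aggregate='median')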
# ****************************************************************************************
def retrain_model(model_uuid, collection_name=None, result_dir=None, mt_client=None, verbose=True):
"""Obtain model parameters from the metadata in the model tracker, given the model_uuid,
and train a new model using exactly the same parameters (except for result_dir). Returns
the resulting ModelPipeline object. The pipeline object can then be used as input for
performance plots and other analyses that can't be done using just the metrics stored
in the model tracker; or to make predictions on new data.
Args:
model_uuid (str): The UUID of a trained model.
collection_name (str): The collection where the model is stored in the model tracker DB.
result_dir (str): The directory of model results when the model tracker is not available.
mt_client : Ignored
        verbose (bool): A switch for enabling informational messages (currently unused).
Returns:
pipeline (ModelPipeline): A pipeline object containing data from the model training.
"""
if not result_dir:
mlmt_client = dsf.initialize_model_tracker()
print("Loading model %s from collection %s" % (model_uuid, collection_name))
metadata_dict = trkr.get_metadata_by_uuid(model_uuid, collection_name=collection_name)
if not metadata_dict:
raise Exception("No model found with UUID %s in collection %s" % (model_uuid, collection_name))
else:
for dirpath, dirnames, filenames in os.walk(result_dir):
if model_uuid in dirnames:
model_dir = os.path.join(dirpath, model_uuid)
break
with open(os.path.join(model_dir, 'model_metadata.json')) as f:
metadata_dict = json.load(f)
print("Got metadata for model UUID %s" % model_uuid)
# Parse the saved model metadata to obtain the parameters used to train the model
model_pparams = parse.wrapper(metadata_dict)
model_pparams.result_dir = tempfile.mkdtemp()
# TODO: This is a hack; possibly the datastore parameter isn't being stored in the metadata?
model_pparams.datastore = True if not result_dir else False
pipe = ModelPipeline(model_pparams)
pipe.train_model()
return pipe
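# Hypothetical usage sketch for retrain_model(); the UUID, collection name and directory are
# placeholders. Without result_dir the metadata comes from the model tracker; with result_dir it is
# read from a <model_uuid>/model_metadata.json file found under that directory:
#     pipe = retrain_model('<model_uuid>', collection_name='my_collection')
#     pipe = retrain_model('<model_uuid>', result_dir='/path/to/saved/models')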
# ****************************************************************************************
def main():
"""Entry point when script is run from a shell"""
params = parse.wrapper(sys.argv[1:])
# print(params)
# model_filter parameter determines whether you are loading pretrained models and running
# predictions on them, or training a new model
if 'model_filter' in params.__dict__ and params.model_filter is not None:
# DEPRECATED: This feature isn't used by anyone as far as I know; it will be removed in
# the near future.
run_models(params)
elif params.split_only:
params.verbose = False
mp = ModelPipeline(params)
split_uuid = mp.split_dataset()
print(split_uuid)
else:
print("Running model pipeline")
logging.basicConfig(format='%(asctime)-15s %(message)s')
logger = logging.getLogger('ATOM')
if params.verbose:
logger.setLevel(logging.DEBUG)
else:
logger.setLevel(logging.CRITICAL)
mp = ModelPipeline(params)
mp.train_model()
        mp.log.warning("Dataset size: {}".format(mp.data.dataset.get_shape()[0][0]))
# -----------------------------------------------------------------------------------------------------
if __name__ == '__main__' and len(sys.argv) > 1:
main()
sys.exit(0)
|
[] |
[] |
[
"TF_CPP_MIN_LOG_LEVEL"
] |
[]
|
["TF_CPP_MIN_LOG_LEVEL"]
|
python
| 1 | 0 | |
grant.py
|
import sys
import logging
import os
import time
import gdb
import fc
# %PROPY% grant.py BUILDING MSCHELL N
if __name__ == '__main__':
ptargetsdeconn = os.environ['SDEFILE']
ptargetfcname = sys.argv[1]
pgrantees = sys.argv[2]
pviewer = sys.argv[3] # read only Y or N
timestr = time.strftime("%Y%m%d-%H%M%S")
targetlog = os.path.join(os.environ['TARGETLOGDIR']
,'grant-{0}-{1}.log'.format(ptargetfcname, timestr))
    # the encoding='utf-8' argument to logging.basicConfig is not available in this Python version
logging.basicConfig(filename=targetlog
,level=logging.INFO)
targetgdb = gdb.Gdb()
targetfc = fc.Fc(targetgdb
,ptargetfcname)
for grantee in pgrantees.strip().split(','):
logging.info('granting privileges on {0} to {1}'.format(ptargetfcname
,grantee))
if pviewer != 'Y':
output = targetfc.grantprivileges(grantee)
else:
output = targetfc.grantprivileges(grantee
,'AS_IS')
|
[] |
[] |
[
"SDEFILE",
"TARGETLOGDIR"
] |
[]
|
["SDEFILE", "TARGETLOGDIR"]
|
python
| 2 | 0 | |
test/temp/temp_testing_channel.py
|
# -*- coding: utf-8 -*-
"""
Spyder editor
This is a temporary file
"""
from topopy import Network, Channel, Flow, BNetwork
from osgeo import ogr
import numpy as np
#def rivers_to_channels(path, net, idfield=""):
path = "../data/in/jebja_channels.shp"
idfield=""
net = Network("../data/in/jebja30_net.dat")
# Open river shapefile
driver = ogr.GetDriverByName("ESRI Shapefile")
dataset = driver.Open(path)
layer = dataset.GetLayer()
geom_type = layer.GetGeomType()
lydef = layer.GetLayerDefn()
id_fld = lydef.GetFieldIndex(idfield)
points = []
for feat in layer:
geom = feat.GetGeometryRef()
if geom.GetGeometryCount() > 1:
continue
head = geom.GetPoint(0)
mouth = geom.GetPoint(geom.GetPointCount()- 1)
points.append([head, mouth])
canales = []
for canal in points:
head = canal[0]
mouth = canal[1]
canales.append(net.get_channel(head, mouth))
import matplotlib.pyplot as plt
canal = canales[0]
fig = plt.figure()
ax = fig.add_subplot(111)
dir(canal)
canal.get_xy()
for canal in canales:
xy = canal.get_xy()
ax.plot(xy[:,0], xy[:,1])
|
[] |
[] |
[] |
[]
|
[]
|
python
| null | null | null |
tests/testapp/testapp/settings.py
|
"""
Django settings for testapp project.
Generated by 'django-admin startproject' using Django 1.10.6.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
import sys
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
SITE_PATH = os.path.abspath(os.path.dirname(__file__))
MAP_WIDGETS_PATH = os.path.normpath(os.path.join(SITE_PATH, '..', '..', '..'))
if MAP_WIDGETS_PATH not in sys.path:
sys.path.insert(0, MAP_WIDGETS_PATH)
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'o6b2c!r921-+^h7jlm&4x#sn53qwfif+@8(!4b*csitx+69b=5'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ["*"]
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.gis',
'mapwidgets',
'widgets'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'testapp.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'testapp.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.contrib.gis.db.backends.postgis',
'NAME': 'mapwidget_db',
'USER': 'mapwidgetdbu',
'PASSWORD': 'mapwidgetdbu',
'HOST': 'postgres',
'PORT': '5432',
}
}
if 'TRAVIS' in os.environ:
DATABASES = {
'default': {
'ENGINE': 'django.contrib.gis.db.backends.postgis',
'NAME': 'travisci',
'USER': 'postgres',
'PASSWORD': '',
'HOST': 'localhost',
'PORT': '',
}
}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_ROOT = os.path.join(BASE_DIR, 'assets/')
STATIC_URL = '/static/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media/')
MEDIA_URL = '/uploads/'
STATICFILES_DIRS = [
os.path.join(BASE_DIR, "static"),
]
SITE_DOMAIN = 'django'
TESTING = sys.argv[1:2] == ['test']
GOOGLE_MAP_API_KEY = os.environ.get('GOOGLE_MAP_API_KEY')
try:
from tests.testapp.testapp.settings_local import *
except ImportError:
pass
|
[] |
[] |
[
"GOOGLE_MAP_API_KEY"
] |
[]
|
["GOOGLE_MAP_API_KEY"]
|
python
| 1 | 0 | |
cmd/tailscaled/debug.go
|
// Copyright (c) 2020 Tailscale Inc & AUTHORS All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package main
import (
"context"
"crypto/tls"
"encoding/json"
"errors"
"flag"
"fmt"
"io"
"io/ioutil"
"log"
"net"
"net/http"
"net/http/httptrace"
"net/url"
"os"
"strings"
"time"
"inet.af/netaddr"
"tailscale.com/derp/derphttp"
"tailscale.com/ipn"
"tailscale.com/net/interfaces"
"tailscale.com/net/portmapper"
"tailscale.com/net/tshttpproxy"
"tailscale.com/tailcfg"
"tailscale.com/types/key"
"tailscale.com/types/logger"
"tailscale.com/wgengine/monitor"
)
var debugArgs struct {
ifconfig bool // print network state once and exit
monitor bool
getURL string
derpCheck string
portmap bool
}
var debugModeFunc = debugMode // so it can be addressable
func debugMode(args []string) error {
fs := flag.NewFlagSet("debug", flag.ExitOnError)
fs.BoolVar(&debugArgs.ifconfig, "ifconfig", false, "If true, print network interface state")
fs.BoolVar(&debugArgs.monitor, "monitor", false, "If true, run link monitor forever. Precludes all other options.")
fs.BoolVar(&debugArgs.portmap, "portmap", false, "If true, run portmap debugging. Precludes all other options.")
fs.StringVar(&debugArgs.getURL, "get-url", "", "If non-empty, fetch provided URL.")
fs.StringVar(&debugArgs.derpCheck, "derp", "", "if non-empty, test a DERP ping via named region code")
if err := fs.Parse(args); err != nil {
return err
}
if len(fs.Args()) > 0 {
return errors.New("unknown non-flag debug subcommand arguments")
}
ctx := context.Background()
if debugArgs.derpCheck != "" {
return checkDerp(ctx, debugArgs.derpCheck)
}
if debugArgs.ifconfig {
return runMonitor(ctx, false)
}
if debugArgs.monitor {
return runMonitor(ctx, true)
}
if debugArgs.portmap {
return debugPortmap(ctx)
}
if debugArgs.getURL != "" {
return getURL(ctx, debugArgs.getURL)
}
return errors.New("only --monitor is available at the moment")
}
func runMonitor(ctx context.Context, loop bool) error {
dump := func(st *interfaces.State) {
j, _ := json.MarshalIndent(st, "", " ")
os.Stderr.Write(j)
}
mon, err := monitor.New(log.Printf)
if err != nil {
return err
}
mon.RegisterChangeCallback(func(changed bool, st *interfaces.State) {
if !changed {
log.Printf("Link monitor fired; no change")
return
}
log.Printf("Link monitor fired. New state:")
dump(st)
})
if loop {
log.Printf("Starting link change monitor; initial state:")
}
dump(mon.InterfaceState())
if !loop {
return nil
}
mon.Start()
log.Printf("Started link change monitor; waiting...")
select {}
}
func getURL(ctx context.Context, urlStr string) error {
if urlStr == "login" {
urlStr = "https://login.tailscale.com"
}
log.SetOutput(os.Stdout)
ctx = httptrace.WithClientTrace(ctx, &httptrace.ClientTrace{
GetConn: func(hostPort string) { log.Printf("GetConn(%q)", hostPort) },
GotConn: func(info httptrace.GotConnInfo) { log.Printf("GotConn: %+v", info) },
DNSStart: func(info httptrace.DNSStartInfo) { log.Printf("DNSStart: %+v", info) },
DNSDone: func(info httptrace.DNSDoneInfo) { log.Printf("DNSDoneInfo: %+v", info) },
TLSHandshakeStart: func() { log.Printf("TLSHandshakeStart") },
TLSHandshakeDone: func(cs tls.ConnectionState, err error) { log.Printf("TLSHandshakeDone: %+v, %v", cs, err) },
WroteRequest: func(info httptrace.WroteRequestInfo) { log.Printf("WroteRequest: %+v", info) },
})
req, err := http.NewRequestWithContext(ctx, "GET", urlStr, nil)
if err != nil {
return fmt.Errorf("http.NewRequestWithContext: %v", err)
}
proxyURL, err := tshttpproxy.ProxyFromEnvironment(req)
if err != nil {
return fmt.Errorf("tshttpproxy.ProxyFromEnvironment: %v", err)
}
log.Printf("proxy: %v", proxyURL)
tr := &http.Transport{
Proxy: func(*http.Request) (*url.URL, error) { return proxyURL, nil },
ProxyConnectHeader: http.Header{},
DisableKeepAlives: true,
}
if proxyURL != nil {
auth, err := tshttpproxy.GetAuthHeader(proxyURL)
if err == nil && auth != "" {
tr.ProxyConnectHeader.Set("Proxy-Authorization", auth)
}
const truncLen = 20
if len(auth) > truncLen {
auth = fmt.Sprintf("%s...(%d total bytes)", auth[:truncLen], len(auth))
}
log.Printf("tshttpproxy.GetAuthHeader(%v) for Proxy-Auth: = %q, %v", proxyURL, auth, err)
}
res, err := tr.RoundTrip(req)
if err != nil {
return fmt.Errorf("Transport.RoundTrip: %v", err)
}
defer res.Body.Close()
return res.Write(os.Stdout)
}
func checkDerp(ctx context.Context, derpRegion string) error {
req, err := http.NewRequestWithContext(ctx, "GET", ipn.DefaultControlURL+"/derpmap/default", nil)
if err != nil {
return fmt.Errorf("create derp map request: %w", err)
}
res, err := http.DefaultClient.Do(req)
if err != nil {
return fmt.Errorf("fetch derp map failed: %w", err)
}
defer res.Body.Close()
b, err := ioutil.ReadAll(io.LimitReader(res.Body, 1<<20))
if err != nil {
return fmt.Errorf("fetch derp map failed: %w", err)
}
if res.StatusCode != 200 {
return fmt.Errorf("fetch derp map: %v: %s", res.Status, b)
}
var dmap tailcfg.DERPMap
if err = json.Unmarshal(b, &dmap); err != nil {
return fmt.Errorf("fetch DERP map: %w", err)
}
getRegion := func() *tailcfg.DERPRegion {
for _, r := range dmap.Regions {
if r.RegionCode == derpRegion {
return r
}
}
for _, r := range dmap.Regions {
log.Printf("Known region: %q", r.RegionCode)
}
log.Fatalf("unknown region %q", derpRegion)
panic("unreachable")
}
priv1 := key.NewPrivate()
priv2 := key.NewPrivate()
c1 := derphttp.NewRegionClient(priv1, log.Printf, getRegion)
c2 := derphttp.NewRegionClient(priv2, log.Printf, getRegion)
c2.NotePreferred(true) // just to open it
m, err := c2.Recv()
log.Printf("c2 got %T, %v", m, err)
t0 := time.Now()
if err := c1.Send(priv2.Public(), []byte("hello")); err != nil {
return err
}
fmt.Println(time.Since(t0))
m, err = c2.Recv()
log.Printf("c2 got %T, %v", m, err)
if err != nil {
return err
}
log.Printf("ok")
return err
}
func debugPortmap(ctx context.Context) error {
ctx, cancel := context.WithTimeout(ctx, 3*time.Second)
defer cancel()
portmapper.VerboseLogs = true
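	// TS_DEBUG_PORTMAP_TYPE optionally restricts probing to a single protocol by disabling the
	// other two: "pmp" leaves only NAT-PMP enabled, "pcp" only PCP, "upnp" only UPnP; any other
	// non-empty value is fatal, and an empty value leaves all three enabled.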
switch os.Getenv("TS_DEBUG_PORTMAP_TYPE") {
case "":
case "pmp":
portmapper.DisablePCP = true
portmapper.DisableUPnP = true
case "pcp":
portmapper.DisablePMP = true
portmapper.DisableUPnP = true
case "upnp":
portmapper.DisablePCP = true
portmapper.DisablePMP = true
default:
log.Fatalf("TS_DEBUG_PORTMAP_TYPE must be one of pmp,pcp,upnp")
}
done := make(chan bool, 1)
var c *portmapper.Client
logf := log.Printf
c = portmapper.NewClient(logger.WithPrefix(logf, "portmapper: "), func() {
logf("portmapping changed.")
logf("have mapping: %v", c.HaveMapping())
if ext, ok := c.GetCachedMappingOrStartCreatingOne(); ok {
logf("cb: mapping: %v", ext)
select {
case done <- true:
default:
}
return
}
logf("cb: no mapping")
})
linkMon, err := monitor.New(logger.WithPrefix(logf, "monitor: "))
if err != nil {
return err
}
gatewayAndSelfIP := func() (gw, self netaddr.IP, ok bool) {
if v := os.Getenv("TS_DEBUG_GW_SELF"); strings.Contains(v, "/") {
i := strings.Index(v, "/")
gw = netaddr.MustParseIP(v[:i])
self = netaddr.MustParseIP(v[i+1:])
return gw, self, true
}
return linkMon.GatewayAndSelfIP()
}
c.SetGatewayLookupFunc(gatewayAndSelfIP)
gw, selfIP, ok := gatewayAndSelfIP()
if !ok {
logf("no gateway or self IP; %v", linkMon.InterfaceState())
return nil
}
logf("gw=%v; self=%v", gw, selfIP)
uc, err := net.ListenPacket("udp", "0.0.0.0:0")
if err != nil {
return err
}
defer uc.Close()
c.SetLocalPort(uint16(uc.LocalAddr().(*net.UDPAddr).Port))
res, err := c.Probe(ctx)
if err != nil {
return fmt.Errorf("Probe: %v", err)
}
logf("Probe: %+v", res)
if !res.PCP && !res.PMP && !res.UPnP {
logf("no portmapping services available")
return nil
}
if ext, ok := c.GetCachedMappingOrStartCreatingOne(); ok {
logf("mapping: %v", ext)
} else {
logf("no mapping")
}
select {
case <-done:
return nil
case <-ctx.Done():
return ctx.Err()
}
}
|
[
"\"TS_DEBUG_PORTMAP_TYPE\"",
"\"TS_DEBUG_GW_SELF\""
] |
[] |
[
"TS_DEBUG_PORTMAP_TYPE",
"TS_DEBUG_GW_SELF"
] |
[]
|
["TS_DEBUG_PORTMAP_TYPE", "TS_DEBUG_GW_SELF"]
|
go
| 2 | 0 | |
test/python/WMComponent_t/JobSubmitter_t/JobSubmitter_t.py
|
#!/bin/env python
"""
_JobSubmitter_t_
JobSubmitter unit-test, uses the MockPlugin to submit and tests
the different dynamics that occur inside the JobSubmitter.
"""
from __future__ import print_function
from builtins import range
import cProfile
import os
import pickle
import pstats
import threading
import time
import unittest
from Utils.PythonVersion import PY3
from WMCore_t.WMSpec_t.TestSpec import testWorkload
from nose.plugins.attrib import attr
from WMComponent.JobSubmitter.JobSubmitterPoller import JobSubmitterPoller
from WMCore.Agent.HeartbeatAPI import HeartbeatAPI
from WMCore.DAOFactory import DAOFactory
from WMCore.JobStateMachine.ChangeState import ChangeState
from WMCore.ResourceControl.ResourceControl import ResourceControl
from WMCore.Services.CRIC.CRIC import CRIC
from WMCore.Services.UUIDLib import makeUUID
from WMCore.WMBS.File import File
from WMCore.WMBS.Fileset import Fileset
from WMCore.WMBS.Job import Job
from WMCore.WMBS.JobGroup import JobGroup
from WMCore.WMBS.Subscription import Subscription
from WMCore.WMBS.Workflow import Workflow
from WMCore.WMBase import getTestBase
from WMCore.WMSpec.Makers.TaskMaker import TaskMaker
from WMQuality.Emulators import EmulatorSetup
from WMQuality.Emulators.EmulatedUnitTestCase import EmulatedUnitTestCase
from WMQuality.Emulators.LogDB.MockLogDB import MockLogDB
from WMQuality.TestInitCouchApp import TestInitCouchApp as TestInit
class JobSubmitterTest(EmulatedUnitTestCase):
"""
_JobSubmitterTest_
Test class for the JobSubmitterPoller
"""
def setUp(self):
"""
_setUp_
Standard setup: Now with 100% more couch
"""
super(JobSubmitterTest, self).setUp()
self.testInit = TestInit(__file__)
self.testInit.setLogging()
self.testInit.setDatabaseConnection()
self.testInit.setSchema(
customModules=["WMCore.WMBS", "WMCore.BossAir", "WMCore.ResourceControl", "WMCore.Agent.Database"])
self.testInit.setupCouch("jobsubmitter_t/jobs", "JobDump")
self.testInit.setupCouch("jobsubmitter_t/fwjrs", "FWJRDump")
self.testInit.setupCouch("wmagent_summary_t", "WMStats")
myThread = threading.currentThread()
self.daoFactory = DAOFactory(package="WMCore.WMBS",
logger=myThread.logger,
dbinterface=myThread.dbi)
self.baDaoFactory = DAOFactory(package="WMCore.BossAir",
logger=myThread.logger,
dbinterface=myThread.dbi)
self.testDir = self.testInit.generateWorkDir()
# Set heartbeat
self.componentName = 'JobSubmitter'
self.heartbeatAPI = HeartbeatAPI(self.componentName)
self.heartbeatAPI.registerComponent()
self.configFile = EmulatorSetup.setupWMAgentConfig()
config = self.getConfig()
myThread.logdbClient = MockLogDB(config.General.central_logdb_url,
config.Agent.hostName, logger=None)
if PY3:
self.assertItemsEqual = self.assertCountEqual
return
def tearDown(self):
"""
_tearDown_
Standard tearDown
"""
myThread = threading.currentThread()
self.testInit.clearDatabase()
self.testInit.delWorkDir()
self.testInit.tearDownCouch()
EmulatorSetup.deleteConfig(self.configFile)
myThread.logdbClient = None
return
def setResourceThresholds(self, site, **options):
"""
_setResourceThresholds_
Utility to set resource thresholds
"""
if not options:
options = {'state': 'Normal',
'runningSlots': 10,
'pendingSlots': 5,
'tasks': ['Processing', 'Merge'],
'Processing': {'pendingSlots': 5,
'runningSlots': 10},
'Merge': {'pendingSlots': 2,
'runningSlots': 5}}
resourceControl = ResourceControl()
resourceControl.insertSite(siteName=site, pnn='se.%s' % (site),
ceName=site, plugin="MockPlugin", pendingSlots=options['pendingSlots'],
runningSlots=options['runningSlots'], cmsName=site)
for task in options['tasks']:
resourceControl.insertThreshold(siteName=site, taskType=task,
maxSlots=options[task]['runningSlots'],
pendingSlots=options[task]['pendingSlots'])
if options.get('state'):
resourceControl.changeSiteState(site, options.get('state'))
return
def createJobGroups(self, nSubs, nJobs, task, workloadSpec, site,
taskType='Processing', name=None, wfPrio=1, changeState=None):
"""
_createJobGroups_
Creates a series of jobGroups for submissions
changeState is an instance of the ChangeState class to make job status changes
"""
jobGroupList = []
if name is None:
name = makeUUID()
testWorkflow = Workflow(spec=workloadSpec, owner="tapas",
name=name, task="basicWorkload/Production",
priority=wfPrio)
testWorkflow.create()
# Create subscriptions
for _ in range(nSubs):
name = makeUUID()
# Create Fileset, Subscription, jobGroup
testFileset = Fileset(name=name)
testFileset.create()
testSubscription = Subscription(fileset=testFileset,
workflow=testWorkflow,
type=taskType,
split_algo="FileBased")
testSubscription.create()
testJobGroup = JobGroup(subscription=testSubscription)
testJobGroup.create()
# Create jobs
self.makeNJobs(name=name, task=task,
nJobs=nJobs,
jobGroup=testJobGroup,
fileset=testFileset,
sub=testSubscription.exists(),
site=site)
testFileset.commit()
testJobGroup.commit()
jobGroupList.append(testJobGroup)
if changeState:
for group in jobGroupList:
changeState.propagate(group.jobs, 'created', 'new')
return jobGroupList
def makeNJobs(self, name, task, nJobs, jobGroup, fileset, sub, site):
"""
_makeNJobs_
Make and return a WMBS Job and File
This handles all those damn add-ons
"""
# Set the CacheDir
cacheDir = os.path.join(self.testDir, 'CacheDir')
for n in range(nJobs):
# First make a file
# site = self.sites[0]
testFile = File(lfn="/singleLfn/%s/%s" % (name, n),
size=1024, events=10)
fileset.addFile(testFile)
fileset.commit()
location = None
if isinstance(site, list):
if len(site) > 0:
location = site[0]
else:
location = site
index = 0
for f in fileset.files:
index += 1
testJob = Job(name='%s-%i' % (name, index))
testJob.addFile(f)
testJob["location"] = location
testJob["possiblePSN"] = set(site) if isinstance(site, list) else set([site])
testJob['task'] = task.getPathName()
testJob['sandbox'] = task.data.input.sandbox
testJob['spec'] = os.path.join(self.testDir, 'basicWorkload.pcl')
testJob['mask']['FirstEvent'] = 101
testJob['priority'] = 101
testJob['numberOfCores'] = 1
testJob['requestType'] = 'ReReco'
jobCache = os.path.join(cacheDir, 'Sub_%i' % (sub), 'Job_%i' % (index))
os.makedirs(jobCache)
testJob.create(jobGroup)
testJob['cache_dir'] = jobCache
testJob.save()
jobGroup.add(testJob)
output = open(os.path.join(jobCache, 'job.pkl'), 'wb')
pickle.dump(testJob, output)
output.close()
return testJob, testFile
def getConfig(self):
"""
_getConfig_
Gets a basic config from default location
"""
config = self.testInit.getConfiguration()
self.testInit.generateWorkDir(config)
config.component_("Agent")
config.Agent.WMSpecDirectory = self.testDir
config.Agent.agentName = 'testAgent'
config.Agent.hostName = 'testAgent'
config.Agent.componentName = self.componentName
config.Agent.useHeartbeat = False
config.Agent.isDocker = False
# First the general stuff
config.section_("General")
config.General.workDir = os.getenv("TESTDIR", self.testDir)
config.General.central_logdb_url = "http://localhost/testlogdb"
config.General.ReqMgr2ServiceURL = "http://localhost/reqmgr2"
# Now the CoreDatabase information
config.section_("CoreDatabase")
config.CoreDatabase.connectUrl = os.getenv("DATABASE")
config.CoreDatabase.socket = os.getenv("DBSOCK")
# BossAir and MockPlugin configuration
config.section_("BossAir")
config.BossAir.pluginNames = ['MockPlugin']
# Here Test the CondorPlugin instead of MockPlugin
# config.BossAir.pluginNames = ['CondorPlugin']
config.BossAir.pluginDir = 'WMCore.BossAir.Plugins'
config.BossAir.nCondorProcesses = 1
config.BossAir.section_("MockPlugin")
config.BossAir.MockPlugin.fakeReport = os.path.join(getTestBase(),
'WMComponent_t/JobSubmitter_t',
"submit.sh")
# JobSubmitter configuration
config.component_("JobSubmitter")
config.JobSubmitter.logLevel = 'DEBUG'
config.JobSubmitter.maxThreads = 1
config.JobSubmitter.pollInterval = 10
config.JobSubmitter.submitScript = os.path.join(getTestBase(),
'WMComponent_t/JobSubmitter_t',
'submit.sh')
config.JobSubmitter.componentDir = os.path.join(self.testDir, 'Components')
config.JobSubmitter.workerThreads = 2
config.JobSubmitter.jobsPerWorker = 200
config.JobSubmitter.drainGraceTime = 2 # in seconds
# JobStateMachine
config.component_('JobStateMachine')
config.JobStateMachine.couchurl = os.getenv('COUCHURL')
config.JobStateMachine.couchDBName = "jobsubmitter_t"
config.JobStateMachine.jobSummaryDBName = 'wmagent_summary_t'
# Needed, because this is a test
try:
os.makedirs(config.JobSubmitter.componentDir)
        except OSError:
pass
return config
def createTestWorkload(self, name='workloadTest'):
"""
_createTestWorkload_
Creates a test workload for us to run on, hold the basic necessities.
"""
workload = testWorkload()
taskMaker = TaskMaker(workload, os.path.join(self.testDir, name))
taskMaker.skipSubscription = True
taskMaker.processWorkload()
self.workloadSpecPath = os.path.join(self.testDir, name,
"%s/WMSandbox/WMWorkload.pkl" % name)
return workload
def testA_BasicTest(self):
"""
Use the MockPlugin to create a simple test
Check to see that all the jobs were "submitted",
don't care about thresholds
"""
workload = self.createTestWorkload()
config = self.getConfig()
changeState = ChangeState(config)
nSubs = 2
nJobs = 20
site = "T2_US_UCSD"
self.setResourceThresholds(site, pendingSlots=50, runningSlots=100, tasks=['Processing', 'Merge'],
Processing={'pendingSlots': 50, 'runningSlots': 100},
Merge={'pendingSlots': 50, 'runningSlots': 100})
jobGroupList = self.createJobGroups(nSubs=nSubs, nJobs=nJobs,
task=workload.getTask("ReReco"),
workloadSpec=self.workloadSpecPath,
site=site)
for group in jobGroupList:
changeState.propagate(group.jobs, 'created', 'new')
# Do pre-submit check
getJobsAction = self.daoFactory(classname="Jobs.GetAllJobs")
result = getJobsAction.execute(state='Created', jobType="Processing")
self.assertEqual(len(result), nSubs * nJobs)
jobSubmitter = JobSubmitterPoller(config=config)
jobSubmitter.algorithm()
# Check that jobs are in the right state
result = getJobsAction.execute(state='Created', jobType="Processing")
self.assertEqual(len(result), 0)
result = getJobsAction.execute(state='Executing', jobType="Processing")
self.assertEqual(len(result), nSubs * nJobs)
# Check assigned locations
getLocationAction = self.daoFactory(classname="Jobs.GetLocation")
for jobId in result:
loc = getLocationAction.execute(jobid=jobId)
self.assertEqual(loc, [['T2_US_UCSD']])
# Run another cycle, it shouldn't submit anything. There isn't anything to submit
jobSubmitter.algorithm()
result = getJobsAction.execute(state='Created', jobType="Processing")
self.assertEqual(len(result), 0)
result = getJobsAction.execute(state='Executing', jobType="Processing")
self.assertEqual(len(result), nSubs * nJobs)
nSubs = 1
nJobs = 10
# Submit another 10 jobs
jobGroupList = self.createJobGroups(nSubs=nSubs, nJobs=nJobs,
task=workload.getTask("ReReco"),
workloadSpec=self.workloadSpecPath,
site=site,
taskType="Merge")
for group in jobGroupList:
changeState.propagate(group.jobs, 'created', 'new')
# Check that the jobs are available for submission and run another cycle
result = getJobsAction.execute(state='Created', jobType="Merge")
self.assertEqual(len(result), nSubs * nJobs)
jobSubmitter.algorithm()
# Check that the last 10 jobs were submitted as well.
result = getJobsAction.execute(state='Created', jobType="Merge")
self.assertEqual(len(result), 0)
result = getJobsAction.execute(state='Executing', jobType="Merge")
self.assertEqual(len(result), nSubs * nJobs)
return
def testB_thresholdTest(self):
"""
_testB_thresholdTest_
Check that the threshold management is working,
this requires checks on pending/running jobs globally
at a site and per task/site
"""
workload = self.createTestWorkload()
config = self.getConfig()
changeState = ChangeState(config)
nSubs = 5
nJobs = 10
site = "T1_US_FNAL"
self.setResourceThresholds(site, pendingSlots=50, runningSlots=220, tasks=['Processing', 'Merge'],
Processing={'pendingSlots': 45, 'runningSlots': 200},
Merge={'pendingSlots': 10, 'runningSlots': 20, 'priority': 5})
# Always initialize the submitter after setting the sites, flaky!
jobSubmitter = JobSubmitterPoller(config=config)
jobGroupList = self.createJobGroups(nSubs=nSubs, nJobs=nJobs,
task=workload.getTask("ReReco"),
workloadSpec=self.workloadSpecPath,
site=site)
for group in jobGroupList:
changeState.propagate(group.jobs, 'created', 'new')
# Do pre-submit check
getJobsAction = self.daoFactory(classname="Jobs.GetAllJobs")
result = getJobsAction.execute(state='Created', jobType="Processing")
self.assertEqual(len(result), nSubs * nJobs)
jobSubmitter.algorithm()
# Check that jobs are in the right state,
# here we are limited by the pending threshold for the Processing task (45)
result = getJobsAction.execute(state='Created', jobType="Processing")
self.assertEqual(len(result), 5)
result = getJobsAction.execute(state='Executing', jobType="Processing")
self.assertEqual(len(result), 45)
# Check assigned locations
getLocationAction = self.daoFactory(classname="Jobs.GetLocation")
for jobId in result:
loc = getLocationAction.execute(jobid=jobId)
self.assertEqual(loc, [['T1_US_FNAL']])
# Run another cycle, it shouldn't submit anything. Jobs are still in pending
jobSubmitter.algorithm()
result = getJobsAction.execute(state='Created', jobType="Processing")
self.assertEqual(len(result), 5)
result = getJobsAction.execute(state='Executing', jobType="Processing")
self.assertEqual(len(result), 45)
# Now put 10 Merge jobs, only 5 can be submitted, there we hit the global pending threshold for the site
nSubs = 1
nJobs = 10
jobGroupList = self.createJobGroups(nSubs=nSubs, nJobs=nJobs,
task=workload.getTask("ReReco"),
workloadSpec=self.workloadSpecPath,
site=site,
taskType='Merge')
for group in jobGroupList:
changeState.propagate(group.jobs, 'created', 'new')
jobSubmitter.algorithm()
result = getJobsAction.execute(state='Created', jobType="Merge")
self.assertEqual(len(result), 5)
result = getJobsAction.execute(state='Executing', jobType="Merge")
self.assertEqual(len(result), 5)
result = getJobsAction.execute(state='Created', jobType="Processing")
self.assertEqual(len(result), 5)
result = getJobsAction.execute(state='Executing', jobType="Processing")
self.assertEqual(len(result), 45)
# Now let's test running thresholds
# The scenario will be setup as follows: Move all current jobs as running
# Create 300 Processing jobs and 300 merge jobs
# Run 5 polling cycles, moving all pending jobs to running in between
# Result is, merge is left at 30 running 0 pending and processing is left at 240 running 0 pending
# Processing has 110 jobs in queue and Merge 280
# This tests all threshold dynamics including the prioritization of merge over processing
nSubs = 1
nJobs = 300
jobGroupList = self.createJobGroups(nSubs=nSubs, nJobs=nJobs,
task=workload.getTask("ReReco"),
workloadSpec=self.workloadSpecPath,
site=site)
jobGroupList.extend(self.createJobGroups(nSubs=nSubs, nJobs=nJobs,
task=workload.getTask("ReReco"),
workloadSpec=self.workloadSpecPath,
site=site,
taskType='Merge'))
for group in jobGroupList:
changeState.propagate(group.jobs, 'created', 'new')
getRunJobID = self.baDaoFactory(classname="LoadByWMBSID")
setRunJobStatus = self.baDaoFactory(classname="SetStatus")
for i in range(5):
result = getJobsAction.execute(state='Executing')
binds = []
for jobId in result:
binds.append({'id': jobId, 'retry_count': 0})
runJobIds = getRunJobID.execute(binds)
setRunJobStatus.execute([x['id'] for x in runJobIds], 'Running')
jobSubmitter.algorithm()
result = getJobsAction.execute(state='Executing', jobType='Processing')
self.assertEqual(len(result), 240)
result = getJobsAction.execute(state='Created', jobType='Processing')
self.assertEqual(len(result), 110)
result = getJobsAction.execute(state='Executing', jobType='Merge')
self.assertEqual(len(result), 30)
result = getJobsAction.execute(state='Created', jobType='Merge')
self.assertEqual(len(result), 280)
return
def testC_prioTest(self):
"""
_testC_prioTest_
Test whether the correct job type, workflow and task id priorities
are respected in the DAO
"""
workload1 = self.createTestWorkload(name='testWorkload1')
workload2 = self.createTestWorkload(name='testWorkload2')
workload3 = self.createTestWorkload(name='testWorkload3')
workload4 = self.createTestWorkload(name='testWorkload4')
config = self.getConfig()
changeState = ChangeState(config)
getJobsAction = self.daoFactory(classname="Jobs.ListForSubmitter")
site = "T1_US_FNAL"
self.setResourceThresholds(site, pendingSlots=1000, runningSlots=1000,
tasks=['Processing', 'Merge', 'Production', 'Harvesting', 'LogCollect'],
Processing={'pendingSlots': 1000, 'runningSlots': 1000},
Merge={'pendingSlots': 1000, 'runningSlots': 10000},
Production={'pendingSlots': 1000, 'runningSlots': 1000},
Harvesting={'pendingSlots': 1000, 'runningSlots': 1000},
LogCollect={'pendingSlots': 1000, 'runningSlots': 1000})
nSubs = 1
nJobs = 5
jobGroupList = []
jobGroup = self.createJobGroups(nSubs=nSubs, nJobs=nJobs,
task=workload1.getTask("ReReco"),
workloadSpec=self.workloadSpecPath,
site=site,
name='OldestWorkflow') # task_id = 1
jobGroupList.extend(jobGroup)
jobGroup = self.createJobGroups(nSubs=nSubs, nJobs=nJobs,
task=workload1.getTask("ReReco"),
workloadSpec=self.workloadSpecPath,
site=site,
taskType='Merge') # task_id = 2
jobGroupList.extend(jobGroup)
jobGroup = self.createJobGroups(nSubs=nSubs, nJobs=nJobs,
task=workload1.getTask("ReReco"),
workloadSpec=self.workloadSpecPath,
site=site,
taskType='LogCollect') # task_id = 3
jobGroupList.extend(jobGroup)
for group in jobGroupList:
changeState.propagate(group.jobs, 'created', 'new')
# retrieve all 15 jobs created so far
result = getJobsAction.execute(limitRows=100)
self.assertItemsEqual([int(j['task_prio']) for j in result],
[4] * 5 + [2] * 5 + [0] * 5)
self.assertItemsEqual([int(j['wf_priority']) for j in result],
[1] * 15)
self.assertItemsEqual([int(j['task_id']) for j in result],
[2] * 5 + [3] * 5 + [1] * 5)
# now retrieve only 6 jobs (5 Merge and 1 LogCollect), wf prio=1
result = getJobsAction.execute(limitRows=6)
self.assertItemsEqual([int(j['task_prio']) for j in result], [4] * 5 + [2] * 1)
jobGroupList = []
jobGroup = self.createJobGroups(nSubs=nSubs, nJobs=nJobs, wfPrio=2,
task=workload2.getTask("ReReco"),
workloadSpec=self.workloadSpecPath,
site=site, taskType='Merge') # task_id = 4
jobGroupList.extend(jobGroup)
jobGroup = self.createJobGroups(nSubs=nSubs, nJobs=nJobs, wfPrio=3,
task=workload3.getTask("ReReco"),
workloadSpec=self.workloadSpecPath,
site=site, taskType='Processing') # task_id = 5
jobGroupList.extend(jobGroup)
jobGroup = self.createJobGroups(nSubs=nSubs, nJobs=nJobs, wfPrio=3,
task=workload3.getTask("ReReco"),
workloadSpec=self.workloadSpecPath,
site=site, taskType='LogCollect') # task_id = 6
jobGroupList.extend(jobGroup)
for group in jobGroupList:
changeState.propagate(group.jobs, 'created', 'new')
# retrieve all 30 jobs created so far
result = getJobsAction.execute(limitRows=100)
self.assertItemsEqual([int(j['task_prio']) for j in result],
[4] * 10 + [2] * 10 + [0] * 10)
# merge prio 2, merge prio 1, logCol prio 3, logCol prio 1, proc prio 3, proc prio 1
self.assertItemsEqual([int(j['wf_priority']) for j in result],
[2] * 5 + [1] * 5 + [3] * 5 + [1] * 5 + [3] * 5 + [1] * 5)
# merge id 4, merge id 2, logCol id 6, logCol id 3, proc id 5, proc id 1
self.assertItemsEqual([int(j['task_id']) for j in result],
[4] * 5 + [2] * 5 + [6] * 5 + [3] * 5 + [5] * 5 + [1] * 5)
jobGroupList = []
jobGroup = self.createJobGroups(nSubs=nSubs, nJobs=nJobs, wfPrio=2,
task=workload4.getTask("ReReco"),
workloadSpec=self.workloadSpecPath,
site=site, taskType='Merge') # task_id = 7
jobGroupList.extend(jobGroup)
for group in jobGroupList:
changeState.propagate(group.jobs, 'created', 'new')
# retrieve all 15 Merge jobs created so far
result = getJobsAction.execute(limitRows=15)
self.assertItemsEqual([int(j['task_prio']) for j in result], [4] * 15)
# merge prio 2, merge prio 2, merge prio 1
self.assertItemsEqual([int(j['wf_priority']) for j in result], [2] * 10 + [1] * 5)
# merge id 7, merge id 4, merge id 2
self.assertItemsEqual([int(j['task_id']) for j in result],
[7] * 5 + [4] * 5 + [2] * 5)
def testC_prioritization(self):
"""
_testC_prioritization_
Check that jobs are prioritized by job type and by oldest workflow
"""
workload = self.createTestWorkload()
config = self.getConfig()
changeState = ChangeState(config)
nSubs = 1
nJobs = 10
site = "T1_US_FNAL"
self.setResourceThresholds(site, pendingSlots=10, runningSlots=10000, tasks=['Processing', 'Merge'],
Processing={'pendingSlots': 50, 'runningSlots': 10000},
Merge={'pendingSlots': 10, 'runningSlots': 10000, 'priority': 5})
# Always initialize the submitter after setting the sites, flaky!
jobSubmitter = JobSubmitterPoller(config=config)
jobGroupList = self.createJobGroups(nSubs=nSubs, nJobs=nJobs,
task=workload.getTask("ReReco"),
workloadSpec=self.workloadSpecPath,
site=site,
name='OldestWorkflow')
jobGroupList.extend(self.createJobGroups(nSubs=nSubs, nJobs=nJobs,
task=workload.getTask("ReReco"),
workloadSpec=self.workloadSpecPath,
site=site,
taskType='Merge'))
for group in jobGroupList:
changeState.propagate(group.jobs, 'created', 'new')
jobSubmitter.algorithm()
# Merge goes first
getJobsAction = self.daoFactory(classname="Jobs.GetAllJobs")
result = getJobsAction.execute(state='Created', jobType="Merge")
self.assertEqual(len(result), 0)
result = getJobsAction.execute(state='Executing', jobType="Merge")
self.assertEqual(len(result), 10)
result = getJobsAction.execute(state='Created', jobType="Processing")
self.assertEqual(len(result), 10)
result = getJobsAction.execute(state='Executing', jobType="Processing")
self.assertEqual(len(result), 0)
# Create a newer workflow processing, and after some new jobs for an old workflow
jobGroupList = self.createJobGroups(nSubs=nSubs, nJobs=nJobs,
task=workload.getTask("ReReco"),
workloadSpec=self.workloadSpecPath,
site=site,
name='OldestWorkflow')
for group in jobGroupList:
changeState.propagate(group.jobs, 'created', 'new')
jobGroupList = self.createJobGroups(nSubs=nSubs, nJobs=nJobs,
task=workload.getTask("ReReco"),
workloadSpec=self.workloadSpecPath,
site=site,
name='NewestWorkflow')
for group in jobGroupList:
changeState.propagate(group.jobs, 'created', 'new')
# Move pending jobs to running
getRunJobID = self.baDaoFactory(classname="LoadByWMBSID")
setRunJobStatus = self.baDaoFactory(classname="SetStatus")
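# Each cycle below marks the currently executing jobs as Running in BossAir, which
# frees their pending slots so the next submitter cycle can submit 10 more
# Processing jobs, oldest workflow first.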
for idx in range(2):
result = getJobsAction.execute(state='Executing')
binds = []
for jobId in result:
binds.append({'id': jobId, 'retry_count': 0})
runJobIds = getRunJobID.execute(binds)
setRunJobStatus.execute([x['id'] for x in runJobIds], 'Running')
# Run again on created workflows
jobSubmitter.algorithm()
result = getJobsAction.execute(state='Created', jobType="Merge")
self.assertEqual(len(result), 0)
result = getJobsAction.execute(state='Executing', jobType="Merge")
self.assertEqual(len(result), 10)
result = getJobsAction.execute(state='Created', jobType="Processing")
self.assertEqual(len(result), 30 - (idx + 1) * 10)
result = getJobsAction.execute(state='Executing', jobType="Processing")
self.assertEqual(len(result), (idx + 1) * 10)
# Check that older workflow goes first even with newer jobs
getWorkflowAction = self.daoFactory(classname="Jobs.GetWorkflowTask")
workflows = getWorkflowAction.execute(result)
for workflow in workflows:
self.assertEqual(workflow['name'], 'OldestWorkflow')
return
def testD_SubmitFailed(self):
"""
_testD_SubmitFailed_
Check if jobs without a possible site to run at go to SubmitFailed
"""
workload = self.createTestWorkload()
config = self.getConfig()
changeState = ChangeState(config)
nSubs = 2
nJobs = 10
jobGroupList = self.createJobGroups(nSubs=nSubs, nJobs=nJobs,
task=workload.getTask("ReReco"),
site=[],
workloadSpec=self.workloadSpecPath)
for group in jobGroupList:
changeState.propagate(group.jobs, 'created', 'new')
jobSubmitter = JobSubmitterPoller(config=config)
jobSubmitter.algorithm()
# Jobs should go to submit failed
getJobsAction = self.daoFactory(classname="Jobs.GetAllJobs")
result = getJobsAction.execute(state='SubmitFailed', jobType="Processing")
self.assertEqual(len(result), nSubs * nJobs)
return
def testE_SiteModesTest(self):
"""
_testE_SiteModesTest_
Test the behavior of the submitter in response to the different
states of the sites
"""
workload = self.createTestWorkload()
config = self.getConfig()
changeState = ChangeState(config)
nSubs = 1
nJobs = 20
sites = ['T2_US_Florida', 'T2_RU_INR', 'T3_CO_Uniandes', 'T1_US_FNAL']
for site in sites:
self.setResourceThresholds(site, pendingSlots=10, runningSlots=999999, tasks=['Processing', 'Merge'],
Processing={'pendingSlots': 10, 'runningSlots': 999999},
Merge={'pendingSlots': 10, 'runningSlots': 999999, 'priority': 5})
myResourceControl = ResourceControl(config)
myResourceControl.changeSiteState('T2_US_Florida', 'Draining')
# First test that we prefer Normal over drain, and T1 over T2/T3
jobGroupList = self.createJobGroups(nSubs=nSubs, nJobs=nJobs,
site=[x for x in sites],
task=workload.getTask("ReReco"),
workloadSpec=self.workloadSpecPath)
for group in jobGroupList:
changeState.propagate(group.jobs, 'created', 'new')
jobSubmitter = JobSubmitterPoller(config=config)
# Actually run it
jobSubmitter.algorithm()
getJobsAction = self.daoFactory(classname="Jobs.GetAllJobs")
result = getJobsAction.execute(state='Executing', jobType="Processing")
self.assertEqual(len(result), nSubs * nJobs)
# All jobs should be at FNAL, INR or Uniandes. It's a random selection
# Check assigned locations
getLocationAction = self.daoFactory(classname="Jobs.GetLocation")
locationDict = getLocationAction.execute([{'jobid': x} for x in result])
for entry in locationDict:
loc = entry['site_name']
self.assertNotEqual(loc, 'T2_US_Florida')
# Now set everything to down, check we don't submit anything
for site in sites:
myResourceControl.changeSiteState(site, 'Down')
jobGroupList = self.createJobGroups(nSubs=nSubs, nJobs=nJobs,
site=[x for x in sites],
task=workload.getTask("ReReco"),
workloadSpec=self.workloadSpecPath)
for group in jobGroupList:
changeState.propagate(group.jobs, 'created', 'new')
jobSubmitter.algorithm()
# Nothing is submitted despite the empty slots at Uniandes and Florida
result = getJobsAction.execute(state='Executing', jobType="Processing")
self.assertEqual(len(result), nSubs * nJobs)
# Now set everything to Drain and create Merge jobs. Those should be submitted
for site in sites:
myResourceControl.changeSiteState(site, 'Draining')
nSubsMerge = 1
nJobsMerge = 5
jobGroupList = self.createJobGroups(nSubs=nSubsMerge, nJobs=nJobsMerge,
site=[x for x in sites],
task=workload.getTask("ReReco"),
workloadSpec=self.workloadSpecPath,
taskType='Merge')
for group in jobGroupList:
changeState.propagate(group.jobs, 'created', 'new')
jobSubmitter.algorithm()
result = getJobsAction.execute(state='Executing', jobType='Merge')
self.assertEqual(len(result), nSubsMerge * nJobsMerge)
# Now set everything to Aborted, and create Merge jobs. Those should fail
# since all of their possible sites are Aborted
for site in sites:
myResourceControl.changeSiteState(site, 'Aborted')
nSubsMerge = 1
nJobsMerge = 5
jobGroupList = self.createJobGroups(nSubs=nSubsMerge, nJobs=nJobsMerge,
site=[x for x in sites],
task=workload.getTask("ReReco"),
workloadSpec=self.workloadSpecPath,
taskType='Merge')
for group in jobGroupList:
changeState.propagate(group.jobs, 'created', 'new')
jobSubmitter.algorithm()
result = getJobsAction.execute(state='SubmitFailed', jobType='Merge')
self.assertEqual(len(result), nSubsMerge * nJobsMerge)
result = getJobsAction.execute(state='Executing', jobType='Processing')
self.assertEqual(len(result), nSubs * nJobs)
return
def testJobSiteDrain(self):
"""
_testJobSiteDrain_
Test the behavior of jobs pending to a single site that is in drain mode
"""
workload = self.createTestWorkload()
config = self.getConfig()
jobSubmitter = JobSubmitterPoller(config=config)
myResourceControl = ResourceControl(config)
changeState = ChangeState(config)
getJobsAction = self.daoFactory(classname="Jobs.GetAllJobs")
nSubs = 1
nJobs = 30
site = 'T2_US_Nebraska'
self.setResourceThresholds(site, pendingSlots=100, runningSlots=100,
tasks=['Processing', 'Merge'],
Processing={'pendingSlots': 10, 'runningSlots': 10},
Merge={'pendingSlots': 10, 'runningSlots': 10, 'priority': 5})
jobGroupList = self.createJobGroups(nSubs=nSubs, nJobs=nJobs,
site=[site],
task=workload.getTask("ReReco"),
workloadSpec=self.workloadSpecPath)
for group in jobGroupList:
changeState.propagate(group.jobs, 'created', 'new')
# submit first 10 jobs
jobSubmitter.algorithm()
result = getJobsAction.execute(state='Executing', jobType="Processing")
self.assertEqual(len(result), 10)
myResourceControl.changeSiteState(site, 'Draining')
# site is now in drain, so don't submit anything
jobSubmitter.algorithm()
# jobs were supposed to get killed, but the MockPlugin doesn't actually do anything
result = getJobsAction.execute(state='Executing', jobType="Processing")
self.assertEqual(len(result), 10)
result = getJobsAction.execute(state='created', jobType="Processing")
self.assertEqual(len(result), 20)
result = getJobsAction.execute(state='submitfailed', jobType="Processing")
self.assertEqual(len(result), 0)
# make sure the drain grace period expires...
time.sleep(3)
jobSubmitter.algorithm()
result = getJobsAction.execute(state='Executing', jobType="Processing")
self.assertEqual(len(result), 10)
# the remaining jobs should have gone to submitfailed by now
result = getJobsAction.execute(state='submitfailed', jobType="Processing")
self.assertEqual(len(result), 20)
result = getJobsAction.execute(state='created', jobType="Processing")
self.assertEqual(len(result), 0)
@attr('integration')
def testF_PollerProfileTest(self):
"""
_testF_PollerProfileTest_
Submit a lot of jobs and test how long it takes for
them to actually be submitted
"""
workload = self.createTestWorkload()
config = self.getConfig()
changeState = ChangeState(config)
nSubs = 100
nJobs = 100
site = "T1_US_FNAL"
self.setResourceThresholds(site, pendingSlots=20000, runningSlots=999999, tasks=['Processing', 'Merge'],
Processing={'pendingSlots': 10000, 'runningSlots': 999999},
Merge={'pendingSlots': 10000, 'runningSlots': 999999, 'priority': 5})
# Always initialize the submitter after setting the sites, flaky!
JobSubmitterPoller(config=config)
jobGroupList = self.createJobGroups(nSubs=nSubs, nJobs=nJobs,
task=workload.getTask("ReReco"),
workloadSpec=self.workloadSpecPath,
site=site)
jobGroupList.extend(self.createJobGroups(nSubs=nSubs, nJobs=nJobs,
task=workload.getTask("ReReco"),
workloadSpec=self.workloadSpecPath,
site=site,
taskType='Merge'))
for group in jobGroupList:
changeState.propagate(group.jobs, 'created', 'new')
# Actually run it
startTime = time.time()
cProfile.runctx("JobSubmitterPoller(config=config).algorithm()", globals(), locals(), filename="testStats.stat")
stopTime = time.time()
print("Job took %f seconds to complete" % (stopTime - startTime))
p = pstats.Stats('testStats.stat')
p.sort_stats('cumulative')
p.print_stats()
return
@attr('integration')
def testMemoryProfile(self):
"""
_testMemoryProfile_
Creates 20k jobs and keeps refreshing the cache and submitting
them between the component cycles.
Example using the memory_profiler library; unfortunately the source
code has to be updated with decorators.
NOTE: Never run it on jenkins
"""
workload = self.createTestWorkload()
config = self.getConfig()
changeState = ChangeState(config)
# myResourceControl = ResourceControl(config)
nSubs = 20
nJobs = 100
sites = ['T2_US_Florida', 'T2_RU_INR', 'T3_CO_Uniandes', 'T1_US_FNAL']
allSites = CRIC().PSNtoPNNMap('*')
for site in allSites:
self.setResourceThresholds(site, pendingSlots=20000, runningSlots=999999, tasks=['Processing', 'Merge'],
Processing={'pendingSlots': 10000, 'runningSlots': 999999},
Merge={'pendingSlots': 10000, 'runningSlots': 999999, 'priority': 5})
# Always initialize the submitter after setting the sites, flaky!
jobSubmitter = JobSubmitterPoller(config=config)
self.createJobGroups(nSubs=nSubs, nJobs=nJobs, wfPrio=10,
task=workload.getTask("ReReco"),
workloadSpec=self.workloadSpecPath,
site=[x for x in sites], changeState=changeState)
# Actually run it
jobSubmitter.algorithm() # cycle 1
self.createJobGroups(nSubs=nSubs, nJobs=nJobs, wfPrio=10,
task=workload.getTask("ReReco"),
workloadSpec=self.workloadSpecPath,
site=[x for x in sites], changeState=changeState)
# myResourceControl.changeSiteState('T2_US_Florida', 'Draining')
jobSubmitter.algorithm() # cycle 2
self.createJobGroups(nSubs=nSubs, nJobs=nJobs, wfPrio=10,
task=workload.getTask("ReReco"),
workloadSpec=self.workloadSpecPath,
site=[x for x in sites], changeState=changeState)
# myResourceControl.changeSiteState('T2_RU_INR', 'Draining')
jobSubmitter.algorithm() # cycle 3
self.createJobGroups(nSubs=nSubs, nJobs=nJobs, wfPrio=10,
task=workload.getTask("ReReco"),
workloadSpec=self.workloadSpecPath,
site=[x for x in sites], changeState=changeState)
# myResourceControl.changeSiteState('T3_CO_Uniandes', 'Draining')
jobSubmitter.algorithm() # cycle 4
# myResourceControl.changeSiteState('T2_RU_INR', 'Normal')
jobSubmitter.algorithm() # cycle 5
# myResourceControl.changeSiteState('T2_US_Florida', 'Normal')
jobSubmitter.algorithm() # cycle 6
# myResourceControl.changeSiteState('T2_RU_INR', 'Normal')
jobSubmitter.algorithm() # cycle 7
# myResourceControl.changeSiteState('T3_CO_Uniandes', 'Normal')
jobSubmitter.algorithm() # cycle 8
jobSubmitter.algorithm() # cycle 9, nothing to submit
return
if __name__ == "__main__":
unittest.main()
|
[] |
[] |
[
"DATABASE",
"TESTDIR",
"COUCHURL",
"DBSOCK"
] |
[]
|
["DATABASE", "TESTDIR", "COUCHURL", "DBSOCK"]
|
python
| 4 | 0 | |
scripts/interfaces.py
|
""" Sets up the interfaces for the Pi """
import os
import socket
import struct
import netifaces # pylint: disable=import-error
def decimal_to_ip(decimal):
""" Converts a decimal into an IP address """
return socket.inet_ntoa(struct.pack('!L', decimal))
def get_default_gateway_linux():
""" Read the default gateway directly from /proc. """
with open("/proc/net/route") as file_handler:
for line in file_handler:
fields = line.strip().split()
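# /proc/net/route columns are Iface, Destination, Gateway, Flags, ... (hex values);
# skip anything that is not the default route (destination 00000000) or that does
# not have the RTF_GATEWAY flag (0x2) set.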
if fields[1] != "00000000" or not int(fields[3], 16) & 2:
continue
return socket.inet_ntoa(struct.pack("<L", int(fields[2], 16)))
def get_ip_address(start_ip, end_ip):
""" Gets an unused IP address in the range """
start_ip_decimal = ip_to_decimal(start_ip)
end_ip_decimal = ip_to_decimal(end_ip)
target_ip = None
# Try each of these
for current_ip_decimal in range(start_ip_decimal, end_ip_decimal + 1):
current_ip = decimal_to_ip(current_ip_decimal)
result = os.system('ping -c 1 ' + current_ip)
if result == 0:
# IP in use
continue
else:
# The IP address is free
target_ip = current_ip
break
if target_ip is None:
raise Exception('Unable to find a free IP address between ' + start_ip + ' and ' + end_ip)
return target_ip
def ip_to_decimal(ip_address):
""" Converts an IP into decimal """
packed_ip = socket.inet_aton(ip_address)
return struct.unpack("!L", packed_ip)[0]
IFACES = [
'eth0',
'wlan0',
'wlan1',
'wlp2s0'
]
VALUES = []
TARGET_IP_ADDRESS = None
CONFIG_START_IP = os.environ.get('PI_IP_ADDRESS_RANGE_START')
CONFIG_END_IP = os.environ.get('PI_IP_ADDRESS_RANGE_END')
DNS_ADDRESS = os.environ.get('PI_DNS_ADDRESS')
if CONFIG_START_IP is not None and CONFIG_END_IP is not None:
TARGET_IP_ADDRESS = get_ip_address(CONFIG_START_IP, CONFIG_END_IP)
for iface in IFACES:
try:
gateway = get_default_gateway_linux()
addrs = netifaces.ifaddresses(iface)[netifaces.AF_INET]
if TARGET_IP_ADDRESS is not None:
# We've got an IP address range set
addrs[0]['addr'] = TARGET_IP_ADDRESS
VALUES.append({
"iface": iface,
"values": [
addrs[0],
gateway
]
})
except Exception as err: # pylint: disable=broad-except
# Log the error and skip this interface
print(err)
if len(VALUES) == 0: # pylint: disable=len-as-condition
raise Exception('No interfaces available')
# Do the output
with open(os.environ.get('NETWORK_CONFIG'), 'w') as the_file:
the_file.write('# interfaces(5) file used by ifup(8) and ifdown(8)\n')
the_file.write('\n')
the_file.write('# Please note that this file is written to be used with dhcpcd\n')
the_file.write('# For static IP, consult /etc/dhcpcd.conf and "man dhcpcd.conf"\n')
the_file.write('\n')
the_file.write('# Include files from /etc/network/interfaces.d:\n')
the_file.write('source-directory /etc/network/interfaces.d\n')
the_file.write('\n')
the_file.write('auto lo\n')
the_file.write('iface lo inet loopback\n')
the_file.write('\n')
for value in VALUES:
iface = value['iface']
iface_values = value['values']
if iface.startswith('wlan'):
the_file.write('allow-hotplug ' + iface + '\n')
if iface_values:
the_file.write('auto ' + iface + '\n')
the_file.write('iface ' + iface + ' inet static\n')
the_file.write(' wireless-power off\n')
the_file.write(' address ' + iface_values[0]['addr'] + '\n')
the_file.write(' netmask ' + iface_values[0]['netmask'] + '\n')
the_file.write(' gateway ' + iface_values[1] + '\n')
the_file.write(' dns-nameservers ' + DNS_ADDRESS + '\n')
else:
the_file.write('iface ' + iface + ' inet manual\n')
if iface.startswith('wlan'):
the_file.write(' wpa-conf /etc/wpa_supplicant/wpa_supplicant.conf\n')
the_file.write('\n')
with open(os.environ.get('TARGET_IP'), 'w') as the_file:
the_file.write(TARGET_IP_ADDRESS)
|
[] |
[] |
[
"NETWORK_CONFIG",
"PI_DNS_ADDRESS",
"PI_IP_ADDRESS_RANGE_END",
"TARGET_IP",
"PI_IP_ADDRESS_RANGE_START"
] |
[]
|
["NETWORK_CONFIG", "PI_DNS_ADDRESS", "PI_IP_ADDRESS_RANGE_END", "TARGET_IP", "PI_IP_ADDRESS_RANGE_START"]
|
python
| 5 | 0 | |
example/03. echo.go
|
package main
import (
"context"
"fmt"
. "github.com/spyzhov/tg"
"log"
"os"
"os/signal"
"syscall"
"time"
)
func main() {
bot := New(os.Getenv("TOKEN"))
bot.Log = log.Printf
ctx, cancel := context.WithTimeout(context.Background(), time.Second*600)
go func() {
timer := time.NewTicker(time.Second)
last := 0
defer timer.Stop()
for {
<-timer.C
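// Ask only for updates newer than the last one processed (Offset = last id + 1).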
updates, err := bot.GetUpdates(ctx, &GetUpdatesRequest{
Offset: last + 1,
Limit: 10,
AllowedUpdates: []string{"message"},
})
check(err)
for i, update := range updates {
fmt.Printf("update [%03d]: %#v\n", i, update.Message)
last = update.UpdateId
if update.Message != nil {
if update.Message.Sticker != nil {
_, err = bot.SendMessage(ctx, &SendMessageRequest{
ChatId: update.Message.Chat.Id,
Text: "You send me a sticker: _" + update.Message.Sticker.FileId + "_ from *" + update.Message.Sticker.SetName + "*",
ParseMode: "Markdown",
})
} else {
_, err = bot.SendMessage(ctx, &SendMessageRequest{
ChatId: update.Message.Chat.Id,
Text: "You wrote me: _" + update.Message.Text + "_",
ParseMode: "Markdown",
})
}
check(err)
}
}
}
}()
sigs := make(chan os.Signal, 1)
signal.Notify(sigs, syscall.SIGINT, syscall.SIGTERM)
select {
case <-sigs:
cancel()
fmt.Println("exiting...")
time.Sleep(time.Second)
case <-ctx.Done():
fmt.Println("Done!")
}
}
func check(err error) {
if err != nil {
panic(err)
}
}
|
[
"\"TOKEN\""
] |
[] |
[
"TOKEN"
] |
[]
|
["TOKEN"]
|
go
| 1 | 0 | |
plugin/pkg/auth/authenticator/token/oidc/oidc.go
|
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
oidc implements the authenticator.Token interface using the OpenID Connect protocol.
config := oidc.OIDCOptions{
IssuerURL: "https://accounts.google.com",
ClientID: os.Getenv("GOOGLE_CLIENT_ID"),
UsernameClaim: "email",
}
tokenAuthenticator, err := oidc.New(config)
*/
package oidc
import (
"crypto/tls"
"crypto/x509"
"errors"
"fmt"
"net/http"
"net/url"
"sync"
"sync/atomic"
"github.com/coreos/go-oidc/jose"
"github.com/coreos/go-oidc/oidc"
"github.com/golang/glog"
"k8s.io/kubernetes/pkg/auth/user"
certutil "k8s.io/kubernetes/pkg/util/cert"
"k8s.io/kubernetes/pkg/util/net"
"k8s.io/kubernetes/pkg/util/runtime"
)
type OIDCOptions struct {
// IssuerURL is the URL the provider signs ID Tokens as. This will be the "iss"
// field of all tokens produced by the provider and is used for configuration
// discovery.
//
// The URL is usually the provider's URL without a path, for example
// "https://accounts.google.com" or "https://login.salesforce.com".
//
// The provider must implement configuration discovery.
// See: https://openid.net/specs/openid-connect-discovery-1_0.html#ProviderConfig
IssuerURL string
// ClientID the JWT must be issued for, the "aud" (audience) field. This plugin only trusts a single
// client to ensure the plugin can be used with public providers.
//
// The plugin supports the "authorized party" OpenID Connect claim, which allows
// specialized providers to issue tokens to a client for a different client.
// See: https://openid.net/specs/openid-connect-core-1_0.html#IDToken
ClientID string
// Path to a PEM encoded root certificate of the provider.
CAFile string
// UsernameClaim is the JWT field to use as the user's username.
UsernameClaim string
// GroupsClaim, if specified, causes the OIDCAuthenticator to try to populate the user's
// groups with an ID Token field. If the GroupsClaim field is present in an ID Token the value
// must be a string or list of strings.
GroupsClaim string
}
type OIDCAuthenticator struct {
issuerURL string
trustedClientID string
usernameClaim string
groupsClaim string
httpClient *http.Client
// Contains an *oidc.Client. Do not access directly. Use client() method.
oidcClient atomic.Value
// Guards the close method and is used to lock during initialization and closing.
mu sync.Mutex
close func() // May be nil
}
// New creates a token authenticator which validates OpenID Connect ID Tokens.
func New(opts OIDCOptions) (*OIDCAuthenticator, error) {
url, err := url.Parse(opts.IssuerURL)
if err != nil {
return nil, err
}
if url.Scheme != "https" {
return nil, fmt.Errorf("'oidc-issuer-url' (%q) has invalid scheme (%q), require 'https'", opts.IssuerURL, url.Scheme)
}
if opts.UsernameClaim == "" {
return nil, errors.New("no username claim provided")
}
var roots *x509.CertPool
if opts.CAFile != "" {
roots, err = certutil.NewPool(opts.CAFile)
if err != nil {
return nil, fmt.Errorf("Failed to read the CA file: %v", err)
}
} else {
glog.Info("OIDC: No x509 certificates provided, will use host's root CA set")
}
// Copied from http.DefaultTransport.
tr := net.SetTransportDefaults(&http.Transport{
// According to golang's doc, if RootCAs is nil,
// TLS uses the host's root CA set.
TLSClientConfig: &tls.Config{RootCAs: roots},
})
authenticator := &OIDCAuthenticator{
issuerURL: opts.IssuerURL,
trustedClientID: opts.ClientID,
usernameClaim: opts.UsernameClaim,
groupsClaim: opts.GroupsClaim,
httpClient: &http.Client{Transport: tr},
}
// Attempt to initialize the authenticator asynchronously.
//
// Ignore errors instead of returning it since the OpenID Connect provider might not be
// available yet, for instance if it's running on the cluster and needs the API server
// to come up first. Errors will be logged within the client() method.
go func() {
defer runtime.HandleCrash()
authenticator.client()
}()
return authenticator, nil
}
// Close stops all goroutines used by the authenticator.
func (a *OIDCAuthenticator) Close() {
a.mu.Lock()
defer a.mu.Unlock()
if a.close != nil {
a.close()
}
return
}
func (a *OIDCAuthenticator) client() (*oidc.Client, error) {
// Fast check to see if client has already been initialized.
if client := a.oidcClient.Load(); client != nil {
return client.(*oidc.Client), nil
}
// Acquire lock, then recheck initialization.
a.mu.Lock()
defer a.mu.Unlock()
if client := a.oidcClient.Load(); client != nil {
return client.(*oidc.Client), nil
}
// Try to initialize client.
providerConfig, err := oidc.FetchProviderConfig(a.httpClient, a.issuerURL)
if err != nil {
glog.Errorf("oidc authenticator: failed to fetch provider discovery data: %v", err)
return nil, fmt.Errorf("fetch provider config: %v", err)
}
clientConfig := oidc.ClientConfig{
HTTPClient: a.httpClient,
Credentials: oidc.ClientCredentials{ID: a.trustedClientID},
ProviderConfig: providerConfig,
}
client, err := oidc.NewClient(clientConfig)
if err != nil {
glog.Errorf("oidc authenticator: failed to create client: %v", err)
return nil, fmt.Errorf("create client: %v", err)
}
// SyncProviderConfig will start a goroutine to periodically synchronize the provider config.
// The synchronization interval is set by the expiration length of the config, and has a minimum
// and maximum threshold.
stop := client.SyncProviderConfig(a.issuerURL)
a.oidcClient.Store(client)
a.close = func() {
// This assumes the stop is an unbuffered channel.
// So instead of closing the channel, we send an empty struct here.
// This guarantees that when this function returns, there are no in-flight requests,
// because a send to an unbuffered channel happens after the receive from the channel.
stop <- struct{}{}
}
return client, nil
}
// AuthenticateToken decodes and verifies an ID Token using the OIDC client. If the verification succeeds,
// then it will extract the user info from the JWT claims.
func (a *OIDCAuthenticator) AuthenticateToken(value string) (user.Info, bool, error) {
jwt, err := jose.ParseJWT(value)
if err != nil {
return nil, false, err
}
client, err := a.client()
if err != nil {
return nil, false, err
}
if err := client.VerifyJWT(jwt); err != nil {
return nil, false, err
}
claims, err := jwt.Claims()
if err != nil {
return nil, false, err
}
claim, ok, err := claims.StringClaim(a.usernameClaim)
if err != nil {
return nil, false, err
}
if !ok {
return nil, false, fmt.Errorf("cannot find %q in JWT claims", a.usernameClaim)
}
var username string
switch a.usernameClaim {
case "email":
// TODO(yifan): Check 'email_verified' to make sure the email is valid.
username = claim
default:
// For all other cases, use issuerURL + claim as the user name.
username = fmt.Sprintf("%s#%s", a.issuerURL, claim)
}
// TODO(yifan): Add UID, also populate the issuer to upper layer.
info := &user.DefaultInfo{Name: username}
if a.groupsClaim != "" {
groups, found, err := claims.StringsClaim(a.groupsClaim)
if err != nil {
// Groups type is present but is not an array of strings, try to decode as a string.
group, _, err := claims.StringClaim(a.groupsClaim)
if err != nil {
// Custom claim is present, but isn't an array of strings or a string.
return nil, false, fmt.Errorf("custom group claim contains invalid type: %T", claims[a.groupsClaim])
}
info.Groups = []string{group}
} else if found {
info.Groups = groups
}
}
return info, true, nil
}
|
[
"\"GOOGLE_CLIENT_ID\""
] |
[] |
[
"GOOGLE_CLIENT_ID"
] |
[]
|
["GOOGLE_CLIENT_ID"]
|
go
| 1 | 0 | |
irmark1/templates/complete.py
|
#!/usr/bin/env python3
"""
Scripts to drive an IR Mark One (2) car
Usage:
manage.py (drive) [--model=<model>] [--js] [--type=(linear|categorical|rnn|imu|behavior|3d|localizer|latent)] [--camera=(single|stereo)] [--meta=<key:value> ...]
manage.py (train) [--tub=<tub1,tub2,..tubn>] [--file=<file> ...] (--model=<model>) [--transfer=<model>] [--type=(linear|categorical|rnn|imu|behavior|3d|localizer)] [--continuous] [--aug]
Options:
-h --help Show this screen.
--js Use physical joystick.
-f --file=<file> A text file containing paths to tub files, one per line. Option may be used more than once.
--meta=<key:value> Key/Value strings describing a piece of meta data about this drive. Option may be used more than once.
"""
import os
import time
from docopt import docopt
import numpy as np
import irmark1 as m1
#import parts
from irmark1.parts.transform import Lambda, TriggeredCallback, DelayedTrigger
from irmark1.parts.datastore import TubHandler
from irmark1.parts.controller import LocalWebController, JoystickController
from irmark1.parts.throttle_filter import ThrottleFilter
from irmark1.parts.behavior import BehaviorPart
from irmark1.parts.file_watcher import FileWatcher
from irmark1.parts.launch import AiLaunch
from irmark1.utils import *
def drive(cfg, model_path=None, use_joystick=False, model_type=None, camera_type='single', meta=[] ):
'''
Construct a working robotic vehicle from many parts.
Each part runs as a job in the Vehicle loop, calling either
it's run or run_threaded method depending on the constructor flag `threaded`.
All parts are updated one after another at the framerate given in
cfg.DRIVE_LOOP_HZ assuming each part finishes processing in a timely manner.
Parts may have named outputs and inputs. The framework handles passing named outputs
to parts requesting the same named input.
'''
if cfg.DONKEY_GYM:
#the simulator will use cuda and then we usually run out of resources
#if we also try to use cuda. so disable for donkey_gym.
os.environ["CUDA_VISIBLE_DEVICES"]="-1"
if model_type is None:
if cfg.TRAIN_LOCALIZER:
model_type = "localizer"
elif cfg.TRAIN_BEHAVIORS:
model_type = "behavior"
else:
model_type = cfg.DEFAULT_MODEL_TYPE
#Initialize car
V = m1.vehicle.Vehicle()
if camera_type == "stereo":
if cfg.CAMERA_TYPE == "WEBCAM":
from irmark1.parts.camera import Webcam
camA = Webcam(image_w=cfg.IMAGE_W, image_h=cfg.IMAGE_H, image_d=cfg.IMAGE_DEPTH, iCam = 0)
camB = Webcam(image_w=cfg.IMAGE_W, image_h=cfg.IMAGE_H, image_d=cfg.IMAGE_DEPTH, iCam = 1)
elif cfg.CAMERA_TYPE == "CVCAM":
from irmark1.parts.cv import CvCam
camA = CvCam(image_w=cfg.IMAGE_W, image_h=cfg.IMAGE_H, image_d=cfg.IMAGE_DEPTH, iCam = 0)
camB = CvCam(image_w=cfg.IMAGE_W, image_h=cfg.IMAGE_H, image_d=cfg.IMAGE_DEPTH, iCam = 1)
else:
raise(Exception("Unsupported camera type: %s" % cfg.CAMERA_TYPE))
V.add(camA, outputs=['cam/image_array_a'], threaded=True)
V.add(camB, outputs=['cam/image_array_b'], threaded=True)
from irmark1.parts.image import StereoPair
V.add(StereoPair(), inputs=['cam/image_array_a', 'cam/image_array_b'],
outputs=['cam/image_array'])
else:
print("cfg.CAMERA_TYPE", cfg.CAMERA_TYPE)
if cfg.DONKEY_GYM:
from irmark1.parts.dgym import DonkeyGymEnv
inputs = []
threaded = True
if cfg.DONKEY_GYM:
from irmark1.parts.dgym import DonkeyGymEnv
cam = DonkeyGymEnv(cfg.DONKEY_SIM_PATH, env_name=cfg.DONKEY_GYM_ENV_NAME)
threaded = True
inputs = ['angle', 'throttle']
elif cfg.CAMERA_TYPE == "PICAM":
from irmark1.parts.camera import PiCamera
cam = PiCamera(image_w=cfg.IMAGE_W, image_h=cfg.IMAGE_H, image_d=cfg.IMAGE_DEPTH)
elif cfg.CAMERA_TYPE == "WEBCAM":
from irmark1.parts.camera import Webcam
cam = Webcam(image_w=cfg.IMAGE_W, image_h=cfg.IMAGE_H, image_d=cfg.IMAGE_DEPTH)
elif cfg.CAMERA_TYPE == "CVCAM":
from irmark1.parts.cv import CvCam
cam = CvCam(image_w=cfg.IMAGE_W, image_h=cfg.IMAGE_H, image_d=cfg.IMAGE_DEPTH)
elif cfg.CAMERA_TYPE == "CSIC":
from irmark1.parts.camera import CSICamera
cam = CSICamera(image_w=cfg.IMAGE_W, image_h=cfg.IMAGE_H, image_d=cfg.IMAGE_DEPTH, framerate=cfg.CAMERA_FRAMERATE, gstreamer_flip=cfg.CSIC_CAM_GSTREAMER_FLIP_PARM)
elif cfg.CAMERA_TYPE == "V4L":
from irmark1.parts.camera import V4LCamera
cam = V4LCamera(image_w=cfg.IMAGE_W, image_h=cfg.IMAGE_H, image_d=cfg.IMAGE_DEPTH, framerate=cfg.CAMERA_FRAMERATE)
elif cfg.CAMERA_TYPE == "MOCK":
from irmark1.parts.camera import MockCamera
cam = MockCamera(image_w=cfg.IMAGE_W, image_h=cfg.IMAGE_H, image_d=cfg.IMAGE_DEPTH)
elif cfg.CAMERA_TYPE == "D435i":
from irmark1.parts.realsense2 import RS_D435i
cam = RS_D435i(image_w=cfg.IMAGE_W, image_h=cfg.IMAGE_H, image_d=cfg.IMAGE_DEPTH, framerate=cfg.CAMERA_FRAMERATE)
else:
raise(Exception("Unkown camera type: %s" % cfg.CAMERA_TYPE))
V.add(cam, inputs=inputs, outputs=['cam/image_array'], threaded=threaded)
if use_joystick or cfg.USE_JOYSTICK_AS_DEFAULT:
#modify max_throttle closer to 1.0 to have more power
#modify steering_scale lower than 1.0 to have less responsive steering
from irmark1.parts.controller import get_js_controller
ctr = get_js_controller(cfg)
if cfg.USE_NETWORKED_JS:
from irmark1.parts.controller import JoyStickSub
netwkJs = JoyStickSub(cfg.NETWORK_JS_SERVER_IP)
V.add(netwkJs, threaded=True)
ctr.js = netwkJs
else:
#This web controller will create a web server that is capable
#of managing steering, throttle, and modes, and more.
ctr = LocalWebController()
V.add(ctr,
inputs=['cam/image_array'],
outputs=['user/angle', 'user/throttle', 'user/mode', 'recording'],
threaded=True)
#this throttle filter will allow one tap back for esc reverse
th_filter = ThrottleFilter()
V.add(th_filter, inputs=['user/throttle'], outputs=['user/throttle'])
#See if we should even run the pilot module.
#This is only needed because the part run_condition only accepts boolean
class PilotCondition:
def run(self, mode):
if mode == 'user':
return False
else:
return True
V.add(PilotCondition(), inputs=['user/mode'], outputs=['run_pilot'])
class LedConditionLogic:
def __init__(self, cfg):
self.cfg = cfg
def run(self, mode, recording, recording_alert, behavior_state, model_file_changed, track_loc):
#returns a blink rate. 0 for off. -1 for on. positive for rate.
if track_loc is not None:
led.set_rgb(*self.cfg.LOC_COLORS[track_loc])
return -1
if model_file_changed:
led.set_rgb(self.cfg.MODEL_RELOADED_LED_R, self.cfg.MODEL_RELOADED_LED_G, self.cfg.MODEL_RELOADED_LED_B)
return 0.1
else:
led.set_rgb(self.cfg.LED_R, self.cfg.LED_G, self.cfg.LED_B)
if recording_alert:
led.set_rgb(*recording_alert)
return self.cfg.REC_COUNT_ALERT_BLINK_RATE
else:
led.set_rgb(self.cfg.LED_R, self.cfg.LED_G, self.cfg.LED_B)
if behavior_state is not None and model_type == 'behavior':
r, g, b = self.cfg.BEHAVIOR_LED_COLORS[behavior_state]
led.set_rgb(r, g, b)
return -1 #solid on
if recording:
return -1 #solid on
elif mode == 'user':
return 1
elif mode == 'local_angle':
return 0.5
elif mode == 'local':
return 0.1
return 0
if cfg.HAVE_RGB_LED and not cfg.DONKEY_GYM:
from irmark1.parts.led_status import RGB_LED
led = RGB_LED(cfg.LED_PIN_R, cfg.LED_PIN_G, cfg.LED_PIN_B, cfg.LED_INVERT)
led.set_rgb(cfg.LED_R, cfg.LED_G, cfg.LED_B)
V.add(LedConditionLogic(cfg), inputs=['user/mode', 'recording', "records/alert", 'behavior/state', 'modelfile/modified', "pilot/loc"],
outputs=['led/blink_rate'])
V.add(led, inputs=['led/blink_rate'])
def get_record_alert_color(num_records):
col = (0, 0, 0)
for count, color in cfg.RECORD_ALERT_COLOR_ARR:
if num_records >= count:
col = color
return col
class RecordTracker:
def __init__(self):
self.last_num_rec_print = 0
self.dur_alert = 0
self.force_alert = 0
def run(self, num_records):
if num_records is None:
return 0
if self.last_num_rec_print != num_records or self.force_alert:
self.last_num_rec_print = num_records
if num_records % 10 == 0:
print("recorded", num_records, "records")
if num_records % cfg.REC_COUNT_ALERT == 0 or self.force_alert:
self.dur_alert = num_records // cfg.REC_COUNT_ALERT * cfg.REC_COUNT_ALERT_CYC
self.force_alert = 0
if self.dur_alert > 0:
self.dur_alert -= 1
if self.dur_alert != 0:
return get_record_alert_color(num_records)
return 0
rec_tracker_part = RecordTracker()
V.add(rec_tracker_part, inputs=["tub/num_records"], outputs=['records/alert'])
if cfg.AUTO_RECORD_ON_THROTTLE and isinstance(ctr, JoystickController):
#then we are not using the circle button. hijack that to force a record count indication
def show_record_acount_status():
rec_tracker_part.last_num_rec_print = 0
rec_tracker_part.force_alert = 1
ctr.set_button_down_trigger('circle', show_record_acount_status)
#IMU
if cfg.HAVE_IMU:
from irmark1.parts.imu import Mpu6050
imu = Mpu6050()
V.add(imu, outputs=['imu/acl_x', 'imu/acl_y', 'imu/acl_z',
'imu/gyr_x', 'imu/gyr_y', 'imu/gyr_z'], threaded=True)
class ImgPreProcess():
'''
preprocess camera image for inference.
normalize and crop if needed.
'''
def __init__(self, cfg):
self.cfg = cfg
def run(self, img_arr):
return normalize_and_crop(img_arr, self.cfg)
if "coral" in model_type:
inf_input = 'cam/image_array'
else:
inf_input = 'cam/normalized/cropped'
V.add(ImgPreProcess(cfg),
inputs=['cam/image_array'],
outputs=[inf_input],
run_condition='run_pilot')
#Behavioral state
if cfg.TRAIN_BEHAVIORS:
bh = BehaviorPart(cfg.BEHAVIOR_LIST)
V.add(bh, outputs=['behavior/state', 'behavior/label', "behavior/one_hot_state_array"])
try:
ctr.set_button_down_trigger('L1', bh.increment_state)
except:
pass
inputs = [inf_input, "behavior/one_hot_state_array"]
#IMU
elif model_type == "imu":
assert(cfg.HAVE_IMU)
#Run the pilot if the mode is not user.
inputs=[inf_input,
'imu/acl_x', 'imu/acl_y', 'imu/acl_z',
'imu/gyr_x', 'imu/gyr_y', 'imu/gyr_z']
else:
inputs=[inf_input]
def load_model(kl, model_path):
start = time.time()
print('loading model', model_path)
kl.load(model_path)
print('finished loading in %s sec.' % (str(time.time() - start)) )
def load_weights(kl, weights_path):
start = time.time()
try:
print('loading model weights', weights_path)
kl.model.load_weights(weights_path)
print('finished loading in %s sec.' % (str(time.time() - start)) )
except Exception as e:
print(e)
print('ERR>> problems loading weights', weights_path)
def load_model_json(kl, json_fnm):
start = time.time()
print('loading model json', json_fnm)
from tensorflow.python import keras
try:
with open(json_fnm, 'r') as handle:
contents = handle.read()
kl.model = keras.models.model_from_json(contents)
print('finished loading json in %s sec.' % (str(time.time() - start)) )
except Exception as e:
print(e)
print("ERR>> problems loading model json", json_fnm)
if model_path:
#When we have a model, first create an appropriate Keras part
kl = m1.utils.get_model_by_type(model_type, cfg)
model_reload_cb = None
if '.h5' in model_path or '.uff' in model_path or 'tflite' in model_path or '.pkl' in model_path:
#when we have a .h5 extension
#load everything from the model file
load_model(kl, model_path)
def reload_model(filename):
load_model(kl, filename)
model_reload_cb = reload_model
elif '.json' in model_path:
#when we have a .json extension
#load the model from there and look for a matching
#.wts file with just weights
load_model_json(kl, model_path)
weights_path = model_path.replace('.json', '.weights')
load_weights(kl, weights_path)
def reload_weights(filename):
weights_path = filename.replace('.json', '.weights')
load_weights(kl, weights_path)
model_reload_cb = reload_weights
else:
print("ERR>> Unknown extension type on model file!!")
return
#this part will signal visual LED, if connected
V.add(FileWatcher(model_path, verbose=True), outputs=['modelfile/modified'])
#these parts will reload the model file, but only when ai is running so we don't interrupt user driving
V.add(FileWatcher(model_path), outputs=['modelfile/dirty'], run_condition="ai_running")
V.add(DelayedTrigger(100), inputs=['modelfile/dirty'], outputs=['modelfile/reload'], run_condition="ai_running")
V.add(TriggeredCallback(model_path, model_reload_cb), inputs=["modelfile/reload"], run_condition="ai_running")
outputs=['pilot/angle', 'pilot/throttle']
if cfg.TRAIN_LOCALIZER:
outputs.append("pilot/loc")
V.add(kl, inputs=inputs,
outputs=outputs,
run_condition='run_pilot')
#Choose what inputs should change the car.
class DriveMode:
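'''
Route either the user's or the pilot's angle/throttle to the car depending on
the drive mode: 'user' is fully manual, 'local_angle' combines pilot steering
with user throttle, anything else is fully autonomous with the throttle scaled
by cfg.AI_THROTTLE_MULT.
'''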
def run(self, mode,
user_angle, user_throttle,
pilot_angle, pilot_throttle):
if mode == 'user':
return user_angle, user_throttle
elif mode == 'local_angle':
return pilot_angle, user_throttle
else:
return pilot_angle, pilot_throttle * cfg.AI_THROTTLE_MULT
V.add(DriveMode(),
inputs=['user/mode', 'user/angle', 'user/throttle',
'pilot/angle', 'pilot/throttle'],
outputs=['angle', 'throttle'])
#to give the car a boost when starting ai mode in a race.
aiLauncher = AiLaunch(cfg.AI_LAUNCH_DURATION, cfg.AI_LAUNCH_THROTTLE, cfg.AI_LAUNCH_KEEP_ENABLED)
V.add(aiLauncher,
inputs=['user/mode', 'throttle'],
outputs=['throttle'])
if isinstance(ctr, JoystickController):
ctr.set_button_down_trigger(cfg.AI_LAUNCH_ENABLE_BUTTON, aiLauncher.enable_ai_launch)
class AiRunCondition:
'''
A bool part to let us know when ai is running.
'''
def run(self, mode):
if mode == "user":
return False
return True
V.add(AiRunCondition(), inputs=['user/mode'], outputs=['ai_running'])
#Ai Recording
class AiRecordingCondition:
'''
return True when in ai mode, otherwise respect the user-mode recording flag
'''
def run(self, mode, recording):
if mode == 'user':
return recording
return True
if cfg.RECORD_DURING_AI:
V.add(AiRecordingCondition(), inputs=['user/mode', 'recording'], outputs=['recording'])
#Drive train setup
if cfg.DONKEY_GYM:
pass
elif cfg.DRIVE_TRAIN_TYPE == "SERVO_ESC":
from irmark1.parts.actuator import PCA9685, PWMSteering, PWMThrottle
steering_controller = PCA9685(cfg.STEERING_CHANNEL, cfg.PCA9685_I2C_ADDR, busnum=cfg.PCA9685_I2C_BUSNUM)
steering = PWMSteering(controller=steering_controller,
left_pulse=cfg.STEERING_LEFT_PWM,
right_pulse=cfg.STEERING_RIGHT_PWM)
throttle_controller = PCA9685(cfg.THROTTLE_CHANNEL, cfg.PCA9685_I2C_ADDR, busnum=cfg.PCA9685_I2C_BUSNUM)
throttle = PWMThrottle(controller=throttle_controller,
max_pulse=cfg.THROTTLE_FORWARD_PWM,
zero_pulse=cfg.THROTTLE_STOPPED_PWM,
min_pulse=cfg.THROTTLE_REVERSE_PWM)
V.add(steering, inputs=['angle'])
V.add(throttle, inputs=['throttle'])
elif cfg.DRIVE_TRAIN_TYPE == "DC_STEER_THROTTLE":
from irmark1.parts.actuator import Mini_HBridge_DC_Motor_PWM
steering = Mini_HBridge_DC_Motor_PWM(cfg.HBRIDGE_PIN_LEFT, cfg.HBRIDGE_PIN_RIGHT)
throttle = Mini_HBridge_DC_Motor_PWM(cfg.HBRIDGE_PIN_FWD, cfg.HBRIDGE_PIN_BWD)
V.add(steering, inputs=['angle'])
V.add(throttle, inputs=['throttle'])
elif cfg.DRIVE_TRAIN_TYPE == "DC_TWO_WHEEL":
from irmark1.parts.actuator import TwoWheelSteeringThrottle, Mini_HBridge_DC_Motor_PWM
left_motor = Mini_HBridge_DC_Motor_PWM(cfg.HBRIDGE_PIN_LEFT_FWD, cfg.HBRIDGE_PIN_LEFT_BWD)
right_motor = Mini_HBridge_DC_Motor_PWM(cfg.HBRIDGE_PIN_RIGHT_FWD, cfg.HBRIDGE_PIN_RIGHT_BWD)
two_wheel_control = TwoWheelSteeringThrottle()
V.add(two_wheel_control,
inputs=['throttle', 'angle'],
outputs=['left_motor_speed', 'right_motor_speed'])
V.add(left_motor, inputs=['left_motor_speed'])
V.add(right_motor, inputs=['right_motor_speed'])
elif cfg.DRIVE_TRAIN_TYPE == "SERVO_HBRIDGE_PWM":
from irmark1.parts.actuator import ServoBlaster, PWMSteering
steering_controller = ServoBlaster(cfg.STEERING_CHANNEL) #really pin
#PWM pulse values should be in the range of 100 to 200
assert(cfg.STEERING_LEFT_PWM <= 200)
assert(cfg.STEERING_RIGHT_PWM <= 200)
steering = PWMSteering(controller=steering_controller,
left_pulse=cfg.STEERING_LEFT_PWM,
right_pulse=cfg.STEERING_RIGHT_PWM)
from irmark1.parts.actuator import Mini_HBridge_DC_Motor_PWM
motor = Mini_HBridge_DC_Motor_PWM(cfg.HBRIDGE_PIN_FWD, cfg.HBRIDGE_PIN_BWD)
V.add(steering, inputs=['angle'])
V.add(motor, inputs=["throttle"])
#add tub to save data
inputs=['cam/image_array',
'user/angle', 'user/throttle',
'user/mode']
types=['image_array',
'float', 'float',
'str']
if cfg.TRAIN_BEHAVIORS:
inputs += ['behavior/state', 'behavior/label', "behavior/one_hot_state_array"]
types += ['int', 'str', 'vector']
if cfg.HAVE_IMU:
inputs += ['imu/acl_x', 'imu/acl_y', 'imu/acl_z',
'imu/gyr_x', 'imu/gyr_y', 'imu/gyr_z']
types +=['float', 'float', 'float',
'float', 'float', 'float']
if cfg.RECORD_DURING_AI:
inputs += ['pilot/angle', 'pilot/throttle']
types += ['float', 'float']
th = TubHandler(path=cfg.DATA_PATH)
tub = th.new_tub_writer(inputs=inputs, types=types, user_meta=meta)
V.add(tub, inputs=inputs, outputs=["tub/num_records"], run_condition='recording')
if cfg.PUB_CAMERA_IMAGES:
from irmark1.parts.network import TCPServeValue
from irmark1.parts.image import ImgArrToJpg
pub = TCPServeValue("camera")
V.add(ImgArrToJpg(), inputs=['cam/image_array'], outputs=['jpg/bin'])
V.add(pub, inputs=['jpg/bin'])
if type(ctr) is LocalWebController:
print("You can now go to <your pi ip address>:8887 to drive your car.")
elif isinstance(ctr, JoystickController):
print("You can now move your joystick to drive your car.")
#tell the controller about the tub
ctr.set_tub(tub)
if cfg.BUTTON_PRESS_NEW_TUB:
def new_tub_dir():
V.parts.pop()
tub = th.new_tub_writer(inputs=inputs, types=types, user_meta=meta)
V.add(tub, inputs=inputs, outputs=["tub/num_records"], run_condition='recording')
ctr.set_tub(tub)
ctr.set_button_down_trigger('cross', new_tub_dir)
ctr.print_controls()
#run the vehicle loop at DRIVE_LOOP_HZ until MAX_LOOPS (if set) is reached
V.start(rate_hz=cfg.DRIVE_LOOP_HZ,
max_loop_count=cfg.MAX_LOOPS)
if __name__ == '__main__':
args = docopt(__doc__)
cfg = m1.load_config()
if args['drive']:
model_type = args['--type']
camera_type = args['--camera']
drive(cfg, model_path=args['--model'], use_joystick=args['--js'], model_type=model_type, camera_type=camera_type,
meta=args['--meta'])
if args['train']:
from train import multi_train, preprocessFileList
tub = args['--tub']
model = args['--model']
transfer = args['--transfer']
model_type = args['--type']
continuous = args['--continuous']
aug = args['--aug']
dirs = preprocessFileList( args['--file'] )
if tub is not None:
tub_paths = [os.path.expanduser(n) for n in tub.split(',')]
dirs.extend( tub_paths )
multi_train(cfg, dirs, model, transfer, model_type, continuous, aug)
|
[] |
[] |
[
"CUDA_VISIBLE_DEVICES"
] |
[]
|
["CUDA_VISIBLE_DEVICES"]
|
python
| 1 | 0 | |
codewars/algorithm/find_the_parity_outlier.go
|
package algorithm
// FindOutlier from
// https://www.codewars.com/kata/5526fc09a1bbd946250002dc/train/go
func FindOutlier(integers []int) int {
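// Bucket the numbers by parity: m[0] collects the evens, m[1] the odds. After each
// append, check whether one bucket holds exactly one value while the other already
// holds more than one; that lone value is the outlier.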
m := make(map[int][]int)
m[0] = make([]int, 0)
m[1] = make([]int, 0)
for _, n := range integers {
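// Go's % operator yields a negative remainder for negative operands, so fold -1 back to 1.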
isOdd := n % 2
if isOdd < 0 {
isOdd *= -1
}
m[isOdd] = append(m[isOdd], n)
nn, gotIt := getTheOutlier(m)
if gotIt {
return nn
}
}
return -1
}
func getTheOutlier(m map[int][]int) (int, bool) {
if len(m[0]) == 1 && len(m[1]) > 1 {
return m[0][0], true
}
if len(m[0]) > 1 && len(m[1]) == 1 {
return m[1][0], true
}
return -1, false
}
|
[] |
[] |
[] |
[]
|
[]
|
go
| null | null | null |
manage.py
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "hackforthesea.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
webapp/newgrad/main.go
|
package main
import (
"fmt"
"log"
"net/http"
"os"
)
func main() {
http.HandleFunc("/", indexHandler)
port := os.Getenv("PORT")
if port == "" {
port = "8080"
log.Printf("Defaulting to port %s", port)
}
log.Printf("Listening on port %s", port)
if err := http.ListenAndServe(":"+port, nil); err != nil {
log.Fatal(err)
}
}
func indexHandler(w http.ResponseWriter, r *http.Request) {
fmt.Fprint(w, "Welcome to CA!")
}
|
[
"\"PORT\""
] |
[] |
[
"PORT"
] |
[]
|
["PORT"]
|
go
| 1 | 0 | |
conn.go
|
package pq
import (
"bufio"
"crypto/md5"
"database/sql"
"database/sql/driver"
"encoding/binary"
"errors"
"fmt"
"io"
"net"
"os"
"os/user"
"path"
"path/filepath"
"strconv"
"strings"
"time"
"unicode"
"github.com/lib/pq/oid"
)
// Common error types
var (
ErrNotSupported = errors.New("pq: Unsupported command")
ErrInFailedTransaction = errors.New("pq: Could not complete operation in a failed transaction")
ErrSSLNotSupported = errors.New("pq: SSL is not enabled on the server")
ErrSSLKeyHasWorldPermissions = errors.New("pq: Private key file has group or world access. Permissions should be u=rw (0600) or less")
ErrCouldNotDetectUsername = errors.New("pq: Could not detect default username. Please provide one explicitly")
errUnexpectedReady = errors.New("unexpected ReadyForQuery")
errNoRowsAffected = errors.New("no RowsAffected available after the empty statement")
errNoLastInsertID = errors.New("no LastInsertId available after the empty statement")
)
// Driver is the Postgres database driver.
type Driver struct{}
// Open opens a new connection to the database. name is a connection string.
// Most users should only use it through database/sql package from the standard
// library.
func (d *Driver) Open(name string) (driver.Conn, error) {
return Open(name)
}
func init() {
sql.Register("postgres", &Driver{})
}
type parameterStatus struct {
// server version in the same format as server_version_num, or 0 if
// unavailable
serverVersion int
// the current location based on the TimeZone value of the session, if
// available
currentLocation *time.Location
}
type transactionStatus byte
const (
txnStatusIdle transactionStatus = 'I'
txnStatusIdleInTransaction transactionStatus = 'T'
txnStatusInFailedTransaction transactionStatus = 'E'
)
func (s transactionStatus) String() string {
switch s {
case txnStatusIdle:
return "idle"
case txnStatusIdleInTransaction:
return "idle in transaction"
case txnStatusInFailedTransaction:
return "in a failed transaction"
default:
errorf("unknown transactionStatus %d", s)
}
panic("not reached")
}
// Dialer is the dialer interface. It can be used to obtain more control over
// how pq creates network connections.
type Dialer interface {
Dial(network, address string) (net.Conn, error)
DialTimeout(network, address string, timeout time.Duration) (net.Conn, error)
}
type defaultDialer struct{}
func (d defaultDialer) Dial(ntw, addr string) (net.Conn, error) {
return net.Dial(ntw, addr)
}
func (d defaultDialer) DialTimeout(ntw, addr string, timeout time.Duration) (net.Conn, error) {
return net.DialTimeout(ntw, addr, timeout)
}
type conn struct {
c net.Conn
buf *bufio.Reader
namei int
scratch [512]byte
txnStatus transactionStatus
txnFinish func()
// Save connection arguments to use during CancelRequest.
dialer Dialer
opts values
// Cancellation key data for use with CancelRequest messages.
processID int
secretKey int
parameterStatus parameterStatus
saveMessageType byte
saveMessageBuffer []byte
// If true, this connection is bad and all public-facing functions should
// return ErrBadConn.
bad bool
// If set, this connection should never use the binary format when
// receiving query results from prepared statements. Only provided for
// debugging.
disablePreparedBinaryResult bool
// Whether to always send []byte parameters over as binary. Enables single
// round-trip mode for non-prepared Query calls.
binaryParameters bool
// If true this connection is in the middle of a COPY
inCopy bool
// If not nil, notices will be synchronously sent here
noticeHandler func(*Error)
}
// Handle driver-side settings in parsed connection string.
func (cn *conn) handleDriverSettings(o values) (err error) {
boolSetting := func(key string, val *bool) error {
if value, ok := o[key]; ok {
if value == "yes" {
*val = true
} else if value == "no" {
*val = false
} else {
return fmt.Errorf("unrecognized value %q for %s", value, key)
}
}
return nil
}
err = boolSetting("disable_prepared_binary_result", &cn.disablePreparedBinaryResult)
if err != nil {
return err
}
return boolSetting("binary_parameters", &cn.binaryParameters)
}
func (cn *conn) handlePgpass(o values) {
// if a password was supplied, do not process .pgpass
if _, ok := o["password"]; ok {
return
}
filename := os.Getenv("PGPASSFILE")
if filename == "" {
// XXX this code doesn't work on Windows where the default filename is
// XXX %APPDATA%\postgresql\pgpass.conf
// Prefer $HOME over user.Current due to glibc bug: golang.org/issue/13470
userHome := os.Getenv("HOME")
if userHome == "" {
user, err := user.Current()
if err != nil {
return
}
userHome = user.HomeDir
}
filename = filepath.Join(userHome, ".pgpass")
}
fileinfo, err := os.Stat(filename)
if err != nil {
return
}
mode := fileinfo.Mode()
if mode&(0x77) != 0 {
// XXX should warn about incorrect .pgpass permissions as psql does
return
}
file, err := os.Open(filename)
if err != nil {
return
}
defer file.Close()
scanner := bufio.NewScanner(io.Reader(file))
hostname := o["host"]
ntw, _ := network(o)
port := o["port"]
db := o["dbname"]
username := o["user"]
// From: https://github.com/tg/pgpass/blob/master/reader.go
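// Each .pgpass line has the form hostname:port:database:username:password;
// ':' and '\' inside a field may be escaped with a backslash.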
getFields := func(s string) []string {
fs := make([]string, 0, 5)
f := make([]rune, 0, len(s))
var esc bool
for _, c := range s {
switch {
case esc:
f = append(f, c)
esc = false
case c == '\\':
esc = true
case c == ':':
fs = append(fs, string(f))
f = f[:0]
default:
f = append(f, c)
}
}
return append(fs, string(f))
}
for scanner.Scan() {
line := scanner.Text()
if len(line) == 0 || line[0] == '#' {
continue
}
split := getFields(line)
if len(split) != 5 {
continue
}
if (split[0] == "*" || split[0] == hostname || (split[0] == "localhost" && (hostname == "" || ntw == "unix"))) && (split[1] == "*" || split[1] == port) && (split[2] == "*" || split[2] == db) && (split[3] == "*" || split[3] == username) {
o["password"] = split[4]
return
}
}
}
func (cn *conn) writeBuf(b byte) *writeBuf {
cn.scratch[0] = b
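// scratch[0] holds the message type byte; the following four bytes are reserved for
// the int32 message length, which is filled in just before the buffer is sent
// (pos marks where the length starts).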
return &writeBuf{
buf: cn.scratch[:5],
pos: 1,
}
}
// Open opens a new connection to the database. name is a connection string.
// Most users should only use it through database/sql package from the standard
// library.
func Open(name string) (_ driver.Conn, err error) {
return DialOpen(defaultDialer{}, name)
}
// DialOpen opens a new connection to the database using a dialer.
func DialOpen(d Dialer, name string) (_ driver.Conn, err error) {
// Handle any panics during connection initialization. Note that we
// specifically do *not* want to use errRecover(), as that would turn any
// connection errors into ErrBadConns, hiding the real error message from
// the user.
defer errRecoverNoErrBadConn(&err)
o := make(values)
// A number of defaults are applied here, in this order:
//
// * Very low precedence defaults applied in every situation
// * Environment variables
// * Explicitly passed connection information
o["host"] = "localhost"
o["port"] = "5432"
// N.B.: Extra float digits should be set to 3, but that breaks
// Postgres 8.4 and older, where the max is 2.
o["extra_float_digits"] = "2"
for k, v := range parseEnviron(os.Environ()) {
o[k] = v
}
if strings.HasPrefix(name, "postgres://") || strings.HasPrefix(name, "postgresql://") {
name, err = ParseURL(name)
if err != nil {
return nil, err
}
}
if err := parseOpts(name, o); err != nil {
return nil, err
}
// Use the "fallback" application name if necessary
if fallback, ok := o["fallback_application_name"]; ok {
if _, ok := o["application_name"]; !ok {
o["application_name"] = fallback
}
}
// We can't work with any client_encoding other than UTF-8 currently.
// However, we have historically allowed the user to set it to UTF-8
// explicitly, and there's no reason to break such programs, so allow that.
// Note that the "options" setting could also set client_encoding, but
// parsing its value is not worth it. Instead, we always explicitly send
// client_encoding as a separate run-time parameter, which should override
// anything set in options.
if enc, ok := o["client_encoding"]; ok && !isUTF8(enc) {
return nil, errors.New("client_encoding must be absent or 'UTF8'")
}
o["client_encoding"] = "UTF8"
// DateStyle needs a similar treatment.
if datestyle, ok := o["datestyle"]; ok {
if datestyle != "ISO, MDY" {
panic(fmt.Sprintf("setting datestyle must be absent or %v; got %v",
"ISO, MDY", datestyle))
}
} else {
o["datestyle"] = "ISO, MDY"
}
// If a user is not provided by any other means, the last
// resort is to use the current operating system provided user
// name.
if _, ok := o["user"]; !ok {
u, err := userCurrent()
if err != nil {
return nil, err
}
o["user"] = u
}
cn := &conn{
opts: o,
dialer: d,
}
err = cn.handleDriverSettings(o)
if err != nil {
return nil, err
}
cn.handlePgpass(o)
cn.c, err = dial(d, o)
if err != nil {
return nil, err
}
err = cn.ssl(o)
if err != nil {
return nil, err
}
// cn.startup panics on error. Make sure we don't leak cn.c.
panicking := true
defer func() {
if panicking {
cn.c.Close()
}
}()
cn.buf = bufio.NewReader(cn.c)
cn.startup(o)
// reset the deadline, in case one was set (see dial)
if timeout, ok := o["connect_timeout"]; ok && timeout != "0" {
err = cn.c.SetDeadline(time.Time{})
}
panicking = false
return cn, err
}
func dial(d Dialer, o values) (net.Conn, error) {
ntw, addr := network(o)
// SSL is not necessary or supported over UNIX domain sockets
if ntw == "unix" {
o["sslmode"] = "disable"
}
// Zero or not specified means wait indefinitely.
if timeout, ok := o["connect_timeout"]; ok && timeout != "0" {
seconds, err := strconv.ParseInt(timeout, 10, 0)
if err != nil {
return nil, fmt.Errorf("invalid value for parameter connect_timeout: %s", err)
}
duration := time.Duration(seconds) * time.Second
// connect_timeout should apply to the entire connection establishment
// procedure, so we both use a timeout for the TCP connection
// establishment and set a deadline for doing the initial handshake.
// The deadline is then reset after startup() is done.
deadline := time.Now().Add(duration)
conn, err := d.DialTimeout(ntw, addr, duration)
if err != nil {
return nil, err
}
err = conn.SetDeadline(deadline)
return conn, err
}
return d.Dial(ntw, addr)
}
func network(o values) (string, string) {
host := o["host"]
if strings.HasPrefix(host, "/") {
sockPath := path.Join(host, ".s.PGSQL."+o["port"])
return "unix", sockPath
}
return "tcp", net.JoinHostPort(host, o["port"])
}
type values map[string]string
// scanner implements a tokenizer for libpq-style option strings.
type scanner struct {
s []rune
i int
}
// newScanner returns a new scanner initialized with the option string s.
func newScanner(s string) *scanner {
return &scanner{[]rune(s), 0}
}
// Next returns the next rune.
// It returns 0, false if the end of the text has been reached.
func (s *scanner) Next() (rune, bool) {
if s.i >= len(s.s) {
return 0, false
}
r := s.s[s.i]
s.i++
return r, true
}
// SkipSpaces returns the next non-whitespace rune.
// It returns 0, false if the end of the text has been reached.
func (s *scanner) SkipSpaces() (rune, bool) {
r, ok := s.Next()
for unicode.IsSpace(r) && ok {
r, ok = s.Next()
}
return r, ok
}
// parseOpts parses the options from name and adds them to the values.
//
// The parsing code is based on conninfo_parse from libpq's fe-connect.c
func parseOpts(name string, o values) error {
s := newScanner(name)
for {
var (
keyRunes, valRunes []rune
r rune
ok bool
)
if r, ok = s.SkipSpaces(); !ok {
break
}
// Scan the key
for !unicode.IsSpace(r) && r != '=' {
keyRunes = append(keyRunes, r)
if r, ok = s.Next(); !ok {
break
}
}
// Skip any whitespace if we're not at the = yet
if r != '=' {
r, ok = s.SkipSpaces()
}
// The current character should be =
if r != '=' || !ok {
return fmt.Errorf(`missing "=" after %q in connection info string"`, string(keyRunes))
}
// Skip any whitespace after the =
if r, ok = s.SkipSpaces(); !ok {
// If we reach the end here, the last value is just an empty string as per libpq.
o[string(keyRunes)] = ""
break
}
if r != '\'' {
for !unicode.IsSpace(r) {
if r == '\\' {
if r, ok = s.Next(); !ok {
return fmt.Errorf(`missing character after backslash`)
}
}
valRunes = append(valRunes, r)
if r, ok = s.Next(); !ok {
break
}
}
} else {
quote:
for {
if r, ok = s.Next(); !ok {
return fmt.Errorf(`unterminated quoted string literal in connection string`)
}
switch r {
case '\'':
break quote
case '\\':
r, _ = s.Next()
fallthrough
default:
valRunes = append(valRunes, r)
}
}
}
o[string(keyRunes)] = string(valRunes)
}
return nil
}
func (cn *conn) isInTransaction() bool {
return cn.txnStatus == txnStatusIdleInTransaction ||
cn.txnStatus == txnStatusInFailedTransaction
}
func (cn *conn) checkIsInTransaction(intxn bool) {
if cn.isInTransaction() != intxn {
cn.bad = true
errorf("unexpected transaction status %v", cn.txnStatus)
}
}
func (cn *conn) Begin() (_ driver.Tx, err error) {
return cn.begin("")
}
func (cn *conn) begin(mode string) (_ driver.Tx, err error) {
if cn.bad {
return nil, driver.ErrBadConn
}
defer cn.errRecover(&err)
cn.checkIsInTransaction(false)
_, commandTag, err := cn.simpleExec("BEGIN" + mode)
if err != nil {
return nil, err
}
if commandTag != "BEGIN" {
cn.bad = true
return nil, fmt.Errorf("unexpected command tag %s", commandTag)
}
if cn.txnStatus != txnStatusIdleInTransaction {
cn.bad = true
return nil, fmt.Errorf("unexpected transaction status %v", cn.txnStatus)
}
return cn, nil
}
func (cn *conn) closeTxn() {
if finish := cn.txnFinish; finish != nil {
finish()
}
}
func (cn *conn) Commit() (err error) {
defer cn.closeTxn()
if cn.bad {
return driver.ErrBadConn
}
defer cn.errRecover(&err)
cn.checkIsInTransaction(true)
// We don't want the client to think that everything is okay if it tries
// to commit a failed transaction. However, no matter what we return,
// database/sql will release this connection back into the free connection
// pool so we have to abort the current transaction here. Note that you
// would get the same behaviour if you issued a COMMIT in a failed
// transaction, so it's also the least surprising thing to do here.
if cn.txnStatus == txnStatusInFailedTransaction {
if err := cn.Rollback(); err != nil {
return err
}
return ErrInFailedTransaction
}
_, commandTag, err := cn.simpleExec("COMMIT")
if err != nil {
if cn.isInTransaction() {
cn.bad = true
}
return err
}
if commandTag != "COMMIT" {
cn.bad = true
return fmt.Errorf("unexpected command tag %s", commandTag)
}
cn.checkIsInTransaction(false)
return nil
}
func (cn *conn) Rollback() (err error) {
defer cn.closeTxn()
if cn.bad {
return driver.ErrBadConn
}
defer cn.errRecover(&err)
cn.checkIsInTransaction(true)
_, commandTag, err := cn.simpleExec("ROLLBACK")
if err != nil {
if cn.isInTransaction() {
cn.bad = true
}
return err
}
if commandTag != "ROLLBACK" {
return fmt.Errorf("unexpected command tag %s", commandTag)
}
cn.checkIsInTransaction(false)
return nil
}
func (cn *conn) gname() string {
cn.namei++
return strconv.FormatInt(int64(cn.namei), 10)
}
func (cn *conn) simpleExec(q string) (res driver.Result, commandTag string, err error) {
b := cn.writeBuf('Q')
b.string(q)
cn.send(b)
for {
t, r := cn.recv1()
switch t {
case 'C':
res, commandTag = cn.parseComplete(r.string())
case 'Z':
cn.processReadyForQuery(r)
if res == nil && err == nil {
err = errUnexpectedReady
}
// done
return
case 'E':
err = parseError(r)
case 'I':
res = emptyRows
case 'T', 'D':
// ignore any results
default:
cn.bad = true
errorf("unknown response for simple query: %q", t)
}
}
}
func (cn *conn) simpleQuery(q string) (res *rows, err error) {
defer cn.errRecover(&err)
b := cn.writeBuf('Q')
b.string(q)
cn.send(b)
for {
t, r := cn.recv1()
switch t {
case 'C', 'I':
// We allow queries which don't return any results through Query as
// well as Exec. We still have to give database/sql a rows object
// the user can close, though, to avoid connections from being
// leaked. A "rows" with done=true works fine for that purpose.
if err != nil {
cn.bad = true
errorf("unexpected message %q in simple query execution", t)
}
if res == nil {
res = &rows{
cn: cn,
}
}
// Set the result and tag to the last command complete if there wasn't a
// query already run. Although queries usually return from here and cede
// control to Next, a query with zero results does not.
if t == 'C' && res.colNames == nil {
res.result, res.tag = cn.parseComplete(r.string())
}
res.done = true
case 'Z':
cn.processReadyForQuery(r)
// done
return
case 'E':
res = nil
err = parseError(r)
case 'D':
if res == nil {
cn.bad = true
errorf("unexpected DataRow in simple query execution")
}
// the query didn't fail; kick off to Next
cn.saveMessage(t, r)
return
case 'T':
// res might be non-nil here if we received a previous
// CommandComplete, but that's fine; just overwrite it
res = &rows{cn: cn}
res.colNames, res.colFmts, res.colTyps = parsePortalRowDescribe(r)
// To work around a bug in QueryRow in Go 1.2 and earlier, wait
// until the first DataRow has been received.
default:
cn.bad = true
errorf("unknown response for simple query: %q", t)
}
}
}
type noRows struct{}
var emptyRows noRows
var _ driver.Result = noRows{}
func (noRows) LastInsertId() (int64, error) {
return 0, errNoLastInsertID
}
func (noRows) RowsAffected() (int64, error) {
return 0, errNoRowsAffected
}
// Decides which column formats to use for a prepared statement. The input is
// an array of type oids, one element per result column.
func decideColumnFormats(colTyps []fieldDesc, forceText bool) (colFmts []format, colFmtData []byte) {
if len(colTyps) == 0 {
return nil, colFmtDataAllText
}
colFmts = make([]format, len(colTyps))
if forceText {
return colFmts, colFmtDataAllText
}
allBinary := true
allText := true
for i, t := range colTyps {
switch t.OID {
// This is the list of types to use binary mode for when receiving them
// through a prepared statement. If a type appears in this list, it
// must also be implemented in binaryDecode in encode.go.
case oid.T_bytea:
fallthrough
case oid.T_int8:
fallthrough
case oid.T_int4:
fallthrough
case oid.T_int2:
fallthrough
case oid.T_uuid:
colFmts[i] = formatBinary
allText = false
default:
allBinary = false
}
}
if allBinary {
return colFmts, colFmtDataAllBinary
} else if allText {
return colFmts, colFmtDataAllText
} else {
colFmtData = make([]byte, 2+len(colFmts)*2)
binary.BigEndian.PutUint16(colFmtData, uint16(len(colFmts)))
for i, v := range colFmts {
binary.BigEndian.PutUint16(colFmtData[2+i*2:], uint16(v))
}
return colFmts, colFmtData
}
}
func (cn *conn) prepareTo(q, stmtName string) *stmt {
st := &stmt{cn: cn, name: stmtName}
b := cn.writeBuf('P')
b.string(st.name)
b.string(q)
b.int16(0)
b.next('D')
b.byte('S')
b.string(st.name)
b.next('S')
cn.send(b)
cn.readParseResponse()
st.paramTyps, st.colNames, st.colTyps = cn.readStatementDescribeResponse()
st.colFmts, st.colFmtData = decideColumnFormats(st.colTyps, cn.disablePreparedBinaryResult)
cn.readReadyForQuery()
return st
}
func (cn *conn) Prepare(q string) (_ driver.Stmt, err error) {
if cn.bad {
return nil, driver.ErrBadConn
}
defer cn.errRecover(&err)
if len(q) >= 4 && strings.EqualFold(q[:4], "COPY") {
s, err := cn.prepareCopyIn(q)
if err == nil {
cn.inCopy = true
}
return s, err
}
return cn.prepareTo(q, cn.gname()), nil
}
func (cn *conn) Close() (err error) {
// Skip cn.bad return here because we always want to close a connection.
defer cn.errRecover(&err)
// Ensure that cn.c.Close is always run. Since error handling is done with
// panics and cn.errRecover, the Close must be in a defer.
defer func() {
cerr := cn.c.Close()
if err == nil {
err = cerr
}
}()
// Don't go through send(); ListenerConn relies on us not scribbling on the
// scratch buffer of this connection.
return cn.sendSimpleMessage('X')
}
// Implement the "Queryer" interface
func (cn *conn) Query(query string, args []driver.Value) (driver.Rows, error) {
return cn.query(query, args)
}
func (cn *conn) query(query string, args []driver.Value) (_ *rows, err error) {
if cn.bad {
return nil, driver.ErrBadConn
}
if cn.inCopy {
return nil, errCopyInProgress
}
defer cn.errRecover(&err)
// Check to see if we can use the "simpleQuery" interface, which is
// *much* faster than going through prepare/exec
if len(args) == 0 {
return cn.simpleQuery(query)
}
if cn.binaryParameters {
cn.sendBinaryModeQuery(query, args)
cn.readParseResponse()
cn.readBindResponse()
rows := &rows{cn: cn}
rows.colNames, rows.colFmts, rows.colTyps = cn.readPortalDescribeResponse()
cn.postExecuteWorkaround()
return rows, nil
}
st := cn.prepareTo(query, "")
st.exec(args)
return &rows{
cn: cn,
colNames: st.colNames,
colTyps: st.colTyps,
colFmts: st.colFmts,
}, nil
}
// Implement the optional "Execer" interface for one-shot queries
func (cn *conn) Exec(query string, args []driver.Value) (res driver.Result, err error) {
if cn.bad {
return nil, driver.ErrBadConn
}
defer cn.errRecover(&err)
// Check to see if we can use the "simpleExec" interface, which is
// *much* faster than going through prepare/exec
if len(args) == 0 {
// ignore commandTag, our caller doesn't care
r, _, err := cn.simpleExec(query)
return r, err
}
if cn.binaryParameters {
cn.sendBinaryModeQuery(query, args)
cn.readParseResponse()
cn.readBindResponse()
cn.readPortalDescribeResponse()
cn.postExecuteWorkaround()
res, _, err = cn.readExecuteResponse("Execute")
return res, err
}
// Use the unnamed statement to defer planning until bind
// time, or else value-based selectivity estimates cannot be
// used.
st := cn.prepareTo(query, "")
r, err := st.Exec(args)
if err != nil {
panic(err)
}
return r, err
}
func (cn *conn) send(m *writeBuf) {
_, err := cn.c.Write(m.wrap())
if err != nil {
panic(err)
}
}
func (cn *conn) sendStartupPacket(m *writeBuf) error {
_, err := cn.c.Write((m.wrap())[1:])
return err
}
// Send a message of type typ to the server on the other end of cn. The
// message should have no payload. This method does not use the scratch
// buffer.
func (cn *conn) sendSimpleMessage(typ byte) (err error) {
_, err = cn.c.Write([]byte{typ, '\x00', '\x00', '\x00', '\x04'})
return err
}
// saveMessage memorizes a message and its buffer in the conn struct.
// recvMessage will then return these values on the next call to it. This
// method is useful in cases where you have to see what the next message is
// going to be (e.g. to see whether it's an error or not) but you can't handle
// the message yourself.
func (cn *conn) saveMessage(typ byte, buf *readBuf) {
if cn.saveMessageType != 0 {
cn.bad = true
errorf("unexpected saveMessageType %d", cn.saveMessageType)
}
cn.saveMessageType = typ
cn.saveMessageBuffer = *buf
}
// recvMessage receives any message from the backend, or returns an error if
// a problem occurred while reading the message.
func (cn *conn) recvMessage(r *readBuf) (byte, error) {
// workaround for a QueryRow bug, see exec
if cn.saveMessageType != 0 {
t := cn.saveMessageType
*r = cn.saveMessageBuffer
cn.saveMessageType = 0
cn.saveMessageBuffer = nil
return t, nil
}
x := cn.scratch[:5]
_, err := io.ReadFull(cn.buf, x)
if err != nil {
return 0, err
}
// read the type and length of the message that follows
t := x[0]
n := int(binary.BigEndian.Uint32(x[1:])) - 4
var y []byte
if n <= len(cn.scratch) {
y = cn.scratch[:n]
} else {
y = make([]byte, n)
}
_, err = io.ReadFull(cn.buf, y)
if err != nil {
return 0, err
}
*r = y
return t, nil
}
// recv receives a message from the backend, but if an error happened while
// reading the message or the received message was an ErrorResponse, it panics.
// NoticeResponses are ignored. This function should generally be used only
// during the startup sequence.
func (cn *conn) recv() (t byte, r *readBuf) {
for {
var err error
r = &readBuf{}
t, err = cn.recvMessage(r)
if err != nil {
panic(err)
}
switch t {
case 'E':
panic(parseError(r))
case 'N':
if n := cn.noticeHandler; n != nil {
n(parseError(r))
}
default:
return
}
}
}
// recv1Buf is exactly equivalent to recv1, except it uses a buffer supplied by
// the caller to avoid an allocation.
func (cn *conn) recv1Buf(r *readBuf) byte {
for {
t, err := cn.recvMessage(r)
if err != nil {
panic(err)
}
switch t {
case 'A':
// ignore
case 'N':
if n := cn.noticeHandler; n != nil {
n(parseError(r))
}
case 'S':
cn.processParameterStatus(r)
default:
return t
}
}
}
// recv1 receives a message from the backend, panicking if an error occurs
// while attempting to read it. All asynchronous messages are ignored, with
// the exception of ErrorResponse.
func (cn *conn) recv1() (t byte, r *readBuf) {
r = &readBuf{}
t = cn.recv1Buf(r)
return t, r
}
func (cn *conn) ssl(o values) error {
upgrade, err := ssl(o)
if err != nil {
return err
}
if upgrade == nil {
// Nothing to do
return nil
}
w := cn.writeBuf(0)
w.int32(80877103)
if err = cn.sendStartupPacket(w); err != nil {
return err
}
b := cn.scratch[:1]
_, err = io.ReadFull(cn.c, b)
if err != nil {
return err
}
if b[0] != 'S' {
return ErrSSLNotSupported
}
cn.c, err = upgrade(cn.c)
return err
}
// isDriverSetting returns true iff a setting is purely for configuring the
// driver's options and should not be sent to the server in the connection
// startup packet.
func isDriverSetting(key string) bool {
switch key {
case "host", "port":
return true
case "password":
return true
case "sslmode", "sslcert", "sslkey", "sslrootcert":
return true
case "fallback_application_name":
return true
case "connect_timeout":
return true
case "disable_prepared_binary_result":
return true
case "binary_parameters":
return true
default:
return false
}
}
func (cn *conn) startup(o values) {
w := cn.writeBuf(0)
w.int32(196608)
// Send the backend the name of the database we want to connect to, and the
// user we want to connect as. Additionally, we send over any run-time
// parameters potentially included in the connection string. If the server
// doesn't recognize any of them, it will reply with an error.
for k, v := range o {
if isDriverSetting(k) {
// skip options which can't be run-time parameters
continue
}
// The protocol requires us to supply the database name as "database"
// instead of "dbname".
if k == "dbname" {
k = "database"
}
w.string(k)
w.string(v)
}
w.string("")
if err := cn.sendStartupPacket(w); err != nil {
panic(err)
}
for {
t, r := cn.recv()
switch t {
case 'K':
cn.processBackendKeyData(r)
case 'S':
cn.processParameterStatus(r)
case 'R':
cn.auth(r, o)
case 'Z':
cn.processReadyForQuery(r)
return
default:
errorf("unknown response for startup: %q", t)
}
}
}
func (cn *conn) auth(r *readBuf, o values) {
switch code := r.int32(); code {
case 0:
// OK
case 3:
w := cn.writeBuf('p')
w.string(o["password"])
cn.send(w)
t, r := cn.recv()
if t != 'R' {
errorf("unexpected password response: %q", t)
}
if r.int32() != 0 {
errorf("unexpected authentication response: %q", t)
}
case 5:
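// MD5 authentication: the server sends a 4-byte salt and expects the
// response "md5" + hex(md5(md5(password + user) + salt)).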
s := string(r.next(4))
w := cn.writeBuf('p')
w.string("md5" + md5s(md5s(o["password"]+o["user"])+s))
cn.send(w)
t, r := cn.recv()
if t != 'R' {
errorf("unexpected password response: %q", t)
}
if r.int32() != 0 {
errorf("unexpected authentication response: %q", t)
}
default:
errorf("unknown authentication response: %d", code)
}
}
type format int
const formatText format = 0
const formatBinary format = 1
// One result-column format code with the value 1 (i.e. all binary).
var colFmtDataAllBinary = []byte{0, 1, 0, 1}
// No result-column format codes (i.e. all text).
var colFmtDataAllText = []byte{0, 0}
type stmt struct {
cn *conn
name string
colNames []string
colFmts []format
colFmtData []byte
colTyps []fieldDesc
paramTyps []oid.Oid
closed bool
}
func (st *stmt) Close() (err error) {
if st.closed {
return nil
}
if st.cn.bad {
return driver.ErrBadConn
}
defer st.cn.errRecover(&err)
w := st.cn.writeBuf('C')
w.byte('S')
w.string(st.name)
st.cn.send(w)
st.cn.send(st.cn.writeBuf('S'))
t, _ := st.cn.recv1()
if t != '3' {
st.cn.bad = true
errorf("unexpected close response: %q", t)
}
st.closed = true
t, r := st.cn.recv1()
if t != 'Z' {
st.cn.bad = true
errorf("expected ready for query, but got: %q", t)
}
st.cn.processReadyForQuery(r)
return nil
}
func (st *stmt) Query(v []driver.Value) (r driver.Rows, err error) {
if st.cn.bad {
return nil, driver.ErrBadConn
}
defer st.cn.errRecover(&err)
st.exec(v)
return &rows{
cn: st.cn,
colNames: st.colNames,
colTyps: st.colTyps,
colFmts: st.colFmts,
}, nil
}
func (st *stmt) Exec(v []driver.Value) (res driver.Result, err error) {
if st.cn.bad {
return nil, driver.ErrBadConn
}
defer st.cn.errRecover(&err)
st.exec(v)
res, _, err = st.cn.readExecuteResponse("simple query")
return res, err
}
func (st *stmt) exec(v []driver.Value) {
if len(v) >= 65536 {
errorf("got %d parameters but PostgreSQL only supports 65535 parameters", len(v))
}
if len(v) != len(st.paramTyps) {
errorf("got %d parameters but the statement requires %d", len(v), len(st.paramTyps))
}
cn := st.cn
w := cn.writeBuf('B')
w.byte(0) // unnamed portal
w.string(st.name)
if cn.binaryParameters {
cn.sendBinaryParameters(w, v)
} else {
w.int16(0)
w.int16(len(v))
for i, x := range v {
if x == nil {
w.int32(-1)
} else {
b := encode(&cn.parameterStatus, x, st.paramTyps[i])
w.int32(len(b))
w.bytes(b)
}
}
}
w.bytes(st.colFmtData)
w.next('E')
w.byte(0)
w.int32(0)
w.next('S')
cn.send(w)
cn.readBindResponse()
cn.postExecuteWorkaround()
}
func (st *stmt) NumInput() int {
return len(st.paramTyps)
}
// parseComplete parses the "command tag" from a CommandComplete message, and
// returns the number of rows affected (if applicable) and a string
// identifying only the command that was executed, e.g. "ALTER TABLE". If the
// command tag could not be parsed, parseComplete panics.
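//
// For example, the tag "UPDATE 5" yields (driver.RowsAffected(5), "UPDATE"),
// "INSERT 0 1" yields (driver.RowsAffected(1), "INSERT"), and a tag without a
// row count such as "ALTER TABLE" yields (driver.RowsAffected(0), "ALTER TABLE").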
func (cn *conn) parseComplete(commandTag string) (driver.Result, string) {
commandsWithAffectedRows := []string{
"SELECT ",
// INSERT is handled below
"UPDATE ",
"DELETE ",
"FETCH ",
"MOVE ",
"COPY ",
}
var affectedRows *string
for _, tag := range commandsWithAffectedRows {
if strings.HasPrefix(commandTag, tag) {
t := commandTag[len(tag):]
affectedRows = &t
commandTag = tag[:len(tag)-1]
break
}
}
// INSERT also includes the oid of the inserted row in its command tag.
// Oids in user tables are deprecated, and the oid is only returned when
// exactly one row is inserted, so it's unlikely to be of value to any
// real-world application and we can ignore it.
if affectedRows == nil && strings.HasPrefix(commandTag, "INSERT ") {
parts := strings.Split(commandTag, " ")
if len(parts) != 3 {
cn.bad = true
errorf("unexpected INSERT command tag %s", commandTag)
}
affectedRows = &parts[len(parts)-1]
commandTag = "INSERT"
}
// There should be no affected rows attached to the tag, just return it
if affectedRows == nil {
return driver.RowsAffected(0), commandTag
}
n, err := strconv.ParseInt(*affectedRows, 10, 64)
if err != nil {
cn.bad = true
errorf("could not parse commandTag: %s", err)
}
return driver.RowsAffected(n), commandTag
}
type rows struct {
cn *conn
finish func()
colNames []string
colTyps []fieldDesc
colFmts []format
done bool
rb readBuf
result driver.Result
tag string
}
func (rs *rows) Close() error {
if finish := rs.finish; finish != nil {
defer finish()
}
// no need to look at cn.bad as Next() will
for {
err := rs.Next(nil)
switch err {
case nil:
case io.EOF:
// rs.Next can return io.EOF on both 'Z' (ready for query) and 'T' (row
// description, used with HasNextResultSet). We need to fetch messages until
// we hit a 'Z', which is done by waiting for done to be set.
if rs.done {
return nil
}
default:
return err
}
}
}
func (rs *rows) Columns() []string {
return rs.colNames
}
func (rs *rows) Result() driver.Result {
if rs.result == nil {
return emptyRows
}
return rs.result
}
func (rs *rows) Tag() string {
return rs.tag
}
func (rs *rows) Next(dest []driver.Value) (err error) {
if rs.done {
return io.EOF
}
conn := rs.cn
if conn.bad {
return driver.ErrBadConn
}
defer conn.errRecover(&err)
for {
t := conn.recv1Buf(&rs.rb)
switch t {
case 'E':
err = parseError(&rs.rb)
case 'C', 'I':
if t == 'C' {
rs.result, rs.tag = conn.parseComplete(rs.rb.string())
}
continue
case 'Z':
conn.processReadyForQuery(&rs.rb)
rs.done = true
if err != nil {
return err
}
return io.EOF
case 'D':
n := rs.rb.int16()
if err != nil {
conn.bad = true
errorf("unexpected DataRow after error %s", err)
}
if n < len(dest) {
dest = dest[:n]
}
for i := range dest {
l := rs.rb.int32()
if l == -1 {
dest[i] = nil
continue
}
dest[i] = decode(&conn.parameterStatus, rs.rb.next(l), rs.colTyps[i].OID, rs.colFmts[i])
}
return
case 'T':
rs.colNames, rs.colFmts, rs.colTyps = parsePortalRowDescribe(&rs.rb)
return io.EOF
default:
errorf("unexpected message after execute: %q", t)
}
}
}
func (rs *rows) HasNextResultSet() bool {
return !rs.done
}
func (rs *rows) NextResultSet() error {
return nil
}
// QuoteIdentifier quotes an "identifier" (e.g. a table or a column name) to be
// used as part of an SQL statement. For example:
//
// tblname := "my_table"
// data := "my_data"
// quoted := pq.QuoteIdentifier(tblname)
// err := db.Exec(fmt.Sprintf("INSERT INTO %s VALUES ($1)", quoted), data)
//
// Any double quotes in name will be escaped. The quoted identifier will be
// case sensitive when used in a query. If the input string contains a zero
// byte, the result will be truncated immediately before it.
func QuoteIdentifier(name string) string {
end := strings.IndexRune(name, 0)
if end > -1 {
name = name[:end]
}
return `"` + strings.Replace(name, `"`, `""`, -1) + `"`
}
func md5s(s string) string {
h := md5.New()
h.Write([]byte(s))
return fmt.Sprintf("%x", h.Sum(nil))
}
func (cn *conn) sendBinaryParameters(b *writeBuf, args []driver.Value) {
// Do one pass over the parameters to see if we're going to send any of
// them over in binary. If we are, create a paramFormats array at the
// same time.
var paramFormats []int
for i, x := range args {
_, ok := x.([]byte)
if ok {
if paramFormats == nil {
paramFormats = make([]int, len(args))
}
paramFormats[i] = 1
}
}
if paramFormats == nil {
b.int16(0)
} else {
b.int16(len(paramFormats))
for _, x := range paramFormats {
b.int16(x)
}
}
b.int16(len(args))
for _, x := range args {
if x == nil {
b.int32(-1)
} else {
datum := binaryEncode(&cn.parameterStatus, x)
b.int32(len(datum))
b.bytes(datum)
}
}
}
func (cn *conn) sendBinaryModeQuery(query string, args []driver.Value) {
if len(args) >= 65536 {
errorf("got %d parameters but PostgreSQL only supports 65535 parameters", len(args))
}
b := cn.writeBuf('P')
b.byte(0) // unnamed statement
b.string(query)
b.int16(0)
b.next('B')
b.int16(0) // unnamed portal and statement
cn.sendBinaryParameters(b, args)
b.bytes(colFmtDataAllText)
b.next('D')
b.byte('P')
b.byte(0) // unnamed portal
b.next('E')
b.byte(0)
b.int32(0)
b.next('S')
cn.send(b)
}
func (cn *conn) processParameterStatus(r *readBuf) {
var err error
param := r.string()
switch param {
case "server_version":
var major1 int
var major2 int
var minor int
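// A three-part version such as "9.6.5" is folded into 90605. Note that a
// two-part version string (e.g. "10.4", as reported by newer servers) does
// not match this format, so Sscanf fails and serverVersion stays zero.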
_, err = fmt.Sscanf(r.string(), "%d.%d.%d", &major1, &major2, &minor)
if err == nil {
cn.parameterStatus.serverVersion = major1*10000 + major2*100 + minor
}
case "TimeZone":
cn.parameterStatus.currentLocation, err = time.LoadLocation(r.string())
if err != nil {
cn.parameterStatus.currentLocation = nil
}
default:
// ignore
}
}
func (cn *conn) processReadyForQuery(r *readBuf) {
cn.txnStatus = transactionStatus(r.byte())
}
func (cn *conn) readReadyForQuery() {
t, r := cn.recv1()
switch t {
case 'Z':
cn.processReadyForQuery(r)
return
default:
cn.bad = true
errorf("unexpected message %q; expected ReadyForQuery", t)
}
}
func (cn *conn) processBackendKeyData(r *readBuf) {
cn.processID = r.int32()
cn.secretKey = r.int32()
}
func (cn *conn) readParseResponse() {
t, r := cn.recv1()
switch t {
case '1':
return
case 'E':
err := parseError(r)
cn.readReadyForQuery()
panic(err)
default:
cn.bad = true
errorf("unexpected Parse response %q", t)
}
}
func (cn *conn) readStatementDescribeResponse() (paramTyps []oid.Oid, colNames []string, colTyps []fieldDesc) {
for {
t, r := cn.recv1()
switch t {
case 't':
nparams := r.int16()
paramTyps = make([]oid.Oid, nparams)
for i := range paramTyps {
paramTyps[i] = r.oid()
}
case 'n':
return paramTyps, nil, nil
case 'T':
colNames, colTyps = parseStatementRowDescribe(r)
return paramTyps, colNames, colTyps
case 'E':
err := parseError(r)
cn.readReadyForQuery()
panic(err)
default:
cn.bad = true
errorf("unexpected Describe statement response %q", t)
}
}
}
func (cn *conn) readPortalDescribeResponse() (colNames []string, colFmts []format, colTyps []fieldDesc) {
t, r := cn.recv1()
switch t {
case 'T':
return parsePortalRowDescribe(r)
case 'n':
return nil, nil, nil
case 'E':
err := parseError(r)
cn.readReadyForQuery()
panic(err)
default:
cn.bad = true
errorf("unexpected Describe response %q", t)
}
panic("not reached")
}
func (cn *conn) readBindResponse() {
t, r := cn.recv1()
switch t {
case '2':
return
case 'E':
err := parseError(r)
cn.readReadyForQuery()
panic(err)
default:
cn.bad = true
errorf("unexpected Bind response %q", t)
}
}
func (cn *conn) postExecuteWorkaround() {
// Work around a bug in sql.DB.QueryRow: in Go 1.2 and earlier it ignores
// any errors from rows.Next, which masks errors that happened during the
// execution of the query. To avoid the problem in common cases, we wait
// here for one more message from the database. If it's not an error the
// query will likely succeed (or perhaps has already, if it's a
// CommandComplete), so we push the message into the conn struct; recv1
// will return it as the next message for rows.Next or rows.Close.
// However, if it's an error, we wait until ReadyForQuery and then return
// the error to our caller.
for {
t, r := cn.recv1()
switch t {
case 'E':
err := parseError(r)
cn.readReadyForQuery()
panic(err)
case 'C', 'D', 'I':
// the query didn't fail, but we can't process this message
cn.saveMessage(t, r)
return
default:
cn.bad = true
errorf("unexpected message during extended query execution: %q", t)
}
}
}
// Only for Exec(), since we ignore the returned data
func (cn *conn) readExecuteResponse(protocolState string) (res driver.Result, commandTag string, err error) {
for {
t, r := cn.recv1()
switch t {
case 'C':
if err != nil {
cn.bad = true
errorf("unexpected CommandComplete after error %s", err)
}
res, commandTag = cn.parseComplete(r.string())
case 'Z':
cn.processReadyForQuery(r)
if res == nil && err == nil {
err = errUnexpectedReady
}
return res, commandTag, err
case 'E':
err = parseError(r)
case 'T', 'D', 'I':
if err != nil {
cn.bad = true
errorf("unexpected %q after error %s", t, err)
}
if t == 'I' {
res = emptyRows
}
// ignore any results
default:
cn.bad = true
errorf("unknown %s response: %q", protocolState, t)
}
}
}
func parseStatementRowDescribe(r *readBuf) (colNames []string, colTyps []fieldDesc) {
n := r.int16()
colNames = make([]string, n)
colTyps = make([]fieldDesc, n)
for i := range colNames {
colNames[i] = r.string()
r.next(6)
colTyps[i].OID = r.oid()
colTyps[i].Len = r.int16()
colTyps[i].Mod = r.int32()
// format code not known when describing a statement; always 0
r.next(2)
}
return
}
func parsePortalRowDescribe(r *readBuf) (colNames []string, colFmts []format, colTyps []fieldDesc) {
n := r.int16()
colNames = make([]string, n)
colFmts = make([]format, n)
colTyps = make([]fieldDesc, n)
for i := range colNames {
colNames[i] = r.string()
r.next(6)
colTyps[i].OID = r.oid()
colTyps[i].Len = r.int16()
colTyps[i].Mod = r.int32()
colFmts[i] = format(r.int16())
}
return
}
// parseEnviron tries to mimic some of libpq's environment handling
//
// To ease testing, it does not directly reference os.Environ, but is
// designed to accept its output.
//
// Environment-set connection information is intended to have a higher
// precedence than a library default but lower than any explicitly
// passed information (such as in the URL or connection string).
func parseEnviron(env []string) (out map[string]string) {
out = make(map[string]string)
for _, v := range env {
parts := strings.SplitN(v, "=", 2)
accrue := func(keyname string) {
out[keyname] = parts[1]
}
unsupported := func() {
panic(fmt.Sprintf("setting %v not supported", parts[0]))
}
// The order of these is the same as is seen in the
// PostgreSQL 9.1 manual. Unsupported but well-defined
// keys cause a panic; these should be unset prior to
// execution. Options which pq expects to be set to a
// certain value are allowed, but must be set to that
// value if present (they can, of course, be absent).
switch parts[0] {
case "PGHOST":
accrue("host")
case "PGHOSTADDR":
unsupported()
case "PGPORT":
accrue("port")
case "PGDATABASE":
accrue("dbname")
case "PGUSER":
accrue("user")
case "PGPASSWORD":
accrue("password")
case "PGSERVICE", "PGSERVICEFILE", "PGREALM":
unsupported()
case "PGOPTIONS":
accrue("options")
case "PGAPPNAME":
accrue("application_name")
case "PGSSLMODE":
accrue("sslmode")
case "PGSSLCERT":
accrue("sslcert")
case "PGSSLKEY":
accrue("sslkey")
case "PGSSLROOTCERT":
accrue("sslrootcert")
case "PGREQUIRESSL", "PGSSLCRL":
unsupported()
case "PGREQUIREPEER":
unsupported()
case "PGKRBSRVNAME", "PGGSSLIB":
unsupported()
case "PGCONNECT_TIMEOUT":
accrue("connect_timeout")
case "PGCLIENTENCODING":
accrue("client_encoding")
case "PGDATESTYLE":
accrue("datestyle")
case "PGTZ":
accrue("timezone")
case "PGGEQO":
accrue("geqo")
case "PGSYSCONFDIR", "PGLOCALEDIR":
unsupported()
}
}
return out
}
// isUTF8 returns whether name is a fuzzy variation of the string "UTF-8".
func isUTF8(name string) bool {
// Recognize all sorts of silly things as "UTF-8", like Postgres does
s := strings.Map(alnumLowerASCII, name)
return s == "utf8" || s == "unicode"
}
func alnumLowerASCII(ch rune) rune {
if 'A' <= ch && ch <= 'Z' {
return ch + ('a' - 'A')
}
if 'a' <= ch && ch <= 'z' || '0' <= ch && ch <= '9' {
return ch
}
return -1 // discard
}
|
[
"\"PGPASSFILE\"",
"\"HOME\""
] |
[] |
[
"HOME",
"PGPASSFILE"
] |
[]
|
["HOME", "PGPASSFILE"]
|
go
| 2 | 0 | |
tests/manual/test_qaoa.py
|
import unittest
import networkx as nx
import numpy as np
from docplex.mp.model import Model
from qiskit import BasicAer
from qiskit.aqua import aqua_globals, QuantumInstance
from qiskit.aqua.algorithms import QAOA
from qiskit.aqua.components.optimizers import SPSA
from qiskit.optimization.applications.ising import docplex, max_cut
from qiskit.optimization.applications.ising.common import sample_most_likely
from quantastica.qiskit_toaster import ToasterJob
try:
from . import common
except Exception:
import common
import os
@unittest.skipUnless(
os.getenv("SLOW") == "1",
"Skipping this test (environment variable SLOW must be set to 1)",
)
class TestQAOA(common.TestToasterBase):
def test_qaoa(self):
print("Running toaster test...")
toaster_backend = self.toaster_backend("qasm_simulator")
toaster_results = self.run_simulation(toaster_backend)
print("Run time:", ToasterJob.ToasterJob._run_time, "seconds")
print("Running AER test...")
aer_backend = BasicAer.get_backend("qasm_simulator")
aer_results = self.run_simulation(aer_backend)
print("===== Calculations done =====")
print(" ==== AER Results =====")
print(aer_results)
print(" ==== Toaster Results =====")
print(toaster_results)
threshold = 0.9
aer_k = abs(
aer_results["maxcut_objective"] / aer_results["solution_objective"]
)
toaster_k = abs(
toaster_results["maxcut_objective"]
/ toaster_results["solution_objective"]
)
self.assertGreater(aer_k, threshold)
self.assertGreater(toaster_k, threshold)
def run_simulation(self, backend):
seed = int(os.environ.get("SEED", "40598"))
n = int(os.environ.get("N", "4"))
#
# Random 3-regular graph with n nodes (n defaults to 4, see the N environment variable)
#
graph = nx.random_regular_graph(3, n, seed=seed)
for e in graph.edges():
graph[e[0]][e[1]]["weight"] = 1.0
# Compute the weight matrix from the graph
w = np.zeros([n, n])
for i in range(n):
for j in range(n):
temp = graph.get_edge_data(i, j, default=0)
if temp != 0:
w[i, j] = temp["weight"]
# Create an Ising Hamiltonian with docplex.
mdl = Model(name="max_cut")
mdl.node_vars = mdl.binary_var_list(list(range(n)), name="node")
maxcut_func = mdl.sum(
w[i, j] * mdl.node_vars[i] * (1 - mdl.node_vars[j])
for i in range(n)
for j in range(n)
)
mdl.maximize(maxcut_func)
qubit_op, offset = docplex.get_operator(mdl)
aqua_globals.random_seed = seed
# Run quantum algorithm QAOA on qasm simulator
spsa = SPSA(max_trials=250)
qaoa = QAOA(qubit_op, spsa, p=5, max_evals_grouped=4)
quantum_instance = QuantumInstance(
backend,
shots=1024,
seed_simulator=seed,
seed_transpiler=seed,
optimization_level=0,
)
result = qaoa.run(quantum_instance)
x = sample_most_likely(result["eigvecs"][0])
result["solution"] = max_cut.get_graph_solution(x)
result["solution_objective"] = max_cut.max_cut_value(x, w)
result["maxcut_objective"] = result["energy"] + offset
"""
print("energy:", result["energy"])
print("time:", result["eval_time"])
print("max-cut objective:", result["energy"] + offset)
print("solution:", max_cut.get_graph_solution(x))
print("solution objective:", max_cut.max_cut_value(x, w))
"""
return result
if __name__ == "__main__":
unittest.main()
|
[] |
[] |
[
"SLOW",
"SEED",
"N"
] |
[]
|
["SLOW", "SEED", "N"]
|
python
| 3 | 0 | |
server/src/main/java/com/netflix/conductor/bootstrap/Main.java
|
/**
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
/**
*
*/
package com.netflix.conductor.bootstrap;
import com.google.inject.Guice;
import com.google.inject.Injector;
import com.netflix.conductor.dao.IndexDAO;
import com.netflix.conductor.elasticsearch.EmbeddedElasticSearch;
import com.netflix.conductor.elasticsearch.EmbeddedElasticSearchProvider;
import com.netflix.conductor.grpc.server.GRPCServerProvider;
import com.netflix.conductor.jetty.server.JettyServerProvider;
import org.apache.log4j.PropertyConfigurator;
import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.util.Optional;
import java.util.Properties;
/**
* Entry point for the server.
*
* @author Viren
*/
public class Main {
private static final int EMBEDDED_ES_INIT_TIME = 5000;
/**
* Startup class for the server side.
* @param args
* @throws Exception
*/
public static void main(String[] args) throws Exception {
// Load the Conductor configuration file
loadConfigFile(args.length > 0 ? args[0] : System.getenv("CONDUCTOR_CONFIG_FILE"));
if (args.length == 2) {
System.out.println("Using log4j config " + args[1]);
PropertyConfigurator.configure(new FileInputStream(new File(args[1])));
}
Injector bootstrapInjector = Guice.createInjector(new BootstrapModule());
ModulesProvider modulesProvider = bootstrapInjector.getInstance(ModulesProvider.class);
Injector serverInjector = Guice.createInjector(modulesProvider.get());
Optional<EmbeddedElasticSearch> embeddedSearchInstance = serverInjector.getInstance(EmbeddedElasticSearchProvider.class).get();
if (embeddedSearchInstance.isPresent()) {
try {
embeddedSearchInstance.get().start();
/*
* Elasticsearch embedded instance does not notify when it is up and ready to accept incoming requests.
* A possible solution for reading and writing into the index is to wait a specific amount of time.
*/
Thread.sleep(EMBEDDED_ES_INIT_TIME);
} catch (Exception ioe) {
ioe.printStackTrace(System.err);
System.exit(3);
}
}
try {
serverInjector.getInstance(IndexDAO.class).setup();
} catch (Exception e){
e.printStackTrace(System.err);
System.exit(3);
}
System.out.println("\n\n\n");
System.out.println(" _ _ ");
System.out.println(" ___ ___ _ __ __| |_ _ ___| |_ ___ _ __ ");
System.out.println(" / __/ _ \\| '_ \\ / _` | | | |/ __| __/ _ \\| '__|");
System.out.println("| (_| (_) | | | | (_| | |_| | (__| || (_) | | ");
System.out.println(" \\___\\___/|_| |_|\\__,_|\\__,_|\\___|\\__\\___/|_| ");
System.out.println("\n\n\n");
// Start the server-side GRPCServer (gRPC) service
serverInjector.getInstance(GRPCServerProvider.class).get().ifPresent(server -> {
try {
// Start the gRPC server
server.start();
} catch (IOException ioe) {
ioe.printStackTrace(System.err);
System.exit(3);
}
});
// Start the server-side JettyServer (Jetty) service
serverInjector.getInstance(JettyServerProvider.class).get().ifPresent(server -> {
try {
// Start the Jetty server
server.start();
} catch (Exception ioe) {
ioe.printStackTrace(System.err);
System.exit(3);
}
});
}
// Load the configuration file
private static void loadConfigFile(String propertyFile) throws IOException {
if (propertyFile == null) return;
System.out.println("Using config file: " + propertyFile);
Properties props = new Properties(System.getProperties());
props.load(new FileInputStream(propertyFile));
System.setProperties(props);
}
}
|
[
"\"CONDUCTOR_CONFIG_FILE\""
] |
[] |
[
"CONDUCTOR_CONFIG_FILE"
] |
[]
|
["CONDUCTOR_CONFIG_FILE"]
|
java
| 1 | 0 | |
build/releases/release-0.518/src/lisp.py
|
# -----------------------------------------------------------------------------
#
# Copyright 2013-2019 lispers.net - Dino Farinacci <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -----------------------------------------------------------------------------
#
# lisp.py
#
# This file contains all constants, definitions, data structures, packet
# send and receive functions for the LISP protocol according to RFC 6830.
#
#------------------------------------------------------------------------------
import socket
import time
import struct
import binascii
import hmac
import hashlib
import datetime
import os
import sys
import random
import threading
import operator
import netifaces
import platform
import Queue
import traceback
from Crypto.Cipher import AES
import ecdsa
import json
import commands
import copy
import chacha
import poly1305
from geopy.distance import vincenty
import curve25519
use_chacha = (os.getenv("LISP_USE_CHACHA") != None)
use_poly = (os.getenv("LISP_USE_POLY") != None)
#
# For printing the lisp_rloc_probe_list{}.
#
lisp_print_rloc_probe_list = False
#------------------------------------------------------------------------------
#
# Global variables.
#
lisp_hostname = ""
lisp_version = ""
lisp_uptime = ""
lisp_i_am_core = False
lisp_i_am_itr = False
lisp_i_am_etr = False
lisp_i_am_rtr = False
lisp_i_am_mr = False
lisp_i_am_ms = False
lisp_i_am_ddt = False
lisp_log_id = ""
lisp_debug_logging = True
lisp_map_notify_queue = {} # Key is concat of nonce and etr address
lisp_map_servers_list = {} # Key is ms-name/address string, value lisp_ms()
lisp_ddt_map_requestQ = {}
lisp_db_list = [] # Elements are class lisp_mapping()
lisp_group_mapping_list = {} # Elements are class lisp_group_mapping()
lisp_map_resolvers_list = {} # Key is mr-name/address string, value lisp_mr()
lisp_rtr_list = {} # Key is address string, value is lisp_address()
lisp_elp_list = {}
lisp_rle_list = {}
lisp_geo_list = {}
lisp_json_list = {}
lisp_myrlocs = [None, None, None]
lisp_mymacs = {}
#
# Used for multi-tenancy. First dictionary array is indexed by device name
# and second one has value lisp_interface() indexed by a instance-id string.
#
lisp_myinterfaces = {}
lisp_iid_to_interface = {}
lisp_multi_tenant_interfaces = []
lisp_test_mr_timer = None
lisp_rloc_probe_timer = None
#
# Stats variables.
#
lisp_registered_count = 0
#
# For tracking Map-Requesters behind NAT devices.
#
lisp_info_sources_by_address = {}
lisp_info_sources_by_nonce = {}
#
# Store computed keys per RLOC. The key is the nonce from the Map-Request
# at the time it creates the g, p, and public-key values. The value is an
# array of 4 elements, indexed by key-id.
#
lisp_crypto_keys_by_nonce = {}
lisp_crypto_keys_by_rloc_encap = {} # Key is "<rloc>:<port>" tuple
lisp_crypto_keys_by_rloc_decap = {} # Key is "<rloc>:<port>" tuple
lisp_data_plane_security = False
lisp_search_decap_keys = True
lisp_data_plane_logging = False
lisp_frame_logging = False
lisp_flow_logging = False
#
# When NAT-traversal is enabled and lisp-crypto is enabled, an ITR needs
# to send RLOC-probe requests with an ephemeral port that is also used
# for data encapsulation to the RTR. This way the RTR can find the crypto
# key when multiple xTRs are behind the same NAT.
#
lisp_crypto_ephem_port = None
#
# Is the lisp-itr process running as a PITR?
#
lisp_pitr = False
#
# Are we listening on all MAC frames?
#
lisp_l2_overlay = False
#
# RLOC-probing variables. And for NAT-traversal, register only reachable
# RTRs which is determined from the lisp_rloc_probe_list.
#
lisp_rloc_probing = False
lisp_rloc_probe_list = {}
#
# Command "lisp xtr-parameters" register-reachabile-rtrs has opposite polarity
# to lisp_register_all_rtrs. So by default we do not consider RLOC-probing
# reachability status in registering RTRs to the mapping system.
#
lisp_register_all_rtrs = True
#
# Nonce Echo variables.
#
lisp_nonce_echoing = False
lisp_nonce_echo_list = {}
#
# xTR configuration parameters.
#
lisp_nat_traversal = False
#
# xTR configuration parameters. This flag is used to indicate that when a
# map-cache entry is created or updated, we write specific information
# to, say, a Broadcom chip that will do VXLAN encapsulation. This is a way
# to get existing hardware to do L3 overlays with the LISP control-plane
# when all it supports is VXLAN. See lisp_program_vxlan_hardware()
#
lisp_program_hardware = False
#
# Should we write to the lisp.checkpoint file.
#
lisp_checkpoint_map_cache = False
lisp_checkpoint_filename = "./lisp.checkpoint"
#
# Should we write map-cache entries to a named socket for another data-plane?
#
lisp_ipc_data_plane = False
lisp_ipc_dp_socket = None
lisp_ipc_dp_socket_name = "lisp-ipc-data-plane"
#
# This lock is used so the lisp-core process doesn't intermix command
# processing data with show data and packet data.
#
lisp_ipc_lock = None
#
# Use this as a default instance-ID when there are no "lisp interface" commands
# configured. This default instance-ID is taken from the first database-mapping
# command.
#
lisp_default_iid = 0
lisp_default_secondary_iid = 0
#
# Configured list of RTRs that the lisp-core process will insert into
# Info-Reply messages.
#
lisp_ms_rtr_list = [] # Array of type lisp.lisp_address()
#
# Used in an RTR to store a translated port for a translated RLOC. Key is
# the hostname that is sent in an Info-Request; the value is a nested array. See
# lisp_store_nat_info() for details.
#
lisp_nat_state_info = {}
#
# Used for doing global rate-limiting of Map-Requests.
#
lisp_last_map_request_sent = None
#
# Used for doing global rate-limiting of ICMP Too Big messages.
#
lisp_last_icmp_too_big_sent = 0
#
# Array to store 1000 flows.
#
LISP_FLOW_LOG_SIZE = 100
lisp_flow_log = []
#
# Store configured or API added policy parameters.
#
lisp_policies = {}
#
# Load-split pings. We'll hash the first long of an ICMP echo-request and
# echo-reply for testing purposes, to show per-packet load-splitting.
#
lisp_load_split_pings = False
#
# This array is a configured list of IPv6-prefixes that define what part
# of a matching address is used as the crypto-hash. They must be on 4-bit
# boundaries for easy matching.
#
lisp_eid_hashes = []
#
# IPv4 reassembly buffer. We pcapture IPv4 fragments. They can come to the ETR
# when IPv6 is encapsulated in IPv4 and we have an MTU violation for the
# encapsulated packet. The array is indexed by the IPv4 ident field and contains
# an array of packet buffers. Once all fragments have arrived, the IP header
# is removed from all fragments except the first one.
#
lisp_reassembly_queue = {}
#
# Map-Server pubsub cache. Remember Map-Requesters that set the N-bit for
# an EID target it is requesting. Key is EID-prefix in string format with
# bracketed instance-ID included in slash format. The value of the dictionary
# array is a dictionary array of ITR addresses in string format.
#
lisp_pubsub_cache = {}
#
# When "decentralized-push-xtr = yes" is configured, the xTR is also running as
# a Map-Server and Map-Resolver. So Map-Register messages the ETR sends are
# looped back to the lisp-ms process.
#
lisp_decent_push_configured = False
#
# When "decentralized-pull-xtr-[modulus,dns-suffix] is configured, the xTR is
# also running as a Map-Server and Map-Resolver. So Map-Register messages the
# ETR sends is looped back to the lisp-ms process.
#
lisp_decent_modulus = 0
lisp_decent_dns_suffix = None
#
# lisp.lisp_ipc_socket is used by the lisp-itr process during RLOC-probing
# to send the lisp-etr process status about RTRs learned. This is part of
# NAT-traversal support.
#
lisp_ipc_socket = None
#
# Configured in the "lisp encryption-keys" command.
#
lisp_ms_encryption_keys = {}
#
# Used to store NAT translated address state in an RTR when an ltr client
# is sending RLOC-based LISP-Trace messages. If the RTR encounters any
# LISP-Trace error processing called from lisp_rtr_data_plane() then it
# can return a partially filled LISP-Trace packet to the ltr client that
# sits behind a NAT device.
#
# Dictionary array format is:
# key = self.local_addr + ":" + self.local_port
# lisp_rtr_nat_trace_cache[key] = (translated_rloc, translated_port)
#
# And the array elements are added in lisp_trace.rtr_cache_nat_trace().
#
lisp_rtr_nat_trace_cache = {}
#
# Configured glean mappings. The data structure is an array of dictionary
# arrays with keywords "eid-prefix", "group-prefix", "rloc-prefix", and
# "instance-id". If keywords are not in dictionary array, the value is
# wildcarded. The values eid-prefix, group-prefix and rloc-prefix are
# lisp_address() objects so longest-match lookups can be performed. The
# instance-id value is an array of 2 elements that stores the same value in
# both elements if not a range, or the low and high range values.
#
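# As a purely illustrative sketch (addresses and instance-IDs are made up),
# one entry could look like:
#
#   {"eid-prefix": <lisp_address for 10.0.0.0/8>,
#    "rloc-prefix": <lisp_address for 192.168.1.0/24>,
#    "instance-id": [1000, 1000]}
#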
lisp_glean_mappings = []
#
# Gleaned groups data structure. Used to find all (S,G) and (*,G) the gleaned
# EID has joined. This data structure will be used to time out entries that
# have stopped joining. In which case, the RLE is removed from the (S,G) or
# (*,G) whose join timed out.
#
# The dictionary array is indexed by "[<iid>]<eid>" and the value field is a
# dictionary array indexed by group address string. The value of the nested
# dictionary array is a timestamp. When EID 1.1.1.1 has joined groups 224.1.1.1,
# and 224.2.2.2, here is how timestamp 1111 and 2222 are stored.
#
# >>> lisp_gleaned_groups = {}
# >>> lisp_gleaned_groups["[1539]1.1.1.1"] = {}
# >>> lisp_gleaned_groups["[1539]1.1.1.1"]["224.1.1.1"] = 1111
# >>> lisp_gleaned_groups["[1539]1.1.1.1"]["224.2.2.2"] = 2222
# >>> lisp_gleaned_groups
# {'[1539]1.1.1.1': {'224.2.2.2': 2222, '224.1.1.1': 1111}}
#
lisp_gleaned_groups = {}
#
# Use this socket for all ICMP Too-Big messages sent by any process. We are
# centralizing it here.
#
lisp_icmp_raw_socket = None
if (os.getenv("LISP_SEND_ICMP_TOO_BIG") != None):
lisp_icmp_raw_socket = socket.socket(socket.AF_INET, socket.SOCK_RAW,
socket.IPPROTO_ICMP)
lisp_icmp_raw_socket.setsockopt(socket.SOL_IP, socket.IP_HDRINCL, 1)
#endif
lisp_ignore_df_bit = (os.getenv("LISP_IGNORE_DF_BIT") != None)
#------------------------------------------------------------------------------
#
# UDP ports used by LISP.
#
LISP_DATA_PORT = 4341
LISP_CTRL_PORT = 4342
LISP_L2_DATA_PORT = 8472
LISP_VXLAN_DATA_PORT = 4789
LISP_VXLAN_GPE_PORT = 4790
LISP_TRACE_PORT = 2434
#
# Packet type definitions.
#
LISP_MAP_REQUEST = 1
LISP_MAP_REPLY = 2
LISP_MAP_REGISTER = 3
LISP_MAP_NOTIFY = 4
LISP_MAP_NOTIFY_ACK = 5
LISP_MAP_REFERRAL = 6
LISP_NAT_INFO = 7
LISP_ECM = 8
LISP_TRACE = 9
#
# Map-Reply action values.
#
LISP_NO_ACTION = 0
LISP_NATIVE_FORWARD_ACTION = 1
LISP_SEND_MAP_REQUEST_ACTION = 2
LISP_DROP_ACTION = 3
LISP_POLICY_DENIED_ACTION = 4
LISP_AUTH_FAILURE_ACTION = 5
lisp_map_reply_action_string = ["no-action", "native-forward",
"send-map-request", "drop-action", "policy-denied", "auth-failure" ]
#
# Various HMACs alg-ids and lengths (in bytes) used by LISP.
#
LISP_NONE_ALG_ID = 0
LISP_SHA_1_96_ALG_ID = 1
LISP_SHA_256_128_ALG_ID = 2
LISP_MD5_AUTH_DATA_LEN = 16
LISP_SHA1_160_AUTH_DATA_LEN = 20
LISP_SHA2_256_AUTH_DATA_LEN = 32
#
# LCAF types as defined in draft-ietf-lisp-lcaf.
#
LISP_LCAF_NULL_TYPE = 0
LISP_LCAF_AFI_LIST_TYPE = 1
LISP_LCAF_INSTANCE_ID_TYPE = 2
LISP_LCAF_ASN_TYPE = 3
LISP_LCAF_APP_DATA_TYPE = 4
LISP_LCAF_GEO_COORD_TYPE = 5
LISP_LCAF_OPAQUE_TYPE = 6
LISP_LCAF_NAT_TYPE = 7
LISP_LCAF_NONCE_LOC_TYPE = 8
LISP_LCAF_MCAST_INFO_TYPE = 9
LISP_LCAF_ELP_TYPE = 10
LISP_LCAF_SECURITY_TYPE = 11
LISP_LCAF_SOURCE_DEST_TYPE = 12
LISP_LCAF_RLE_TYPE = 13
LISP_LCAF_JSON_TYPE = 14
LISP_LCAF_KV_TYPE = 15
LISP_LCAF_ENCAP_TYPE = 16
#
# TTL constant definitions.
#
LISP_MR_TTL = (24*60)
LISP_REGISTER_TTL = 3
LISP_SHORT_TTL = 1
LISP_NMR_TTL = 15
LISP_GLEAN_TTL = 15
LISP_MCAST_TTL = 15
LISP_IGMP_TTL = 240
LISP_SITE_TIMEOUT_CHECK_INTERVAL = 60 # In units of seconds
LISP_PUBSUB_TIMEOUT_CHECK_INTERVAL = 60 # In units of seconds
LISP_REFERRAL_TIMEOUT_CHECK_INTERVAL = 60 # In units of seconds
LISP_TEST_MR_INTERVAL = 60 # In units of seconds
LISP_MAP_NOTIFY_INTERVAL = 2 # In units of seconds
LISP_DDT_MAP_REQUEST_INTERVAL = 2 # In units of seconds
LISP_MAX_MAP_NOTIFY_RETRIES = 3
LISP_INFO_INTERVAL = 15 # In units of seconds
LISP_MAP_REQUEST_RATE_LIMIT = 5 # In units of seconds
LISP_ICMP_TOO_BIG_RATE_LIMIT = 1 # In units of seconds
#LISP_RLOC_PROBE_TTL = 255
LISP_RLOC_PROBE_TTL = 64
LISP_RLOC_PROBE_INTERVAL = 10 # In units of seconds
LISP_RLOC_PROBE_REPLY_WAIT = 15 # In units of seconds
#LISP_RLOC_PROBE_INTERVAL = 60 # In units of seconds
LISP_DEFAULT_DYN_EID_TIMEOUT = 15 # In units of seconds
LISP_NONCE_ECHO_INTERVAL = 10
LISP_IGMP_TIMEOUT_INTERVAL = 180 # 3 minutes
#
# Cipher Suites defined in RFC 8061:
#
# Cipher Suite 0:
# Reserved
#
# Cipher Suite 1 (LISP_2048MODP_AES128_CBC_SHA256):
# Diffie-Hellman Group: 2048-bit MODP [RFC3526]
# Encryption: AES with 128-bit keys in CBC mode [AES-CBC]
# Integrity: Integrated with AEAD_AES_128_CBC_HMAC_SHA_256 [AES-CBC]
# IV length: 16 bytes
# KDF: HMAC-SHA-256
#
# Cipher Suite 2 (LISP_EC25519_AES128_CBC_SHA256):
# Diffie-Hellman Group: 256-bit Elliptic-Curve 25519 [CURVE25519]
# Encryption: AES with 128-bit keys in CBC mode [AES-CBC]
# Integrity: Integrated with AEAD_AES_128_CBC_HMAC_SHA_256 [AES-CBC]
# IV length: 16 bytes
# KDF: HMAC-SHA-256
#
# Cipher Suite 3 (LISP_2048MODP_AES128_GCM):
# Diffie-Hellman Group: 2048-bit MODP [RFC3526]
# Encryption: AES with 128-bit keys in GCM mode [RFC5116]
# Integrity: Integrated with AEAD_AES_128_GCM [RFC5116]
# IV length: 12 bytes
# KDF: HMAC-SHA-256
#
# Cipher Suite 4 (LISP_3072MODP_AES128_GCM):
# Diffie-Hellman Group: 3072-bit MODP [RFC3526]
# Encryption: AES with 128-bit keys in GCM mode [RFC5116]
# Integrity: Integrated with AEAD_AES_128_GCM [RFC5116]
# IV length: 12 bytes
# KDF: HMAC-SHA-256
#
# Cipher Suite 5 (LISP_256_EC25519_AES128_GCM):
# Diffie-Hellman Group: 256-bit Elliptic-Curve 25519 [CURVE25519]
# Encryption: AES with 128-bit keys in GCM mode [RFC5116]
# Integrity: Integrated with AEAD_AES_128_GCM [RFC5116]
# IV length: 12 bytes
# KDF: HMAC-SHA-256
#
# Cipher Suite 6 (LISP_256_EC25519_CHACHA20_POLY1305):
# Diffie-Hellman Group: 256-bit Elliptic-Curve 25519 [CURVE25519]
# Encryption: Chacha20-Poly1305 [CHACHA-POLY] [RFC7539]
# Integrity: Integrated with AEAD_CHACHA20_POLY1305 [CHACHA-POLY]
# IV length: 8 bytes
# KDF: HMAC-SHA-256
#
LISP_CS_1024 = 0
LISP_CS_1024_G = 2
LISP_CS_1024_P = 0xFFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE65381FFFFFFFFFFFFFFFF
LISP_CS_2048_CBC = 1
LISP_CS_2048_CBC_G = 2
LISP_CS_2048_CBC_P = 0xFFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE65381FFFFFFFFFFFFFFFF
LISP_CS_25519_CBC = 2
LISP_CS_2048_GCM = 3
LISP_CS_3072 = 4
LISP_CS_3072_G = 2
LISP_CS_3072_P = 0xFFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF6955817183995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200CBBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF
LISP_CS_25519_GCM = 5
LISP_CS_25519_CHACHA = 6
LISP_4_32_MASK = 0xFFFFFFFF
LISP_8_64_MASK = 0xFFFFFFFFFFFFFFFF
LISP_16_128_MASK = 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF
#------------------------------------------------------------------------------
#
# lisp_record_traceback
#
# Open ./logs/lisp-traceback.log file and write traceback info to it.
#
def lisp_record_traceback(*args):
ts = datetime.datetime.now().strftime("%m/%d/%y %H:%M:%S.%f")[:-3]
fd = open("./logs/lisp-traceback.log", "a")
fd.write("---------- Exception occurred: {} ----------\n".format(ts))
try:
traceback.print_last(file=fd)
except:
fd.write("traceback.print_last(file=fd) failed")
#endtry
try:
traceback.print_last()
except:
print("traceback.print_last() failed")
#endtry
fd.close()
return
#enddef
#
# lisp_set_exception
#
# Set exception callback to call lisp.lisp_record_traceback().
#
def lisp_set_exception():
sys.excepthook = lisp_record_traceback
return
#enddef
#
# lisp_is_raspbian
#
# Return True if this system is running Raspbian on a Raspberry Pi machine.
#
def lisp_is_raspbian():
if (platform.dist()[0] != "debian"): return(False)
return(platform.machine() in ["armv6l", "armv7l"])
#enddef
#
# lisp_is_ubuntu
#
# Return True if this system is running Ubuntu Linux.
#
def lisp_is_ubuntu():
return(platform.dist()[0] == "Ubuntu")
#enddef
#
# lisp_is_fedora
#
# Return True if this system is running Fedora Linux.
#
def lisp_is_fedora():
return(platform.dist()[0] == "fedora")
#enddef
#
# lisp_is_centos
#
# Return True if this system is running CentOS Linux.
#
def lisp_is_centos():
return(platform.dist()[0] == "centos")
#enddef
#
# lisp_is_debian
#
# Return True if this system is running Debian Jessie.
#
def lisp_is_debian():
return(platform.dist()[0] == "debian")
#enddef
#
# lisp_is_debian_kali
#
# Return True if this system is running Kali Linux.
#
def lisp_is_debian_kali():
return(platform.dist()[0] == "Kali")
#enddef
#
# lisp_is_macos
#
# Return True if this system is running MacOS operating system.
#
def lisp_is_macos():
return(platform.uname()[0] == "Darwin")
#enddef
#
# lisp_is_alpine
#
# Return True if this system is running the Alpine Linux operating system.
#
def lisp_is_alpine():
return(os.path.exists("/etc/alpine-release"))
#enddef
#
# lisp_is_x86
#
# Return True if this process is an x86 little-endian machine.
#
def lisp_is_x86():
cpu = platform.machine()
return(cpu in ("x86", "i686", "x86_64"))
#enddef
#
# lisp_is_linux
#
# Return True if this is a ubuntu or fedora system.
#
def lisp_is_linux():
return(platform.uname()[0] == "Linux")
#enddef
#
# lisp_on_aws
#
# Return True if this node is running in an Amazon VM on AWS.
#
def lisp_on_aws():
vm = commands.getoutput("sudo dmidecode -s bios-version")
if (vm.find("command not found") != -1 and lisp_on_docker()):
aws = bold("AWS check", False)
lprint("{} - dmidecode not installed in docker container".format(aws))
#endif
return(vm.lower().find("amazon") != -1)
#enddef
#
# lisp_on_gcp
#
# Return True if this node is running in an Google Compute Engine VM.
#
def lisp_on_gcp():
vm = commands.getoutput("sudo dmidecode -s bios-version")
return(vm.lower().find("google") != -1)
#enddef
#
# lisp_on_docker
#
# Are we in a docker container?
#
def lisp_on_docker():
return(os.path.exists("/.dockerenv"))
#enddef
#
# lisp_process_logfile
#
# Check to see if logfile exists. If not, it is startup time to create one
# or another procedure rotated the file out of the directory.
#
def lisp_process_logfile():
logfile = "./logs/lisp-{}.log".format(lisp_log_id)
if (os.path.exists(logfile)): return
sys.stdout.close()
sys.stdout = open(logfile, "a")
lisp_print_banner(bold("logfile rotation", False))
return
#enddef
#
# lisp_i_am
#
# The individual components tell the libraries who they are so we can prefix
# the component name for print() and logs().
#
def lisp_i_am(name):
global lisp_log_id, lisp_i_am_itr, lisp_i_am_etr, lisp_i_am_rtr
global lisp_i_am_mr, lisp_i_am_ms, lisp_i_am_ddt, lisp_i_am_core
global lisp_hostname
lisp_log_id = name
if (name == "itr"): lisp_i_am_itr = True
if (name == "etr"): lisp_i_am_etr = True
if (name == "rtr"): lisp_i_am_rtr = True
if (name == "mr"): lisp_i_am_mr = True
if (name == "ms"): lisp_i_am_ms = True
if (name == "ddt"): lisp_i_am_ddt = True
if (name == "core"): lisp_i_am_core = True
#
# Set hostname to normalize dino-macbook.local or dino-macbook.wp.comcast.
# net to "dino-macbook".
#
lisp_hostname = socket.gethostname()
index = lisp_hostname.find(".")
if (index != -1): lisp_hostname = lisp_hostname[0:index]
return
#enddef
#
# lprint
#
# Print with timestamp and component name prefixed. If "force" is any argument,
# then we don't care about the lisp_debug_logging setting and a log message
# is issued.
#
def lprint(*args):
force = ("force" in args)
if (lisp_debug_logging == False and force == False): return
lisp_process_logfile()
ts = datetime.datetime.now().strftime("%m/%d/%y %H:%M:%S.%f")
ts = ts[:-3]
print "{}: {}:".format(ts, lisp_log_id),
for arg in args:
if (arg == "force"): continue
print arg,
#endfor
print ""
try: sys.stdout.flush()
except: pass
return
#enddef
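#
# Illustrative usage (editor's note, not part of the original source; the
# message text is hypothetical): lprint() logs only when lisp_debug_logging
# is True, unless the literal argument "force" is supplied.
#
#   lprint("Sent Map-Request to", "10.0.0.1")   logged when debug logging on
#   lprint("logfile rotation", "force")         always logged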
#
# dprint
#
# Data-plane logging. Call lprint() only if lisp.lisp_data_plane_logging is
# True.
#
def dprint(*args):
if (lisp_data_plane_logging): lprint(*args)
return
#enddef
#
# debug
#
# Used for debugging. Makes it easy to find the location of temporary
# "printf" style code so it can be removed before code goes to production.
#
def debug(*args):
lisp_process_logfile()
ts = datetime.datetime.now().strftime("%m/%d/%y %H:%M:%S.%f")
ts = ts[:-3]
print red(">>>", False),
print "{}:".format(ts),
for arg in args: print arg,
print red("<<<\n", False)
try: sys.stdout.flush()
except: pass
return
#enddef
#
# lisp_print_banner
#
# Print out startup and shutdown banner.
#
def lisp_print_banner(string):
global lisp_version, lisp_hostname
if (lisp_version == ""):
lisp_version = commands.getoutput("cat lisp-version.txt")
#endif
hn = bold(lisp_hostname, False)
lprint("lispers.net LISP {} {}, version {}, hostname {}".format(string,
datetime.datetime.now(), lisp_version, hn))
return
#enddef
#
# green
#
# For printing banner.
#
def green(string, html):
if (html): return('<font color="green"><b>{}</b></font>'.format(string))
return(bold("\033[92m" + string + "\033[0m", html))
#enddef
#
# green_last_sec
#
# For printing packets in the last 1 second.
#
def green_last_sec(string):
return(green(string, True))
#enddef
#
# green_last_minute
#
# For printing packets in the last 1 minute.
#
def green_last_min(string):
return('<font color="#58D68D"><b>{}</b></font>'.format(string))
#enddef
#
# red
#
# For printing banner.
#
def red(string, html):
if (html): return('<font color="red"><b>{}</b></font>'.format(string))
return(bold("\033[91m" + string + "\033[0m", html))
#enddef
#
# blue
#
# For printing distinguished-name AFIs.
#
def blue(string, html):
if (html): return('<font color="blue"><b>{}</b></font>'.format(string))
return(bold("\033[94m" + string + "\033[0m", html))
#enddef
#
# bold
#
# For printing banner.
#
def bold(string, html):
if (html): return("<b>{}</b>".format(string))
return("\033[1m" + string + "\033[0m")
#enddef
#
# convert_font
#
# Converts from text-based bold/color to HTML bold/color.
#
def convert_font(string):
escapes = [ ["[91m", red], ["[92m", green], ["[94m", blue], ["[1m", bold] ]
right = "[0m"
for e in escapes:
left = e[0]
color = e[1]
offset = len(left)
index = string.find(left)
if (index != -1): break
#endfor
while (index != -1):
end = string[index::].find(right)
bold_string = string[index+offset:index+end]
string = string[:index] + color(bold_string, True) + \
string[index+end+offset::]
index = string.find(left)
#endwhile
#
# Call this function one more time if a color was in bold.
#
if (string.find("[1m") != -1): string = convert_font(string)
return(string)
#enddef
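#
# Editor's note (illustrative, not in the original source): convert_font()
# rewrites strings that were colorized for the terminal by bold()/red()/
# green()/blue() into their HTML equivalents ("<b>" and "<font color=...>")
# so the same log text can be rendered on a web page, for example
# convert_font(red("10.0.0.1", False)).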
#
# lisp_space
#
# Put whitespace in URL encoded string.
#
def lisp_space(num):
output = ""
for i in range(num): output += " "
return(output)
#enddef
#
# lisp_button
#
# Return string of a LISP html button.
#
def lisp_button(string, url):
b = '<button style="background-color:transparent;border-radius:10px; ' + \
'type="button">'
if (url == None):
html = b + string + "</button>"
else:
a = '<a href="{}">'.format(url)
s = lisp_space(2)
html = s + a + b + string + "</button></a>" + s
#endif
return(html)
#enddef
#
# lisp_print_cour
#
# Print in HTML Courier-New font.
#
def lisp_print_cour(string):
output = '<font face="Courier New">{}</font>'.format(string)
return(output)
#enddef
#
# lisp_print_sans
#
# Print in HTML Sans-Serif font.
#
def lisp_print_sans(string):
output = '<font face="Sans-Serif">{}</font>'.format(string)
return(output)
#enddef
#
# lisp_span
#
# Print out string when a pointer hovers over some text.
#
def lisp_span(string, hover_string):
output = '<span title="{}">{}</span>'.format(hover_string, string)
return(output)
#enddef
#
# lisp_eid_help_hover
#
# Create hover title for any input EID form.
#
def lisp_eid_help_hover(output):
eid_help_str = \
'''Unicast EID format:
For longest match lookups:
<address> or [<iid>]<address>
For exact match lookups:
<prefix> or [<iid>]<prefix>
Multicast EID format:
For longest match lookups:
<address>-><group> or
[<iid>]<address>->[<iid>]<group>'''
hover = lisp_span(output, eid_help_str)
return(hover)
#enddef
#
# lisp_geo_help_hover
#
# Create hover title for any input Geo or EID form.
#
def lisp_geo_help_hover(output):
eid_help_str = \
'''EID format:
<address> or [<iid>]<address>
'<name>' or [<iid>]'<name>'
Geo-Point format:
d-m-s-<N|S>-d-m-s-<W|E> or
[<iid>]d-m-s-<N|S>-d-m-s-<W|E>
Geo-Prefix format:
d-m-s-<N|S>-d-m-s-<W|E>/<km> or
[<iid>]d-m-s-<N|S>-d-m-s-<W|E>/<km>'''
hover = lisp_span(output, eid_help_str)
return(hover)
#enddef
#
# space
#
# Put whitespace in URL encoded string.
#
def space(num):
output = ""
for i in range(num): output += " "
return(output)
#enddef
#
# lisp_get_ephemeral_port
#
# Select a random UDP port for use as the source port in a Map-Request and
# the destination port in a Map-Reply.
#
def lisp_get_ephemeral_port():
return(random.randrange(32768, 65535))
#enddef
#
# lisp_get_data_nonce
#
# Get a 24-bit random nonce to insert in data header.
#
def lisp_get_data_nonce():
return(random.randint(0, 0xffffff))
#enddef
#
# lisp_get_control_nonce
#
# Get a 64-bit random nonce to insert in control packets.
#
def lisp_get_control_nonce():
return(random.randint(0, (2**64)-1))
#enddef
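#
# Illustrative usage (editor's sketch, values are hypothetical):
#
#   lisp_get_ephemeral_port()    e.g. 41279, used as a Map-Request source port
#   lisp_get_data_nonce()        24-bit value, e.g. 0x3a91c2
#   lisp_get_control_nonce()     64-bit value, e.g. 0x9f1d4c2a77b00e13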
#
# lisp_hex_string
#
# Take an integer, either 16, 32, or 64 bits in width and return a hex string.
# But don't return the leading "0x". And don't return a trailing "L" if the
# integer is a negative 64-bit value (high-order bit set).
#
def lisp_hex_string(integer_value):
value = hex(integer_value)[2::]
if (value[-1] == "L"): value = value[0:-1]
return(value)
#enddef
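#
# Illustrative usage (editor's sketch):
#
#   lisp_hex_string(0xfe01)      returns "fe01" (no leading "0x")
#   lisp_hex_string(2**64 - 1)   returns "ffffffffffffffff" (no trailing "L")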
#
# lisp_get_timestamp
#
# Use time library to get a current timestamp.
#
def lisp_get_timestamp():
return(time.time())
#enddef
#
# lisp_set_timestamp
#
# Use time library to set time into the future.
#
def lisp_set_timestamp(seconds):
return(time.time() + seconds)
#enddef
#
# lisp_print_elapsed
#
# Time value (variable ts) was created via time.time().
#
def lisp_print_elapsed(ts):
if (ts == 0 or ts == None): return("never")
elapsed = time.time() - ts
elapsed = round(elapsed, 0)
return(str(datetime.timedelta(seconds=elapsed)))
#enddef
#
# lisp_print_future
#
# Time value (variable ts) was created via time.time().
#
def lisp_print_future(ts):
if (ts == 0): return("never")
future = ts - time.time()
if (future < 0): return("expired")
future = round(future, 0)
return(str(datetime.timedelta(seconds=future)))
#enddef
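#
# Illustrative usage (editor's sketch): both helpers take a time.time()
# value and return a human-readable string.
#
#   lisp_print_elapsed(time.time() - 90)   returns "0:01:30"
#   lisp_print_future(time.time() + 90)    returns "0:01:30"
#   lisp_print_future(time.time() - 1)     returns "expired"
#   lisp_print_elapsed(0)                  returns "never"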
#
# lisp_print_eid_tuple
#
# Prints in html or returns a string of the following combinations:
#
# [<iid>]<eid>/<ml>
# <eid>/<ml>
# ([<iid>]<source-eid>/ml, [<iid>]<group>/ml)
#
# This is called by most of the data structure classes as "print_eid_tuple()".
#
def lisp_print_eid_tuple(eid, group):
eid_str = eid.print_prefix()
if (group.is_null()): return(eid_str)
group_str = group.print_prefix()
iid = group.instance_id
if (eid.is_null() or eid.is_exact_match(group)):
index = group_str.find("]") + 1
return("[{}](*, {})".format(iid, group_str[index::]))
#endif
sg_str = eid.print_sg(group)
return(sg_str)
#enddef
#
# lisp_convert_6to4
#
# IPC messages will store an IPv4 address in an IPv6 "::ffff:<ipv4-addr>"
# format since we have a udp46 tunnel open. Convert it to an IPv4 address.
#
def lisp_convert_6to4(addr_str):
if (addr_str.find("::ffff:") == -1): return(addr_str)
addr = addr_str.split(":")
return(addr[-1])
#enddef
#
# lisp_convert_4to6
#
# We are sending on a udp46 socket, so if the destination is IPv6
# we have an address format we can use. If destination is IPv4 we
# need to put the address in an IPv6 IPv4-compatible format.
#
# Returns a lisp_address().
#
def lisp_convert_4to6(addr_str):
addr = lisp_address(LISP_AFI_IPV6, "", 128, 0)
if (addr.is_ipv4_string(addr_str)): addr_str = "::ffff:" + addr_str
addr.store_address(addr_str)
return(addr)
#enddef
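#
# Illustrative usage (editor's sketch): the udp46 socket carries IPv4
# addresses in "::ffff:" mapped form, and these two helpers convert in
# each direction.
#
#   lisp_convert_6to4("::ffff:10.1.1.1")   returns "10.1.1.1"
#   lisp_convert_4to6("10.1.1.1")          returns a lisp_address() holding
#                                          "::ffff:10.1.1.1"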
#
# lisp_gethostbyname
#
# Return an address if string is a name or address. If socket.gethostbyname()
# fails, try socket.getaddrinfo(). We may be running on Alpine Linux which
# doesn't return DNS names with gethostbyname().
#
def lisp_gethostbyname(string):
ipv4 = string.split(".")
ipv6 = string.split(":")
mac = string.split("-")
if (len(ipv4) > 1):
if (ipv4[0].isdigit()): return(string)
#endif
if (len(ipv6) > 1):
try:
int(ipv6[0], 16)
return(string)
except:
pass
#endtry
#endif
#
# Make sure there are hex digits between dashes, otherwise this could be a
# valid DNS name with dashes.
#
if (len(mac) == 3):
for i in range(3):
try: int(mac[i], 16)
except: break
#endfor
#endif
try:
addr = socket.gethostbyname(string)
return(addr)
except:
if (lisp_is_alpine() == False): return("")
#endtry
#
# Try different approach on Alpine.
#
try:
addr = socket.getaddrinfo(string, 0)[0]
if (addr[3] != string): return("")
addr = addr[4][0]
except:
addr = ""
#endtry
return(addr)
#enddef
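#
# Illustrative usage (editor's sketch, results depend on the local resolver):
#
#   lisp_gethostbyname("10.0.0.1")      returns "10.0.0.1" (already an address)
#   lisp_gethostbyname("fe80::1")       returns "fe80::1" (already an address)
#   lisp_gethostbyname("example.com")   returns the resolved address or ""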
#
# lisp_ip_checksum
#
# Input to this function is 20-bytes in packed form. Calculate IP header
# checksum and place in byte 10 and byte 11 of header.
#
def lisp_ip_checksum(data, hdrlen=20):
if (len(data) < hdrlen):
lprint("IPv4 packet too short, length {}".format(len(data)))
return(data)
#endif
ip = binascii.hexlify(data)
#
# Go 2-bytes at a time so we only have to fold carry-over once.
#
checksum = 0
for i in range(0, hdrlen*2, 4):
checksum += int(ip[i:i+4], 16)
#endfor
#
# Add in carry and byte-swap.
#
checksum = (checksum >> 16) + (checksum & 0xffff)
checksum += checksum >> 16
checksum = socket.htons(~checksum & 0xffff)
#
# Pack in 2-byte buffer and insert at bytes 10 and 11.
#
checksum = struct.pack("H", checksum)
ip = data[0:10] + checksum + data[12::]
return(ip)
#enddef
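#
# Worked example (editor's sketch with a hypothetical running sum): the
# 16-bit one's-complement sum is folded once, so a sum of 0x2a432 becomes
# (0x2a432 >> 16) + (0x2a432 & 0xffff) = 0xa434; its complement 0x5bcb is
# byte-swapped by htons() on little-endian hosts and stored in bytes 10
# and 11 of the header.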
#
# lisp_icmp_checksum
#
# Checksum an ICMP Destination Unreachable Too-Big message. It statically
# checksums 36 bytes.
#
def lisp_icmp_checksum(data):
if (len(data) < 36):
lprint("ICMP packet too short, length {}".format(len(data)))
return(data)
#endif
icmp = binascii.hexlify(data)
#
# Go 2-bytes at a time so we only have to fold carry-over once.
#
checksum = 0
for i in range(0, 36, 4):
checksum += int(icmp[i:i+4], 16)
#endfor
#
# Add in carry and byte-swap.
#
checksum = (checksum >> 16) + (checksum & 0xffff)
checksum += checksum >> 16
checksum = socket.htons(~checksum & 0xffff)
#
# Pack in 2-byte buffer and insert at bytes 2 and 3.
#
checksum = struct.pack("H", checksum)
icmp = data[0:2] + checksum + data[4::]
return(icmp)
#enddef
#
# lisp_udp_checksum
#
# Calculate the UDP pseudo header checksum. The variable 'data' is a UDP
# packet buffer starting with the UDP header with the checksum field zeroed.
#
# What is returned is the UDP packet buffer with a non-zero/computed checksum.
#
# The UDP pseudo-header is prepended to the UDP packet buffer which the
# checksum runs over:
#
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | |
# + +
# | |
# + Source Address +
# | |
# + +
# | |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | |
# + +
# | |
# + Destination Address +
# | |
# + +
# | |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Upper-Layer Packet Length |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | zero | Next Header |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
def lisp_udp_checksum(source, dest, data):
#
# Build pseudo-header for IPv6.
#
s = lisp_address(LISP_AFI_IPV6, source, LISP_IPV6_HOST_MASK_LEN, 0)
d = lisp_address(LISP_AFI_IPV6, dest, LISP_IPV6_HOST_MASK_LEN, 0)
udplen = socket.htonl(len(data))
next_header = socket.htonl(LISP_UDP_PROTOCOL)
pheader = s.pack_address()
pheader += d.pack_address()
pheader += struct.pack("II", udplen, next_header)
#
# Append UDP packet to pseudo-header. Add zeros to make 4 byte aligned.
#
udp = binascii.hexlify(pheader + data)
add = len(udp) % 4
for i in range(0,add): udp += "0"
#
# Go 2-bytes at a time so we only have to fold carry-over once.
#
checksum = 0
for i in range(0, len(udp), 4):
checksum += int(udp[i:i+4], 16)
#endfor
#
# Add in carry and byte-swap.
#
checksum = (checksum >> 16) + (checksum & 0xffff)
checksum += checksum >> 16
checksum = socket.htons(~checksum & 0xffff)
#
# Pack in 2-byte buffer and insert at last 2 bytes of UDP header.
#
checksum = struct.pack("H", checksum)
udp = data[0:6] + checksum + data[8::]
return(udp)
#enddef
#
# lisp_igmp_checksum
#
# Compute IGMP checksum. This is specialized for an IGMP query 12-byte
# header.
#
def lisp_igmp_checksum(igmp):
g = binascii.hexlify(igmp)
#
# Go 2-bytes at a time so we only have to fold carry-over once.
#
checksum = 0
for i in range(0, 24, 4):
checksum += int(g[i:i+4], 16)
#endfor
#
# Add in carry and byte-swap.
#
checksum = (checksum >> 16) + (checksum & 0xffff)
checksum += checksum >> 16
checksum = socket.htons(~checksum & 0xffff)
#
# Pack in 2-byte buffer and insert at bytes 2 and 3.
#
checksum = struct.pack("H", checksum)
igmp = igmp[0:2] + checksum + igmp[4::]
return(igmp)
#enddef
#
# lisp_get_interface_address
#
# Based on supplied interface device, return IPv4 local interface address.
#
def lisp_get_interface_address(device):
#
# Check for illegal device name.
#
if (device not in netifaces.interfaces()): return(None)
#
# Check if there are no IPv4 addresses assigned to interface.
#
addresses = netifaces.ifaddresses(device)
if (addresses.has_key(netifaces.AF_INET) == False): return(None)
#
# Find first private address.
#
return_address = lisp_address(LISP_AFI_IPV4, "", 32, 0)
for addr in addresses[netifaces.AF_INET]:
addr_str = addr["addr"]
return_address.store_address(addr_str)
return(return_address)
#endfor
return(None)
#enddef
#
# lisp_get_input_interface
#
# Based on destination-MAC address of incoming pcap'ed packet, index into
# lisp_mymacs{} to get an interface name string (device name) for all
# interfaces that have the MAC address assigned.
#
# If dest-MAC is not us, look at source MAC to see if we are in a loopback
# situation testing application and xTR in the same system.
#
def lisp_get_input_interface(packet):
macs = lisp_format_packet(packet[0:12]).replace(" ", "")
da = macs[0:12]
sa = macs[12::]
try: my_sa = lisp_mymacs.has_key(sa)
except: my_sa = False
if (lisp_mymacs.has_key(da)): return(lisp_mymacs[da], sa, da, my_sa)
if (my_sa): return(lisp_mymacs[sa], sa, da, my_sa)
return(["?"], sa, da, my_sa)
#enddef
#
# lisp_get_local_interfaces
#
# Go populate the lisp_myinterfaces{} dictionary. The key is the device ID
# returned by the netifaces API.
#
def lisp_get_local_interfaces():
for device in netifaces.interfaces():
interface = lisp_interface(device)
interface.add_interface()
#endfor
return
#enddef
#
# lisp_get_loopback_address
#
# Get first loopback address on device lo which is not 127.0.0.1.
#
def lisp_get_loopback_address():
for addr in netifaces.ifaddresses("lo")[netifaces.AF_INET]:
if (addr["peer"] == "127.0.0.1"): continue
return(addr["peer"])
#endfor
return(None)
#enddef
#
# lisp_is_mac_string
#
# Return True if the supplied string parameter is in the form "xxxx-xxxx-xxxx".
# The input prefix could be "xxxx-xxxx-xxxx/48".
#
def lisp_is_mac_string(mac_str):
mac = mac_str.split("/")
if (len(mac) == 2): mac_str = mac[0]
return(len(mac_str) == 14 and mac_str.count("-") == 2)
#enddef
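#
# Illustrative usage (editor's sketch):
#
#   lisp_is_mac_string("0050-56a1-0001")      returns True
#   lisp_is_mac_string("0050-56a1-0001/48")   returns True (prefix form)
#   lisp_is_mac_string("10.0.0.1")            returns False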
#
# lisp_get_local_macs
#
# Walk all interfaces, and for each ethernet interface, put the MAC address
# as a key into lisp_mymacs with a value of array of interface names.
#
def lisp_get_local_macs():
for device in netifaces.interfaces():
#
# Ignore bogus interface names that containers may create. Allow only
# interface names made up of colons, dashes, and alphanumeric characters.
#
d = device.replace(":", "")
d = device.replace("-", "")
if (d.isalnum() == False): continue
#
# Need this for EOS because a "pimreg" interface will crash the call
# to netifaces.ifaddresses("pimreg").
#
try:
parms = netifaces.ifaddresses(device)
except:
continue
#endtry
if (parms.has_key(netifaces.AF_LINK) == False): continue
mac = parms[netifaces.AF_LINK][0]["addr"]
mac = mac.replace(":", "")
#
# GRE tunnels have strange MAC addresses (less than 48-bits). Ignore
# them.
#
if (len(mac) < 12): continue
if (lisp_mymacs.has_key(mac) == False): lisp_mymacs[mac] = []
lisp_mymacs[mac].append(device)
#endfor
lprint("Local MACs are: {}".format(lisp_mymacs))
return
#enddef
#
# lisp_get_local_rloc
#
# Use "ip addr show" on Linux and "ifconfig" on MacOS to get a local IPv4
# address. Get interface name from "netstat -rn" to grep for.
#
def lisp_get_local_rloc():
out = commands.getoutput("netstat -rn | egrep 'default|0.0.0.0'")
if (out == ""): return(lisp_address(LISP_AFI_IPV4, "", 32, 0))
#
# Get last item on first line of output.
#
out = out.split("\n")[0]
device = out.split()[-1]
addr = ""
macos = lisp_is_macos()
if (macos):
out = commands.getoutput("ifconfig {} | egrep 'inet '".format(device))
if (out == ""): return(lisp_address(LISP_AFI_IPV4, "", 32, 0))
else:
cmd = 'ip addr show | egrep "inet " | egrep "{}"'.format(device)
out = commands.getoutput(cmd)
if (out == ""):
cmd = 'ip addr show | egrep "inet " | egrep "global lo"'
out = commands.getoutput(cmd)
#endif
if (out == ""): return(lisp_address(LISP_AFI_IPV4, "", 32, 0))
#endif
#
# Check for multi-line. And favor returning private address so NAT
# traversal is used in lig.
#
addr = ""
out = out.split("\n")
for line in out:
a = line.split()[1]
if (macos == False): a = a.split("/")[0]
address = lisp_address(LISP_AFI_IPV4, a, 32, 0)
return(address)
#endfor
return(lisp_address(LISP_AFI_IPV4, addr, 32, 0))
#enddef
#
# lisp_get_local_addresses
#
# Use netifaces module to get a IPv4 and IPv6 local RLOC of this system.
# Return an array of 2 elements where [0] is an IPv4 RLOC and [1] is an
# IPv6 RLOC.
#
# Stores data in lisp.lisp_myrlocs[].
#
def lisp_get_local_addresses():
global lisp_myrlocs
#
# Check to see if we should not get the first address. Use environment
# variable (1-based addressing) to determine which one to get. If the
# number of addresses is less than the index, use the last one.
#
# The format of the environment variable could be <number> or
# <device>:<number>. The format could also be "<device>:" but make sure
# the user typed in a ":".
#
device_select = None
index = 1
parm = os.getenv("LISP_ADDR_SELECT")
if (parm != None and parm != ""):
parm = parm.split(":")
if (len(parm) == 2):
device_select = parm[0]
index = parm[1]
else:
if (parm[0].isdigit()):
index = parm[0]
else:
device_select = parm[0]
#endif
#endif
index = 1 if (index == "") else int(index)
#endif
rlocs = [None, None, None]
rloc4 = lisp_address(LISP_AFI_IPV4, "", 32, 0)
rloc6 = lisp_address(LISP_AFI_IPV6, "", 128, 0)
device_iid = None
for device in netifaces.interfaces():
if (device_select != None and device_select != device): continue
addresses = netifaces.ifaddresses(device)
if (addresses == {}): continue
#
# Set instance-ID for interface.
#
device_iid = lisp_get_interface_instance_id(device, None)
#
# Look for a non-link-local and non-loopback address.
#
if (addresses.has_key(netifaces.AF_INET)):
ipv4 = addresses[netifaces.AF_INET]
count = 0
for addr in ipv4:
rloc4.store_address(addr["addr"])
if (rloc4.is_ipv4_loopback()): continue
if (rloc4.is_ipv4_link_local()): continue
if (rloc4.address == 0): continue
count += 1
rloc4.instance_id = device_iid
if (device_select == None and
lisp_db_for_lookups.lookup_cache(rloc4, False)): continue
rlocs[0] = rloc4
if (count == index): break
#endfor
#endif
if (addresses.has_key(netifaces.AF_INET6)):
ipv6 = addresses[netifaces.AF_INET6]
count = 0
for addr in ipv6:
addr_str = addr["addr"]
rloc6.store_address(addr_str)
if (rloc6.is_ipv6_string_link_local(addr_str)): continue
if (rloc6.is_ipv6_loopback()): continue
count += 1
rloc6.instance_id = device_iid
if (device_select == None and
lisp_db_for_lookups.lookup_cache(rloc6, False)): continue
rlocs[1] = rloc6
if (count == index): break
#endfor
#endif
#
# Did we find an address? If not, loop and get the next interface.
#
if (rlocs[0] == None): continue
rlocs[2] = device
break
#endfor
addr1 = rlocs[0].print_address_no_iid() if rlocs[0] else "none"
addr2 = rlocs[1].print_address_no_iid() if rlocs[1] else "none"
device = rlocs[2] if rlocs[2] else "none"
device_select = " (user selected)" if device_select != None else ""
addr1 = red(addr1, False)
addr2 = red(addr2, False)
device = bold(device, False)
lprint("Local addresses are IPv4: {}, IPv6: {} from device {}{}, iid {}". \
format(addr1, addr2, device, device_select, device_iid))
lisp_myrlocs = rlocs
return((rlocs[0] != None))
#enddef
#
# lisp_get_all_addresses
#
# Return a list of all local IPv4 and IPv6 addresses from kernel. This is
# going to be used for building pcap and iptables filters. So no loopback or
# link-local addresses are returned.
#
def lisp_get_all_addresses():
address_list = []
for interface in netifaces.interfaces():
try: entry = netifaces.ifaddresses(interface)
except: continue
if (entry.has_key(netifaces.AF_INET)):
for addr in entry[netifaces.AF_INET]:
a = addr["addr"]
if (a.find("127.0.0.1") != -1): continue
address_list.append(a)
#endfor
#endif
if (entry.has_key(netifaces.AF_INET6)):
for addr in entry[netifaces.AF_INET6]:
a = addr["addr"]
if (a == "::1"): continue
if (a[0:5] == "fe80:"): continue
address_list.append(a)
#endfor
#endif
#endfor
return(address_list)
#enddef
#
# lisp_get_all_multicast_rles
#
# Grep lisp.config and get all multicast RLEs that appear in the configuration.
# Returns either an empty array or filled with one or more multicast addresses.
#
def lisp_get_all_multicast_rles():
rles = []
out = commands.getoutput('egrep "rle-address =" ./lisp.config')
if (out == ""): return(rles)
lines = out.split("\n")
for line in lines:
if (line[0] == "#"): continue
rle = line.split("rle-address = ")[1]
rle_byte = int(rle.split(".")[0])
if (rle_byte >= 224 and rle_byte < 240): rles.append(rle)
#endfor
return(rles)
#enddef
#------------------------------------------------------------------------------
#
# LISP packet contents. This keeps state for a LISP encapsulated packet that
# is processed by an RTR and ETR.
#
class lisp_packet():
def __init__(self, packet):
self.outer_source = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.outer_dest = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.outer_tos = 0
self.outer_ttl = 0
self.udp_sport = 0
self.udp_dport = 0
self.udp_length = 0
self.udp_checksum = 0
self.inner_source = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.inner_dest = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.inner_tos = 0
self.inner_ttl = 0
self.inner_protocol = 0
self.inner_sport = 0
self.inner_dport = 0
self.lisp_header = lisp_data_header()
self.packet = packet
self.inner_version = 0
self.outer_version = 0
self.encap_port = LISP_DATA_PORT
self.inner_is_fragment = False
self.packet_error = ""
self.gleaned_dest = False
#enddef
def encode(self, nonce):
#
# We could be running with no RLOCs found. If lisp_myrlocs[] is None,
# then self.outer_source will be LISP_AFI_NONE.
#
if (self.outer_source.is_null()): return(None)
#
# We have to build the LISP header here because if we are doing
# lisp-crypto, the ICV covers the LISP header. The function
# lisp_packet.encrypt() will put in the key-id.
#
if (nonce == None):
self.lisp_header.nonce(lisp_get_data_nonce())
elif (self.lisp_header.is_request_nonce(nonce)):
self.lisp_header.request_nonce(nonce)
else:
self.lisp_header.nonce(nonce)
#endif
self.lisp_header.instance_id(self.inner_dest.instance_id)
#
# Encrypt the packet. If something went wrong, send the packet unencrypted
# and signal that to the RLOC with key-id 0. For now, just use key-id 1. We
# are supporting just a single key.
#
self.lisp_header.key_id(0)
control = (self.lisp_header.get_instance_id() == 0xffffff)
if (lisp_data_plane_security and control == False):
addr_str = self.outer_dest.print_address_no_iid() + ":" + \
str(self.encap_port)
if (lisp_crypto_keys_by_rloc_encap.has_key(addr_str)):
keys = lisp_crypto_keys_by_rloc_encap[addr_str]
if (keys[1]):
keys[1].use_count += 1
packet, encrypted = self.encrypt(keys[1], addr_str)
if (encrypted): self.packet = packet
#endif
#endif
#endif
#
# Start with UDP header. Call hash_packet() to set source-port value.
# Unless we are doing lisp-crypto and nat-traversal.
#
self.udp_checksum = 0
if (self.encap_port == LISP_DATA_PORT):
if (lisp_crypto_ephem_port == None):
if (self.gleaned_dest):
self.udp_sport = LISP_DATA_PORT
else:
self.hash_packet()
#endif
else:
self.udp_sport = lisp_crypto_ephem_port
#endif
else:
self.udp_sport = LISP_DATA_PORT
#endif
self.udp_dport = self.encap_port
self.udp_length = len(self.packet) + 16
#
# IPv6 raw sockets need to have the UDP ports not swapped.
#
if (self.outer_version == 4):
sport = socket.htons(self.udp_sport)
dport = socket.htons(self.udp_dport)
else:
sport = self.udp_sport
dport = self.udp_dport
#endif
udp = struct.pack("HHHH", sport, dport, socket.htons(self.udp_length),
self.udp_checksum)
#
# Encode the LISP header.
#
lisp = self.lisp_header.encode()
#
# Now prepend all 3 headers, LISP, UDP, outer header. See lisp_packet.
# fix_outer_header() for byte-swap details for the frag-offset field.
#
if (self.outer_version == 4):
tl = socket.htons(self.udp_length + 20)
frag = socket.htons(0x4000)
outer = struct.pack("BBHHHBBH", 0x45, self.outer_tos, tl, 0xdfdf,
frag, self.outer_ttl, 17, 0)
outer += self.outer_source.pack_address()
outer += self.outer_dest.pack_address()
outer = lisp_ip_checksum(outer)
elif (self.outer_version == 6):
outer = ""
# short = 6 << 12
# short |= self.outer_tos << 4
# short = socket.htons(short)
# tl = socket.htons(self.udp_length)
# outer = struct.pack("HHHBB", short, 0, tl, 17, self.outer_ttl)
# outer += self.outer_source.pack_address()
# outer += self.outer_dest.pack_address()
else:
return(None)
#endif
self.packet = outer + udp + lisp + self.packet
return(self)
#enddef
def cipher_pad(self, packet):
length = len(packet)
if ((length % 16) != 0):
pad = ((length/16) + 1) * 16
packet = packet.ljust(pad)
#endif
return(packet)
#enddef
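#
# Editor's note (illustrative): cipher_pad() right-pads with spaces up to
# a 16-byte multiple so block ciphers like AES-CBC can run over the
# payload, e.g. a 61-byte packet is padded to 64 bytes while a 64-byte
# packet is returned unchanged.
#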
def encrypt(self, key, addr_str):
if (key == None or key.shared_key == None):
return([self.packet, False])
#endif
#
# Pad packet to multiple of 16 bytes and call AES cipher.
#
packet = self.cipher_pad(self.packet)
iv = key.get_iv()
ts = lisp_get_timestamp()
aead = None
if (key.cipher_suite == LISP_CS_25519_CHACHA):
encrypt = chacha.ChaCha(key.encrypt_key, iv).encrypt
elif (key.cipher_suite == LISP_CS_25519_GCM):
k = binascii.unhexlify(key.encrypt_key)
try:
aesgcm = AES.new(k, AES.MODE_GCM, iv)
encrypt = aesgcm.encrypt
aead = aesgcm.digest
except:
lprint("You need AES-GCM, do a 'pip install pycryptodome'")
return([self.packet, False])
#endtry
else:
k = binascii.unhexlify(key.encrypt_key)
encrypt = AES.new(k, AES.MODE_CBC, iv).encrypt
#endif
ciphertext = encrypt(packet)
if (ciphertext == None): return([self.packet, False])
ts = int(str(time.time() - ts).split(".")[1][0:6])
#
# GCM requires 16 bytes of an AEAD MAC tag at the end of the
# ciphertext. Needed to interoperate with the Go implementation of
# AES-GCM. The MAC digest was computed above.
#
if (aead != None): ciphertext += aead()
#
# Compute ICV and append to packet. ICV covers the LISP header, the
# IV, and the ciphertext.
#
self.lisp_header.key_id(key.key_id)
lisp = self.lisp_header.encode()
icv = key.do_icv(lisp + iv + ciphertext, iv)
ps = 4 if (key.do_poly) else 8
string = bold("Encrypt", False)
cipher_str = bold(key.cipher_suite_string, False)
addr_str = "RLOC: " + red(addr_str, False)
auth = "poly" if key.do_poly else "sha256"
auth = bold(auth, False)
icv_str = "ICV({}): 0x{}...{}".format(auth, icv[0:ps], icv[-ps::])
dprint("{} for key-id: {}, {}, {}, {}-time: {} usec".format( \
string, key.key_id, addr_str, icv_str, cipher_str, ts))
icv = int(icv, 16)
if (key.do_poly):
icv1 = byte_swap_64((icv >> 64) & LISP_8_64_MASK)
icv2 = byte_swap_64(icv & LISP_8_64_MASK)
icv = struct.pack("QQ", icv1, icv2)
else:
icv1 = byte_swap_64((icv >> 96) & LISP_8_64_MASK)
icv2 = byte_swap_64((icv >> 32) & LISP_8_64_MASK)
icv3 = socket.htonl(icv & 0xffffffff)
icv = struct.pack("QQI", icv1, icv2, icv3)
#endif
return([iv + ciphertext + icv, True])
#enddef
def decrypt(self, packet, header_length, key, addr_str):
#
# Do ICV first. If it succeeds, then decrypt. Get ICV from packet and
# truncate packet to run hash over. Compare packet hash with computed
# hash.
#
if (key.do_poly):
icv1, icv2 = struct.unpack("QQ", packet[-16::])
packet_icv = byte_swap_64(icv1) << 64
packet_icv |= byte_swap_64(icv2)
packet_icv = lisp_hex_string(packet_icv).zfill(32)
packet = packet[0:-16]
ps = 4
hash_str = bold("poly", False)
else:
icv1, icv2, icv3 = struct.unpack("QQI", packet[-20::])
packet_icv = byte_swap_64(icv1) << 96
packet_icv |= byte_swap_64(icv2) << 32
packet_icv |= socket.htonl(icv3)
packet_icv = lisp_hex_string(packet_icv).zfill(40)
packet = packet[0:-20]
ps = 8
hash_str = bold("sha", False)
#endif
lisp = self.lisp_header.encode()
#
# Get the IV and use it to decrypt and authenticate.
#
if (key.cipher_suite == LISP_CS_25519_CHACHA):
iv_len = 8
cipher_str = bold("chacha", False)
elif (key.cipher_suite == LISP_CS_25519_GCM):
iv_len = 12
cipher_str = bold("aes-gcm", False)
else:
iv_len = 16
cipher_str = bold("aes-cbc", False)
#endif
iv = packet[0:iv_len]
#
# Compute ICV over LISP header and packet payload.
#
computed_icv = key.do_icv(lisp + packet, iv)
p_icv = "0x{}...{}".format(packet_icv[0:ps], packet_icv[-ps::])
c_icv = "0x{}...{}".format(computed_icv[0:ps], computed_icv[-ps::])
if (computed_icv != packet_icv):
self.packet_error = "ICV-error"
funcs = cipher_str + "/" + hash_str
fail = bold("ICV failed ({})".format(funcs), False)
icv_str = "packet-ICV {} != computed-ICV {}".format(p_icv, c_icv)
dprint(("{} from RLOC {}, receive-port: {}, key-id: {}, " + \
"packet dropped, {}").format(fail, red(addr_str, False),
self.udp_sport, key.key_id, icv_str))
dprint("{}".format(key.print_keys()))
#
# This is the 4-tuple NAT case. There may be another addr:port that
# has the crypto-key the encapsulator is using. This is
# typically done on the RTR.
#
lisp_retry_decap_keys(addr_str, lisp + packet, iv, packet_icv)
return([None, False])
#endif
#
# Advance over IV for decryption.
#
packet = packet[iv_len::]
#
# Call AES or chacha cipher. For AES-CBC, make sure the ciphertext is a
# multiple of 16 bytes before decrypting.
#
ts = lisp_get_timestamp()
if (key.cipher_suite == LISP_CS_25519_CHACHA):
decrypt = chacha.ChaCha(key.encrypt_key, iv).decrypt
elif (key.cipher_suite == LISP_CS_25519_GCM):
k = binascii.unhexlify(key.encrypt_key)
try:
decrypt = AES.new(k, AES.MODE_GCM, iv).decrypt
except:
self.packet_error = "no-decrypt-key"
lprint("You need AES-GCM, do a 'pip install pycryptodome'")
return([None, False])
#endtry
else:
if ((len(packet) % 16) != 0):
dprint("Ciphertext not multiple of 16 bytes, packet dropped")
return([None, False])
#endif
k = binascii.unhexlify(key.encrypt_key)
decrypt = AES.new(k, AES.MODE_CBC, iv).decrypt
#endif
plaintext = decrypt(packet)
ts = int(str(time.time() - ts).split(".")[1][0:6])
#
# Now decrypt packet and return plaintext payload.
#
string = bold("Decrypt", False)
addr_str = "RLOC: " + red(addr_str, False)
auth = "poly" if key.do_poly else "sha256"
auth = bold(auth, False)
icv_str = "ICV({}): {}".format(auth, p_icv)
dprint("{} for key-id: {}, {}, {} (good), {}-time: {} usec". \
format(string, key.key_id, addr_str, icv_str, cipher_str, ts))
#
# Keep self.packet as just the outer header, UDP header, and LISP header.
# We will append the plaintext in the caller once we parse the inner
# packet length so we can truncate any padding the encryptor put on.
#
self.packet = self.packet[0:header_length]
return([plaintext, True])
#enddef
def fragment_outer(self, outer_hdr, inner_packet):
frag_len = 1000
#
# Break up packet payload in fragments and put in array to have
# IP header added in next loop below.
#
frags = []
offset = 0
length = len(inner_packet)
while (offset < length):
frag = inner_packet[offset::]
if (len(frag) > frag_len): frag = frag[0:frag_len]
frags.append(frag)
offset += len(frag)
#endwhile
#
# Now fix the outer IPv4 header with fragment-offset and total-length
# values and prepend it to each fragment.
#
fragments = []
offset = 0
for frag in frags:
#
# Set frag-offset field in outer IPv4 header.
#
fo = offset if (frag == frags[-1]) else 0x2000 + offset
fo = socket.htons(fo)
outer_hdr = outer_hdr[0:6] + struct.pack("H", fo) + outer_hdr[8::]
#
# Set total-length field in outer IPv4 header and checksum.
#
l = socket.htons(len(frag) + 20)
outer_hdr = outer_hdr[0:2] + struct.pack("H", l) + outer_hdr[4::]
outer_hdr = lisp_ip_checksum(outer_hdr)
fragments.append(outer_hdr + frag)
offset += len(frag) / 8
#endfor
return(fragments)
#enddef
def send_icmp_too_big(self, inner_packet):
global lisp_last_icmp_too_big_sent
global lisp_icmp_raw_socket
elapsed = time.time() - lisp_last_icmp_too_big_sent
if (elapsed < LISP_ICMP_TOO_BIG_RATE_LIMIT):
lprint("Rate limit sending ICMP Too-Big to {}".format( \
self.inner_source.print_address_no_iid()))
return(False)
#endif
#
# Destination Unreachable Message - Too Big Message
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Type = 3 | Code = 4 | Checksum |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | unused | MTU = 1400 |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Internet Header + 64 bits of Original Data Datagram |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
mtu = socket.htons(1400)
icmp = struct.pack("BBHHH", 3, 4, 0, 0, mtu)
icmp += inner_packet[0:20+8]
icmp = lisp_icmp_checksum(icmp)
#
# Build IP header. Make source of ICMP invoking packet the destination
# and our address the source. We can get our address when we thought
# we could encap. So lisp_packet.outer_source has the RLOC address of
# this system.
#
host = inner_packet[12:16]
dest = self.inner_source.print_address_no_iid()
me = self.outer_source.pack_address()
#
# IP_HDRINCL requires the total-length and frag-offset fields to be
# host byte order. We need to build the total-length field just like
# lisp_packet.encode(), checksum, and then fix outer header. So that
# logic is semantically replicated here. Same logic is in lisp_packet.
# fragment() as well.
#
tl = socket.htons(20+36)
ip = struct.pack("BBHHHBBH", 0x45, 0, tl, 0, 0, 32, 1, 0) + me + host
ip = lisp_ip_checksum(ip)
ip = self.fix_outer_header(ip)
ip += icmp
tb = bold("Too-Big", False)
lprint("Send ICMP {} to {}, mtu 1400: {}".format(tb, dest,
lisp_format_packet(ip)))
try:
lisp_icmp_raw_socket.sendto(ip, (dest, 0))
except socket.error, e:
lprint("lisp_icmp_raw_socket.sendto() failed: {}".format(e))
return(False)
#endtry
#
# Caller function sends packet on raw socket. Kernel routes out
# interface to destination.
#
lisp_last_icmp_too_big_sent = lisp_get_timestamp()
return(True)
#enddef
def fragment(self):
global lisp_icmp_raw_socket
global lisp_ignore_df_bit
packet = self.fix_outer_header(self.packet)
#
# If inner header is IPv4, we will fragment the inner header and encap
# each fragment. If the inner header is IPv6, we will not add the
# Fragmentation Header into the inner IPv6 packet.
#
length = len(packet)
if (length <= 1500): return([packet], "Fragment-None")
packet = self.packet
#
# Fragment outer IPv4 header if inner packet is IPv6 (or Mac frame).
# We cannot fragment an IPv6 packet since we are not the source.
#
if (self.inner_version != 4):
ident = random.randint(0, 0xffff)
outer_hdr = packet[0:4] + struct.pack("H", ident) + packet[6:20]
inner_packet = packet[20::]
fragments = self.fragment_outer(outer_hdr, inner_packet)
return(fragments, "Fragment-Outer")
#endif
#
# Fragment inner IPv4 packet.
#
outer_hdr_len = 56 if (self.outer_version == 6) else 36
outer_hdr = packet[0:outer_hdr_len]
inner_hdr = packet[outer_hdr_len: outer_hdr_len + 20]
inner_packet = packet[outer_hdr_len + 20::]
#
# If DF-bit is set, don't fragment packet. Do MTU discovery if
# configured with env variable.
#
frag_field = struct.unpack("H", inner_hdr[6:8])[0]
frag_field = socket.ntohs(frag_field)
if (frag_field & 0x4000):
if (lisp_icmp_raw_socket != None):
inner = packet[outer_hdr_len::]
if (self.send_icmp_too_big(inner)): return([], None)
#endif
if (lisp_ignore_df_bit):
frag_field &= ~0x4000
else:
df_bit = bold("DF-bit set", False)
dprint("{} in inner header, packet discarded".format(df_bit))
return([], "Fragment-None-DF-bit")
#endif
#endif
offset = 0
length = len(inner_packet)
fragments = []
while (offset < length):
fragments.append(inner_packet[offset:offset+1400])
offset += 1400
#endwhile
#
# Now put inner header and outer header on each fragment.
#
frags = fragments
fragments = []
mf = True if frag_field & 0x2000 else False
frag_field = (frag_field & 0x1fff) * 8
for frag in frags:
#
# Set fragment-offset and MF bit if not last fragment.
#
ff = frag_field / 8
if (mf):
ff |= 0x2000
elif (frag != frags[-1]):
ff |= 0x2000
#endif
ff = socket.htons(ff)
inner_hdr = inner_hdr[0:6] + struct.pack("H", ff) + inner_hdr[8::]
#
# Set the length of the fragment, set up the offset for the next
# fragment-offset, and compute the header checksum. Then prepend the
# inner header to the payload.
#
length = len(frag)
frag_field += length
l = socket.htons(length + 20)
inner_hdr = inner_hdr[0:2] + struct.pack("H", l) + \
inner_hdr[4:10] + struct.pack("H", 0) + inner_hdr[12::]
inner_hdr = lisp_ip_checksum(inner_hdr)
fragment = inner_hdr + frag
#
# Change outer header length and header checksum if IPv4 outer
# header. If IPv6 outer header, raw sockets prepends the header.
#
length = len(fragment)
if (self.outer_version == 4):
l = length + outer_hdr_len
length += 16
outer_hdr = outer_hdr[0:2] + struct.pack("H", l) + \
outer_hdr[4::]
outer_hdr = lisp_ip_checksum(outer_hdr)
fragment = outer_hdr + fragment
fragment = self.fix_outer_header(fragment)
#endif
#
# Finally fix outer UDP header length. Byte-swap it.
#
udp_len_index = outer_hdr_len - 12
l = socket.htons(length)
fragment = fragment[0:udp_len_index] + struct.pack("H", l) + \
fragment[udp_len_index+2::]
fragments.append(fragment)
#endfor
return(fragments, "Fragment-Inner")
#enddef
def fix_outer_header(self, packet):
#
# IP_HDRINCL requires the total-length and frag-offset fields to be
# in host byte order. So have to byte-swapped here. But when testing
# we (UPC guys) discovered the frag field didn't need swapping. The
# conclusion is that byte-swapping is necessary for MacOS but not for
# Linux OSes.
#
if (self.outer_version == 4 or self.inner_version == 4):
if (lisp_is_macos()):
packet = packet[0:2] + packet[3] + packet[2] + packet[4:6] + \
packet[7] + packet[6] + packet[8::]
else:
packet = packet[0:2] + packet[3] + packet[2] + packet[4::]
#endif
#endif
return(packet)
#enddef
def send_packet(self, lisp_raw_socket, dest):
if (lisp_flow_logging and dest != self.inner_dest): self.log_flow(True)
dest = dest.print_address_no_iid()
fragments, in_or_out = self.fragment()
for fragment in fragments:
if (len(fragments) != 1):
self.packet = fragment
self.print_packet(in_or_out, True)
#endif
try: lisp_raw_socket.sendto(fragment, (dest, 0))
except socket.error, e:
lprint("socket.sendto() failed: {}".format(e))
#endtry
#endfor
#enddef
def send_l2_packet(self, l2_socket, mac_header):
if (l2_socket == None):
lprint("No layer-2 socket, drop IPv6 packet")
return
#endif
if (mac_header == None):
lprint("Could not build MAC header, drop IPv6 packet")
return
#endif
packet = mac_header + self.packet
# try: l2_socket.send(packet)
# except socket.error, e:
# lprint("send_l2_packet(): socket.send() failed: {}".format(e))
# #endtry
# return
#
# Use tuntap tunnel interface instead of raw sockets for IPv6
# decapsulated packets.
#
l2_socket.write(packet)
return
#enddef
def bridge_l2_packet(self, eid, db):
try: dyn_eid = db.dynamic_eids[eid.print_address_no_iid()]
except: return
try: interface = lisp_myinterfaces[dyn_eid.interface]
except: return
try:
socket = interface.get_bridge_socket()
if (socket == None): return
except: return
try: socket.send(self.packet)
except socket.error, e:
lprint("bridge_l2_packet(): socket.send() failed: {}".format(e))
#endtry
#enddef
def is_lisp_packet(self, packet):
udp = (struct.unpack("B", packet[9])[0] == LISP_UDP_PROTOCOL)
if (udp == False): return(False)
port = struct.unpack("H", packet[22:24])[0]
if (socket.ntohs(port) == LISP_DATA_PORT): return(True)
port = struct.unpack("H", packet[20:22])[0]
if (socket.ntohs(port) == LISP_DATA_PORT): return(True)
return(False)
#enddef
def decode(self, is_lisp_packet, lisp_ipc_socket, stats):
self.packet_error = ""
packet = self.packet
orig_len = len(packet)
L3 = L2 = True
#
# Get version number of outer header so we can decode outer addresses.
#
header_len = 0
iid = 0
if (is_lisp_packet):
iid = self.lisp_header.get_instance_id()
version = struct.unpack("B", packet[0:1])[0]
self.outer_version = version >> 4
if (self.outer_version == 4):
#
# MacOS is zeroing the IP header checksum for a raw socket.
# If we receive this, bypass the checksum calculation.
#
orig_checksum = struct.unpack("H", packet[10:12])[0]
packet = lisp_ip_checksum(packet)
checksum = struct.unpack("H", packet[10:12])[0]
if (checksum != 0):
if (orig_checksum != 0 or lisp_is_macos() == False):
self.packet_error = "checksum-error"
if (stats):
stats[self.packet_error].increment(orig_len)
#endif
lprint("IPv4 header checksum failed for outer header")
if (lisp_flow_logging): self.log_flow(False)
return(None)
#endif
#endif
afi = LISP_AFI_IPV4
offset = 12
self.outer_tos = struct.unpack("B", packet[1:2])[0]
self.outer_ttl = struct.unpack("B", packet[8:9])[0]
header_len = 20
elif (self.outer_version == 6):
afi = LISP_AFI_IPV6
offset = 8
tos = struct.unpack("H", packet[0:2])[0]
self.outer_tos = (socket.ntohs(tos) >> 4) & 0xff
self.outer_ttl = struct.unpack("B", packet[7:8])[0]
header_len = 40
else:
self.packet_error = "outer-header-error"
if (stats): stats[self.packet_error].increment(orig_len)
lprint("Cannot decode outer header")
return(None)
#endif
self.outer_source.afi = afi
self.outer_dest.afi = afi
addr_length = self.outer_source.addr_length()
self.outer_source.unpack_address(packet[offset:offset+addr_length])
offset += addr_length
self.outer_dest.unpack_address(packet[offset:offset+addr_length])
packet = packet[header_len::]
self.outer_source.mask_len = self.outer_source.host_mask_len()
self.outer_dest.mask_len = self.outer_dest.host_mask_len()
#
# Get UDP fields
#
short = struct.unpack("H", packet[0:2])[0]
self.udp_sport = socket.ntohs(short)
short = struct.unpack("H", packet[2:4])[0]
self.udp_dport = socket.ntohs(short)
short = struct.unpack("H", packet[4:6])[0]
self.udp_length = socket.ntohs(short)
short = struct.unpack("H", packet[6:8])[0]
self.udp_checksum = socket.ntohs(short)
packet = packet[8::]
#
# Determine what is inside, a packet or a frame.
#
L3 = (self.udp_dport == LISP_DATA_PORT or
self.udp_sport == LISP_DATA_PORT)
L2 = (self.udp_dport in (LISP_L2_DATA_PORT, LISP_VXLAN_DATA_PORT))
#
# Get LISP header fields.
#
if (self.lisp_header.decode(packet) == False):
self.packet_error = "lisp-header-error"
if (stats): stats[self.packet_error].increment(orig_len)
if (lisp_flow_logging): self.log_flow(False)
lprint("Cannot decode LISP header")
return(None)
#endif
packet = packet[8::]
iid = self.lisp_header.get_instance_id()
header_len += 16
#endif
if (iid == 0xffffff): iid = 0
#
# Time to decrypt if K-bits set.
#
decrypted = False
key_id = self.lisp_header.k_bits
if (key_id):
addr_str = lisp_get_crypto_decap_lookup_key(self.outer_source,
self.udp_sport)
if (addr_str == None):
self.packet_error = "no-decrypt-key"
if (stats): stats[self.packet_error].increment(orig_len)
self.print_packet("Receive", is_lisp_packet)
ks = bold("No key available", False)
dprint("{} for key-id {} to decrypt packet".format(ks, key_id))
if (lisp_flow_logging): self.log_flow(False)
return(None)
#endif
key = lisp_crypto_keys_by_rloc_decap[addr_str][key_id]
if (key == None):
self.packet_error = "no-decrypt-key"
if (stats): stats[self.packet_error].increment(orig_len)
self.print_packet("Receive", is_lisp_packet)
ks = bold("No key available", False)
dprint("{} to decrypt packet from RLOC {}".format(ks,
red(addr_str, False)))
if (lisp_flow_logging): self.log_flow(False)
return(None)
#endif
#
# Decrypt and continue processing inner header.
#
key.use_count += 1
packet, decrypted = self.decrypt(packet, header_len, key,
addr_str)
if (decrypted == False):
if (stats): stats[self.packet_error].increment(orig_len)
if (lisp_flow_logging): self.log_flow(False)
return(None)
#endif
#endif
#
# Get inner header fields.
#
version = struct.unpack("B", packet[0:1])[0]
self.inner_version = version >> 4
if (L3 and self.inner_version == 4 and version >= 0x45):
packet_len = socket.ntohs(struct.unpack("H", packet[2:4])[0])
self.inner_tos = struct.unpack("B", packet[1:2])[0]
self.inner_ttl = struct.unpack("B", packet[8:9])[0]
self.inner_protocol = struct.unpack("B", packet[9:10])[0]
self.inner_source.afi = LISP_AFI_IPV4
self.inner_dest.afi = LISP_AFI_IPV4
self.inner_source.unpack_address(packet[12:16])
self.inner_dest.unpack_address(packet[16:20])
frag_field = socket.ntohs(struct.unpack("H", packet[6:8])[0])
self.inner_is_fragment = (frag_field & 0x2000 or frag_field != 0)
if (self.inner_protocol == LISP_UDP_PROTOCOL):
self.inner_sport = struct.unpack("H", packet[20:22])[0]
self.inner_sport = socket.ntohs(self.inner_sport)
self.inner_dport = struct.unpack("H", packet[22:24])[0]
self.inner_dport = socket.ntohs(self.inner_dport)
#endif
elif (L3 and self.inner_version == 6 and version >= 0x60):
packet_len = socket.ntohs(struct.unpack("H", packet[4:6])[0]) + 40
tos = struct.unpack("H", packet[0:2])[0]
self.inner_tos = (socket.ntohs(tos) >> 4) & 0xff
self.inner_ttl = struct.unpack("B", packet[7:8])[0]
self.inner_protocol = struct.unpack("B", packet[6:7])[0]
self.inner_source.afi = LISP_AFI_IPV6
self.inner_dest.afi = LISP_AFI_IPV6
self.inner_source.unpack_address(packet[8:24])
self.inner_dest.unpack_address(packet[24:40])
if (self.inner_protocol == LISP_UDP_PROTOCOL):
self.inner_sport = struct.unpack("H", packet[40:42])[0]
self.inner_sport = socket.ntohs(self.inner_sport)
self.inner_dport = struct.unpack("H", packet[42:44])[0]
self.inner_dport = socket.ntohs(self.inner_dport)
#endif
elif (L2):
packet_len = len(packet)
self.inner_tos = 0
self.inner_ttl = 0
self.inner_protocol = 0
self.inner_source.afi = LISP_AFI_MAC
self.inner_dest.afi = LISP_AFI_MAC
self.inner_dest.unpack_address(self.swap_mac(packet[0:6]))
self.inner_source.unpack_address(self.swap_mac(packet[6:12]))
elif (self.lisp_header.get_instance_id() == 0xffffff):
if (lisp_flow_logging): self.log_flow(False)
return(self)
else:
self.packet_error = "bad-inner-version"
if (stats): stats[self.packet_error].increment(orig_len)
lprint("Cannot decode encapsulation, header version {}".format(\
hex(version)))
packet = lisp_format_packet(packet[0:20])
lprint("Packet header: {}".format(packet))
if (lisp_flow_logging and is_lisp_packet): self.log_flow(False)
return(None)
#endif
self.inner_source.mask_len = self.inner_source.host_mask_len()
self.inner_dest.mask_len = self.inner_dest.host_mask_len()
self.inner_source.instance_id = iid
self.inner_dest.instance_id = iid
#
# If we are configured to do Nonce-Echoing, look up the source RLOC so we
# can store the nonce to echo.
#
if (lisp_nonce_echoing and is_lisp_packet):
echo_nonce = lisp_get_echo_nonce(self.outer_source, None)
if (echo_nonce == None):
rloc_str = self.outer_source.print_address_no_iid()
echo_nonce = lisp_echo_nonce(rloc_str)
#endif
nonce = self.lisp_header.get_nonce()
if (self.lisp_header.is_e_bit_set()):
echo_nonce.receive_request(lisp_ipc_socket, nonce)
elif (echo_nonce.request_nonce_sent):
echo_nonce.receive_echo(lisp_ipc_socket, nonce)
#endif
#endif
#
# If we decrypted, we may have to truncate packet if the encrypter
# padded the packet.
#
if (decrypted): self.packet += packet[:packet_len]
#
# Log a packet that was parsed correctly.
#
if (lisp_flow_logging and is_lisp_packet): self.log_flow(False)
return(self)
#enddef
def swap_mac(self, mac):
return(mac[1] + mac[0] + mac[3] + mac[2] + mac[5] + mac[4])
#enddef
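#
# Editor's note (illustrative): swap_mac() swaps each pair of bytes in a
# packed 6-byte MAC address, so "\x00\x50\x56\xa1\x00\x01" becomes
# "\x50\x00\xa1\x56\x01\x00". It is used by decode() above to normalize
# the byte order of MAC addresses parsed from an L2 frame.
#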
def strip_outer_headers(self):
offset = 16
offset += 20 if (self.outer_version == 4) else 40
self.packet = self.packet[offset::]
return(self)
#enddef
def hash_ports(self):
packet = self.packet
version = self.inner_version
hashval = 0
if (version == 4):
protocol = struct.unpack("B", packet[9])[0]
if (self.inner_is_fragment): return(protocol)
if (protocol in [6, 17]):
hashval = protocol
hashval += struct.unpack("I", packet[20:24])[0]
hashval = (hashval >> 16) ^ (hashval & 0xffff)
#endif
#endif
if (version == 6):
protocol = struct.unpack("B", packet[6])[0]
if (protocol in [6, 17]):
hashval = protocol
hashval += struct.unpack("I", packet[40:44])[0]
hashval = (hashval >> 16) ^ (hashval & 0xffff)
#endif
#endif
return(hashval)
#enddef
def hash_packet(self):
hashval = self.inner_source.address ^ self.inner_dest.address
hashval += self.hash_ports()
if (self.inner_version == 4):
hashval = (hashval >> 16) ^ (hashval & 0xffff)
elif (self.inner_version == 6):
hashval = (hashval >> 64) ^ (hashval & 0xffffffffffffffff)
hashval = (hashval >> 32) ^ (hashval & 0xffffffff)
hashval = (hashval >> 16) ^ (hashval & 0xffff)
#endif
self.udp_sport = 0xf000 | (hashval & 0xfff)
#enddef
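#
# Editor's note (illustrative): hash_packet() folds the inner addresses
# and ports into a 12-bit value and ORs in 0xf000, so a fold of 0x1234
# gives UDP source port 0xf234 (62004). This spreads flows across ECMP
# paths in the underlay while keeping the port in a recognizable range.
#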
def print_packet(self, s_or_r, is_lisp_packet):
if (is_lisp_packet == False):
iaddr_str = "{} -> {}".format(self.inner_source.print_address(),
self.inner_dest.print_address())
dprint(("{} {}, tos/ttl: {}/{}, length: {}, packet: {} ..."). \
format(bold(s_or_r, False),
green(iaddr_str, False), self.inner_tos,
self.inner_ttl, len(self.packet),
lisp_format_packet(self.packet[0:60])))
return
#endif
if (s_or_r.find("Receive") != -1):
ed = "decap"
ed += "-vxlan" if self.udp_dport == LISP_VXLAN_DATA_PORT else ""
else:
ed = s_or_r
if (ed in ["Send", "Replicate"] or ed.find("Fragment") != -1):
ed = "encap"
#endif
#endif
oaddr_str = "{} -> {}".format(self.outer_source.print_address_no_iid(),
self.outer_dest.print_address_no_iid())
#
# Special case where Info-Request is inside of a 4341 packet for
# NAT-traversal.
#
if (self.lisp_header.get_instance_id() == 0xffffff):
line = ("{} LISP packet, outer RLOCs: {}, outer tos/ttl: " + \
"{}/{}, outer UDP: {} -> {}, ")
line += bold("control-packet", False) + ": {} ..."
dprint(line.format(bold(s_or_r, False), red(oaddr_str, False),
self.outer_tos, self.outer_ttl, self.udp_sport,
self.udp_dport, lisp_format_packet(self.packet[0:56])))
return
else:
line = ("{} LISP packet, outer RLOCs: {}, outer tos/ttl: " + \
"{}/{}, outer UDP: {} -> {}, inner EIDs: {}, " + \
"inner tos/ttl: {}/{}, length: {}, {}, packet: {} ...")
#endif
if (self.lisp_header.k_bits):
if (ed == "encap"): ed = "encrypt/encap"
if (ed == "decap"): ed = "decap/decrypt"
#endif
iaddr_str = "{} -> {}".format(self.inner_source.print_address(),
self.inner_dest.print_address())
dprint(line.format(bold(s_or_r, False), red(oaddr_str, False),
self.outer_tos, self.outer_ttl, self.udp_sport, self.udp_dport,
green(iaddr_str, False), self.inner_tos, self.inner_ttl,
len(self.packet), self.lisp_header.print_header(ed),
lisp_format_packet(self.packet[0:56])))
#enddef
def print_eid_tuple(self):
return(lisp_print_eid_tuple(self.inner_source, self.inner_dest))
#enddef
def get_raw_socket(self):
iid = str(self.lisp_header.get_instance_id())
if (iid == "0"): return(None)
if (lisp_iid_to_interface.has_key(iid) == False): return(None)
interface = lisp_iid_to_interface[iid]
s = interface.get_socket()
if (s == None):
string = bold("SO_BINDTODEVICE", False)
enforce = (os.getenv("LISP_ENFORCE_BINDTODEVICE") != None)
lprint("{} required for multi-tenancy support, {} packet".format( \
string, "drop" if enforce else "forward"))
if (enforce): return(None)
#endif
iid = bold(iid, False)
d = bold(interface.device, False)
dprint("Send packet on instance-id {} interface {}".format(iid, d))
return(s)
#enddef
def log_flow(self, encap):
global lisp_flow_log
dump = os.path.exists("./log-flows")
if (len(lisp_flow_log) == LISP_FLOW_LOG_SIZE or dump):
args = [lisp_flow_log]
lisp_flow_log = []
threading.Thread(target=lisp_write_flow_log, args=args).start()
if (dump): os.system("rm ./log-flows")
return
#endif
ts = datetime.datetime.now()
lisp_flow_log.append([ts, encap, self.packet, self])
#enddef
def print_flow(self, ts, encap, packet):
ts = ts.strftime("%m/%d/%y %H:%M:%S.%f")[:-3]
flow = "{}: {}".format(ts, "encap" if encap else "decap")
osrc = red(self.outer_source.print_address_no_iid(), False)
odst = red(self.outer_dest.print_address_no_iid(), False)
isrc = green(self.inner_source.print_address(), False)
idst = green(self.inner_dest.print_address(), False)
if (self.lisp_header.get_instance_id() == 0xffffff):
flow += " {}:{} -> {}:{}, LISP control message type {}\n"
flow = flow.format(osrc, self.udp_sport, odst, self.udp_dport,
self.inner_version)
return(flow)
#endif
if (self.outer_dest.is_null() == False):
flow += " {}:{} -> {}:{}, len/tos/ttl {}/{}/{}"
flow = flow.format(osrc, self.udp_sport, odst, self.udp_dport,
len(packet), self.outer_tos, self.outer_ttl)
#endif
#
# Can't look at inner header if encrypted. Protecting user privacy.
#
if (self.lisp_header.k_bits != 0):
error = "\n"
if (self.packet_error != ""):
error = " ({})".format(self.packet_error) + error
#endif
flow += ", encrypted" + error
return(flow)
#endif
#
# Position to inner header.
#
if (self.outer_dest.is_null() == False):
packet = packet[36::] if self.outer_version == 4 else packet[56::]
#endif
protocol = packet[9] if self.inner_version == 4 else packet[6]
protocol = struct.unpack("B", protocol)[0]
flow += " {} -> {}, len/tos/ttl/prot {}/{}/{}/{}"
flow = flow.format(isrc, idst, len(packet), self.inner_tos,
self.inner_ttl, protocol)
#
# Show some popular transport layer data.
#
if (protocol in [6, 17]):
ports = packet[20:24] if self.inner_version == 4 else packet[40:44]
if (len(ports) == 4):
ports = socket.ntohl(struct.unpack("I", ports)[0])
flow += ", ports {} -> {}".format(ports >> 16, ports & 0xffff)
#endif
elif (protocol == 1):
seq = packet[26:28] if self.inner_version == 4 else packet[46:48]
if (len(seq) == 2):
seq = socket.ntohs(struct.unpack("H", seq)[0])
flow += ", icmp-seq {}".format(seq)
#endif
#endif
if (self.packet_error != ""):
flow += " ({})".format(self.packet_error)
#endif
flow += "\n"
return(flow)
#enddef
def is_trace(self):
ports = [self.inner_sport, self.inner_dport]
return(self.inner_protocol == LISP_UDP_PROTOCOL and
LISP_TRACE_PORT in ports)
#enddef
#endclass
#
# LISP encapsulation header definition.
#
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# / | Source Port = xxxx | Dest Port = 4341 |
# UDP +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# \ | UDP Length | UDP Checksum |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# L |N|L|E|V|I|P|K|K| Nonce/Map-Version |
# I \ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# S / | Instance ID/Locator-Status-Bits |
# P +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
LISP_N_BIT = 0x80000000
LISP_L_BIT = 0x40000000
LISP_E_BIT = 0x20000000
LISP_V_BIT = 0x10000000
LISP_I_BIT = 0x08000000
LISP_P_BIT = 0x04000000
LISP_K_BITS = 0x03000000
class lisp_data_header():
def __init__(self):
self.first_long = 0
self.second_long = 0
self.k_bits = 0
#enddef
def print_header(self, e_or_d):
first_long = lisp_hex_string(self.first_long & 0xffffff)
second_long = lisp_hex_string(self.second_long).zfill(8)
line = ("{} LISP-header -> flags: {}{}{}{}{}{}{}{}, nonce: {}, " + \
"iid/lsb: {}")
return(line.format(bold(e_or_d, False),
"N" if (self.first_long & LISP_N_BIT) else "n",
"L" if (self.first_long & LISP_L_BIT) else "l",
"E" if (self.first_long & LISP_E_BIT) else "e",
"V" if (self.first_long & LISP_V_BIT) else "v",
"I" if (self.first_long & LISP_I_BIT) else "i",
"P" if (self.first_long & LISP_P_BIT) else "p",
"K" if (self.k_bits in [2,3]) else "k",
"K" if (self.k_bits in [1,3]) else "k",
first_long, second_long))
#enddef
def encode(self):
packet_format = "II"
first_long = socket.htonl(self.first_long)
second_long = socket.htonl(self.second_long)
header = struct.pack(packet_format, first_long, second_long)
return(header)
#enddef
def decode(self, packet):
packet_format = "II"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(False)
first_long, second_long = \
struct.unpack(packet_format, packet[:format_size])
self.first_long = socket.ntohl(first_long)
self.second_long = socket.ntohl(second_long)
self.k_bits = (self.first_long & LISP_K_BITS) >> 24
return(True)
#enddef
def key_id(self, key_id):
self.first_long &= ~(0x3 << 24)
self.first_long |= ((key_id & 0x3) << 24)
self.k_bits = key_id
#enddef
def nonce(self, nonce):
self.first_long |= LISP_N_BIT
self.first_long |= nonce
#enddef
def map_version(self, version):
self.first_long |= LISP_V_BIT
self.first_long |= version
#enddef
def instance_id(self, iid):
if (iid == 0): return
self.first_long |= LISP_I_BIT
self.second_long &= 0xff
self.second_long |= (iid << 8)
#enddef
def get_instance_id(self):
return((self.second_long >> 8) & 0xffffff)
#enddef
def locator_status_bits(self, lsbs):
self.first_long |= LISP_L_BIT
self.second_long &= 0xffffff00
self.second_long |= (lsbs & 0xff)
#enddef
def is_request_nonce(self, nonce):
return(nonce & 0x80000000)
#enddef
def request_nonce(self, nonce):
self.first_long |= LISP_E_BIT
self.first_long |= LISP_N_BIT
self.first_long |= (nonce & 0xffffff)
#enddef
def is_e_bit_set(self):
return(self.first_long & LISP_E_BIT)
#enddef
def get_nonce(self):
return(self.first_long & 0xffffff)
#enddef
#endclass
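#
# Illustrative sketch (added for exposition, never called; the function
# name is made up): encode a LISP data header carrying instance-id 1000
# with key-id 1 and some locator-status-bits, then decode it again using
# the lisp_data_header class above.
#
def lisp_data_header_example():
    header = lisp_data_header()
    header.key_id(1)                   # KK bits -> 01, k_bits becomes 1
    header.instance_id(1000)           # sets I-bit, iid into second long
    header.locator_status_bits(0x3)    # sets L-bit, lsbs into second long
    packet = header.encode()           # 8 bytes in network byte order

    decoded = lisp_data_header()
    if (decoded.decode(packet) == False): return(False)
    return(decoded.get_instance_id() == 1000 and decoded.k_bits == 1)
#enddef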
class lisp_echo_nonce():
def __init__(self, rloc_str):
self.rloc_str = rloc_str
self.rloc = lisp_address(LISP_AFI_NONE, rloc_str, 0, 0)
self.request_nonce_sent = None
self.echo_nonce_sent = None
self.last_request_nonce_sent = None
self.last_new_request_nonce_sent = None
self.last_echo_nonce_sent = None
self.last_new_echo_nonce_sent = None
self.request_nonce_rcvd = None
self.echo_nonce_rcvd = None
self.last_request_nonce_rcvd = None
self.last_echo_nonce_rcvd = None
self.last_good_echo_nonce_rcvd = None
lisp_nonce_echo_list[rloc_str] = self
#enddef
def send_ipc(self, ipc_socket, ipc):
source = "lisp-itr" if lisp_i_am_itr else "lisp-etr"
dest = "lisp-etr" if lisp_i_am_itr else "lisp-itr"
ipc = lisp_command_ipc(ipc, source)
lisp_ipc(ipc, ipc_socket, dest)
#enddef
def send_request_ipc(self, ipc_socket, nonce):
nonce = lisp_hex_string(nonce)
ipc = "nonce%R%{}%{}".format(self.rloc_str, nonce)
self.send_ipc(ipc_socket, ipc)
#enddef
def send_echo_ipc(self, ipc_socket, nonce):
nonce = lisp_hex_string(nonce)
ipc = "nonce%E%{}%{}".format(self.rloc_str, nonce)
self.send_ipc(ipc_socket, ipc)
#enddef
def receive_request(self, ipc_socket, nonce):
old_nonce = self.request_nonce_rcvd
self.request_nonce_rcvd = nonce
self.last_request_nonce_rcvd = lisp_get_timestamp()
if (lisp_i_am_rtr): return
if (old_nonce != nonce): self.send_request_ipc(ipc_socket, nonce)
#enddef
def receive_echo(self, ipc_socket, nonce):
if (self.request_nonce_sent != nonce): return
self.last_echo_nonce_rcvd = lisp_get_timestamp()
if (self.echo_nonce_rcvd == nonce): return
self.echo_nonce_rcvd = nonce
if (lisp_i_am_rtr): return
self.send_echo_ipc(ipc_socket, nonce)
#enddef
def get_request_or_echo_nonce(self, ipc_socket, remote_rloc):
#
        # If we are in both request-nonce and echo-nonce mode, let the RLOC
        # with the higher IP address be in request mode.
#
if (self.request_nonce_sent and self.echo_nonce_sent and remote_rloc):
local_rloc = lisp_myrlocs[0] if remote_rloc.is_ipv4() \
else lisp_myrlocs[1]
if (remote_rloc.address > local_rloc.address):
a = "exit"
self.request_nonce_sent = None
else:
a = "stay in"
self.echo_nonce_sent = None
#endif
c = bold("collision", False)
l = red(local_rloc.print_address_no_iid(), False)
r = red(remote_rloc.print_address_no_iid(), False)
lprint("Echo nonce {}, {} -> {}, {} request-nonce mode".format(c,
l, r, a))
#endif
#
# If we are echoing, return echo-nonce. Or get out of echo-nonce mode.
#
if (self.echo_nonce_sent != None):
nonce = self.echo_nonce_sent
e = bold("Echoing", False)
lprint("{} nonce 0x{} to {}".format(e,
lisp_hex_string(nonce), red(self.rloc_str, False)))
self.last_echo_nonce_sent = lisp_get_timestamp()
self.echo_nonce_sent = None
return(nonce)
#endif
#endif
#
        # Should we stop requesting nonce-echoing? Only do so if we received
        # an echo response and some time (10 seconds) has passed.
#
nonce = self.request_nonce_sent
last = self.last_request_nonce_sent
if (nonce and last != None):
if (time.time() - last >= LISP_NONCE_ECHO_INTERVAL):
self.request_nonce_sent = None
lprint("Stop request-nonce mode for {}, nonce 0x{}".format( \
red(self.rloc_str, False), lisp_hex_string(nonce)))
return(None)
#endif
#endif
#
        # Start echoing the nonce. Get a new nonce. If an echo-nonce is
        # stored, use the same nonce as last time regardless of whether we
        # received an echo response. The high-order bit being set tells the
        # caller to set the e-bit in the header.
#
if (nonce == None):
nonce = lisp_get_data_nonce()
if (self.recently_requested()): return(nonce)
self.request_nonce_sent = nonce
lprint("Start request-nonce mode for {}, nonce 0x{}".format( \
red(self.rloc_str, False), lisp_hex_string(nonce)))
self.last_new_request_nonce_sent = lisp_get_timestamp()
#
# Send the request-nonce to the ETR so it can tell us when the
# other side has echoed this request-nonce.
#
if (lisp_i_am_itr == False): return(nonce | 0x80000000)
self.send_request_ipc(ipc_socket, nonce)
else:
lprint("Continue request-nonce mode for {}, nonce 0x{}".format( \
red(self.rloc_str, False), lisp_hex_string(nonce)))
#endif
#
# Continue sending request-nonce. But if we never received an echo,
# don't update timer.
#
self.last_request_nonce_sent = lisp_get_timestamp()
return(nonce | 0x80000000)
#enddef
def request_nonce_timeout(self):
if (self.request_nonce_sent == None): return(False)
if (self.request_nonce_sent == self.echo_nonce_rcvd): return(False)
elapsed = time.time() - self.last_request_nonce_sent
last_resp = self.last_echo_nonce_rcvd
return(elapsed >= LISP_NONCE_ECHO_INTERVAL and last_resp == None)
#enddef
def recently_requested(self):
last_resp = self.last_request_nonce_sent
if (last_resp == None): return(False)
elapsed = time.time() - last_resp
return(elapsed <= LISP_NONCE_ECHO_INTERVAL)
#enddef
def recently_echoed(self):
if (self.request_nonce_sent == None): return(True)
#
        # Check how long it's been since the last received echo.
#
last_resp = self.last_good_echo_nonce_rcvd
if (last_resp == None): last_resp = 0
elapsed = time.time() - last_resp
if (elapsed <= LISP_NONCE_ECHO_INTERVAL): return(True)
#
        # If the last received echo was a while ago and a new request-nonce
        # was sent recently, say the echo happened so we can bootstrap a new
        # request and echo exchange.
#
last_resp = self.last_new_request_nonce_sent
if (last_resp == None): last_resp = 0
elapsed = time.time() - last_resp
return(elapsed <= LISP_NONCE_ECHO_INTERVAL)
#enddef
def change_state(self, rloc):
if (rloc.up_state() and self.recently_echoed() == False):
down = bold("down", False)
good_echo = lisp_print_elapsed(self.last_good_echo_nonce_rcvd)
lprint("Take {} {}, last good echo: {}".format( \
red(self.rloc_str, False), down, good_echo))
rloc.state = LISP_RLOC_NO_ECHOED_NONCE_STATE
rloc.last_state_change = lisp_get_timestamp()
return
#endif
if (rloc.no_echoed_nonce_state() == False): return
if (self.recently_requested() == False):
up = bold("up", False)
lprint("Bring {} {}, retry request-nonce mode".format( \
red(self.rloc_str, False), up))
rloc.state = LISP_RLOC_UP_STATE
rloc.last_state_change = lisp_get_timestamp()
#endif
#enddef
def print_echo_nonce(self):
rs = lisp_print_elapsed(self.last_request_nonce_sent)
er = lisp_print_elapsed(self.last_good_echo_nonce_rcvd)
es = lisp_print_elapsed(self.last_echo_nonce_sent)
rr = lisp_print_elapsed(self.last_request_nonce_rcvd)
s = space(4)
output = "Nonce-Echoing:\n"
output += ("{}Last request-nonce sent: {}\n{}Last echo-nonce " + \
"received: {}\n").format(s, rs, s, er)
output += ("{}Last request-nonce received: {}\n{}Last echo-nonce " + \
"sent: {}").format(s, rr, s, es)
return(output)
#enddef
#endclass
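#
# Simplified view (added for exposition) of how the lisp_echo_nonce class
# above is driven; the data-plane code that actually stamps nonces into
# LISP data headers lives elsewhere in this file:
#
#   get_request_or_echo_nonce() - starts request-nonce mode and returns
#       the nonce with the high-order bit set so the caller sets the
#       e-bit in the data header.
#   receive_request()           - records a request-nonce heard from the
#       remote xTR so this side can start echoing it.
#   receive_echo()              - records that the remote side echoed our
#       request-nonce.
#   change_state()              - takes the RLOC down when no echo has
#       been seen for LISP_NONCE_ECHO_INTERVAL seconds and brings it back
#       up when a new request-nonce exchange can be retried.
#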
#
# lisp_keys
#
# Class to hold Diffie-Hellman keys. For ECDH use RFC5114 gx value of
# "192-bit Random ECP Group".
#
class lisp_keys():
def __init__(self, key_id, do_curve=True, do_chacha=use_chacha,
do_poly=use_poly):
self.uptime = lisp_get_timestamp()
self.last_rekey = None
self.rekey_count = 0
self.use_count = 0
self.key_id = key_id
self.cipher_suite = LISP_CS_1024
self.dh_g_value = LISP_CS_1024_G
self.dh_p_value = LISP_CS_1024_P
self.curve25519 = None
self.cipher_suite_string = ""
if (do_curve):
if (do_chacha):
self.cipher_suite = LISP_CS_25519_CHACHA
self.cipher_suite_string = "chacha"
elif (os.getenv("LISP_USE_AES_GCM") != None):
self.cipher_suite = LISP_CS_25519_GCM
self.cipher_suite_string = "aes-gcm"
else:
self.cipher_suite = LISP_CS_25519_CBC
self.cipher_suite_string = "aes-cbc"
#endif
self.local_private_key = random.randint(0, 2**128-1)
key = lisp_hex_string(self.local_private_key).zfill(32)
self.curve25519 = curve25519.Private(key)
else:
self.local_private_key = random.randint(0, 0x1fff)
#endif
self.local_public_key = self.compute_public_key()
self.remote_public_key = None
self.shared_key = None
self.encrypt_key = None
self.icv_key = None
self.icv = poly1305 if do_poly else hashlib.sha256
self.iv = None
self.get_iv()
self.do_poly = do_poly
#enddef
def copy_keypair(self, key):
self.local_private_key = key.local_private_key
self.local_public_key = key.local_public_key
self.curve25519 = key.curve25519
#enddef
def get_iv(self):
if (self.iv == None):
self.iv = random.randint(0, LISP_16_128_MASK)
else:
self.iv += 1
#endif
iv = self.iv
if (self.cipher_suite == LISP_CS_25519_CHACHA):
iv = struct.pack("Q", iv & LISP_8_64_MASK)
elif (self.cipher_suite == LISP_CS_25519_GCM):
ivh = struct.pack("I", (iv >> 64) & LISP_4_32_MASK)
ivl = struct.pack("Q", iv & LISP_8_64_MASK)
iv = ivh + ivl
else:
iv = struct.pack("QQ", iv >> 64, iv & LISP_8_64_MASK)
return(iv)
#enddef
def key_length(self, key):
if (type(key) != str): key = self.normalize_pub_key(key)
return(len(key) / 2)
#enddef
def print_key(self, key):
k = self.normalize_pub_key(key)
return("0x{}...{}({})".format(k[0:4], k[-4::], self.key_length(k)))
#enddef
def normalize_pub_key(self, key):
if (type(key) == str):
if (self.curve25519): return(binascii.hexlify(key))
return(key)
#endif
key = lisp_hex_string(key).zfill(256)
return(key)
#enddef
def print_keys(self, do_bold=True):
l = bold("local-key: ", False) if do_bold else "local-key: "
if (self.local_public_key == None):
l += "none"
else:
l += self.print_key(self.local_public_key)
#endif
r = bold("remote-key: ", False) if do_bold else "remote-key: "
if (self.remote_public_key == None):
r += "none"
else:
r += self.print_key(self.remote_public_key)
#endif
dh = "ECDH" if (self.curve25519) else "DH"
cs = self.cipher_suite
return("{} cipher-suite: {}, {}, {}".format(dh, cs, l, r))
#enddef
def compare_keys(self, keys):
if (self.dh_g_value != keys.dh_g_value): return(False)
if (self.dh_p_value != keys.dh_p_value): return(False)
if (self.remote_public_key != keys.remote_public_key): return(False)
return(True)
#enddef
def compute_public_key(self):
if (self.curve25519): return(self.curve25519.get_public().public)
key = self.local_private_key
g = self.dh_g_value
p = self.dh_p_value
return(int((g**key) % p))
#enddef
def compute_shared_key(self, ed, print_shared=False):
key = self.local_private_key
remote_key = self.remote_public_key
compute = bold("Compute {} shared-key".format(ed), False)
lprint("{}, key-material: {}".format(compute, self.print_keys()))
if (self.curve25519):
public = curve25519.Public(remote_key)
self.shared_key = self.curve25519.get_shared_key(public)
else:
p = self.dh_p_value
self.shared_key = (remote_key**key) % p
#endif
#
        # This should only be used in a lab for debugging and never live,
        # since it's a security risk to expose the shared-key (even though
        # the entire key is not displayed).
#
if (print_shared):
k = self.print_key(self.shared_key)
lprint("Computed shared-key: {}".format(k))
#endif
#
# Now compute keys we use for encryption and ICV authentication.
#
self.compute_encrypt_icv_keys()
#
# Increment counters and timestamp.
#
self.rekey_count += 1
self.last_rekey = lisp_get_timestamp()
#enddef
def compute_encrypt_icv_keys(self):
alg = hashlib.sha256
if (self.curve25519):
data = self.shared_key
else:
data = lisp_hex_string(self.shared_key)
#endif
#
# context = "0001" || "lisp-crypto" || "<lpub> xor <rpub>" || "0100"
#
l = self.local_public_key
if (type(l) != long): l = int(binascii.hexlify(l), 16)
r = self.remote_public_key
if (type(r) != long): r = int(binascii.hexlify(r), 16)
context = "0001" + "lisp-crypto" + lisp_hex_string(l ^ r) + "0100"
key_material = hmac.new(context, data, alg).hexdigest()
key_material = int(key_material, 16)
#
# key-material = key-material-1-encrypt || key-material-2-icv
#
ek = (key_material >> 128) & LISP_16_128_MASK
ik = key_material & LISP_16_128_MASK
self.encrypt_key = lisp_hex_string(ek).zfill(32)
fill = 32 if self.do_poly else 40
self.icv_key = lisp_hex_string(ik).zfill(fill)
#enddef
def do_icv(self, packet, nonce):
if (self.icv_key == None): return("")
if (self.do_poly):
poly = self.icv.poly1305aes
hexlify = self.icv.binascii.hexlify
nonce = hexlify(nonce)
hash_output = poly(self.encrypt_key, self.icv_key, nonce, packet)
hash_output = hexlify(hash_output)
else:
key = binascii.unhexlify(self.icv_key)
hash_output = hmac.new(key, packet, self.icv).hexdigest()
hash_output = hash_output[0:40]
#endif
return(hash_output)
#enddef
def add_key_by_nonce(self, nonce):
if (lisp_crypto_keys_by_nonce.has_key(nonce) == False):
lisp_crypto_keys_by_nonce[nonce] = [None, None, None, None]
#endif
lisp_crypto_keys_by_nonce[nonce][self.key_id] = self
#enddef
def delete_key_by_nonce(self, nonce):
if (lisp_crypto_keys_by_nonce.has_key(nonce) == False): return
lisp_crypto_keys_by_nonce.pop(nonce)
#enddef
def add_key_by_rloc(self, addr_str, encap):
by_rlocs = lisp_crypto_keys_by_rloc_encap if encap else \
lisp_crypto_keys_by_rloc_decap
if (by_rlocs.has_key(addr_str) == False):
by_rlocs[addr_str] = [None, None, None, None]
#endif
by_rlocs[addr_str][self.key_id] = self
#
# If "ipc-data-plane = yes" is configured, we need to tell the data-
# plane from the lisp-etr process what the decryption key is.
#
if (encap == False):
lisp_write_ipc_decap_key(addr_str, by_rlocs[addr_str])
#endif
#enddef
def encode_lcaf(self, rloc_addr):
pub_key = self.normalize_pub_key(self.local_public_key)
key_len = self.key_length(pub_key)
sec_len = (6 + key_len + 2)
if (rloc_addr != None): sec_len += rloc_addr.addr_length()
packet = struct.pack("HBBBBHBB", socket.htons(LISP_AFI_LCAF), 0, 0,
LISP_LCAF_SECURITY_TYPE, 0, socket.htons(sec_len), 1, 0)
#
        # Put in cipher suite value. Support 1024-bit keys only. Then insert
        # key-length and public key material. Do not negotiate the ECDH 25519
        # cipher suite if the library is not installed on the system.
#
cs = self.cipher_suite
packet += struct.pack("BBH", cs, 0, socket.htons(key_len))
#
# Insert public-key.
#
for i in range(0, key_len * 2, 16):
key = int(pub_key[i:i+16], 16)
packet += struct.pack("Q", byte_swap_64(key))
#endfor
#
# Insert RLOC address.
#
if (rloc_addr):
packet += struct.pack("H", socket.htons(rloc_addr.afi))
packet += rloc_addr.pack_address()
#endif
return(packet)
#enddef
def decode_lcaf(self, packet, lcaf_len):
#
# Called by lisp_map_request().
#
if (lcaf_len == 0):
packet_format = "HHBBH"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
afi, rsvd, lcaf_type, rsvd, lcaf_len = struct.unpack( \
packet_format, packet[:format_size])
if (lcaf_type != LISP_LCAF_SECURITY_TYPE):
packet = packet[lcaf_len + 6::]
return(packet)
#endif
lcaf_len = socket.ntohs(lcaf_len)
packet = packet[format_size::]
#endif
#
# Fall through or called by lisp_rloc_record() when lcaf_len is
# non-zero.
#
lcaf_type = LISP_LCAF_SECURITY_TYPE
packet_format = "BBBBH"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
key_count, rsvd, cs, rsvd, key_len = struct.unpack(packet_format,
packet[:format_size])
#
        # Advance packet pointer to the beginning of the key material.
        # Validate there is enough packet to pull the key out according to
        # the encoded key length found earlier in the packet.
#
packet = packet[format_size::]
key_len = socket.ntohs(key_len)
if (len(packet) < key_len): return(None)
#
# Check Cipher Suites supported.
#
cs_list = [LISP_CS_25519_CBC, LISP_CS_25519_GCM, LISP_CS_25519_CHACHA,
LISP_CS_1024]
if (cs not in cs_list):
lprint("Cipher-suites {} supported, received {}".format(cs_list,
cs))
packet = packet[key_len::]
return(packet)
#endif
self.cipher_suite = cs
#
        # Iterate to pull 8 bytes (64-bits) out at a time. The key is stored
# internally as an integer.
#
pub_key = 0
for i in range(0, key_len, 8):
key = byte_swap_64(struct.unpack("Q", packet[i:i+8])[0])
pub_key <<= 64
pub_key |= key
#endfor
self.remote_public_key = pub_key
#
# Convert to 32-byte binary string. Make sure leading 0s are included.
# ;-)
#
if (self.curve25519):
key = lisp_hex_string(self.remote_public_key)
key = key.zfill(64)
new_key = ""
for i in range(0, len(key), 2):
new_key += chr(int(key[i:i+2], 16))
#endfor
self.remote_public_key = new_key
#endif
packet = packet[key_len::]
return(packet)
#enddef
#endclass
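#
# Illustrative sketch (added for exposition, never called; the function
# name is made up): run the Diffie-Hellman exchange implemented by
# lisp_keys above between two instances, the way an ITR ("encap") and an
# ETR ("decap") would after trading public keys in Security Type LCAFs.
# Assumes the crypto modules imported by this file (curve25519, and
# poly1305/chacha when enabled) are available.
#
def lisp_keys_example():
    itr_keys = lisp_keys(1)
    etr_keys = lisp_keys(1)

    #
    # Each side learns the other's public key from the received LCAF.
    #
    itr_keys.remote_public_key = etr_keys.local_public_key
    etr_keys.remote_public_key = itr_keys.local_public_key

    #
    # Both sides now derive the same encryption and ICV keys.
    #
    itr_keys.compute_shared_key("encap")
    etr_keys.compute_shared_key("decap")
    return(itr_keys.encrypt_key == etr_keys.encrypt_key and
        itr_keys.icv_key == etr_keys.icv_key)
#enddef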
#
# lisp_thread()
#
# Used to multi-thread the data-plane.
#
class lisp_thread():
def __init__(self, name):
self.thread_name = name
self.thread_number = -1
self.number_of_pcap_threads = 0
self.number_of_worker_threads = 0
self.input_queue = Queue.Queue()
self.input_stats = lisp_stats()
self.lisp_packet = lisp_packet(None)
#enddef
#endclass
#------------------------------------------------------------------------------
#
# The LISP fixed control header:
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# |Type=x | Reserved | Record Count |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Nonce . . . |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | . . . Nonce |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
class lisp_control_header():
def __init__(self):
self.type = 0
self.record_count = 0
self.nonce = 0
self.rloc_probe = False
self.smr_bit = False
self.smr_invoked_bit = False
self.ddt_bit = False
self.to_etr = False
self.to_ms = False
self.info_reply = False
#enddef
def decode(self, packet):
packet_format = "BBBBQ"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(False)
typeval, bits, reserved, self.record_count, self.nonce = \
struct.unpack(packet_format, packet[:format_size])
self.type = typeval >> 4
if (self.type == LISP_MAP_REQUEST):
self.smr_bit = True if (typeval & 0x01) else False
self.rloc_probe = True if (typeval & 0x02) else False
self.smr_invoked_bit = True if (bits & 0x40) else False
#endif
if (self.type == LISP_ECM):
self.ddt_bit = True if (typeval & 0x04) else False
self.to_etr = True if (typeval & 0x02) else False
self.to_ms = True if (typeval & 0x01) else False
#endif
if (self.type == LISP_NAT_INFO):
self.info_reply = True if (typeval & 0x08) else False
#endif
return(True)
#enddef
def is_info_request(self):
return((self.type == LISP_NAT_INFO and self.is_info_reply() == False))
#enddef
def is_info_reply(self):
return(True if self.info_reply else False)
#enddef
def is_rloc_probe(self):
return(True if self.rloc_probe else False)
#enddef
def is_smr(self):
return(True if self.smr_bit else False)
#enddef
def is_smr_invoked(self):
return(True if self.smr_invoked_bit else False)
#enddef
def is_ddt(self):
return(True if self.ddt_bit else False)
#enddef
def is_to_etr(self):
return(True if self.to_etr else False)
#enddef
def is_to_ms(self):
return(True if self.to_ms else False)
#enddef
#endclass
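#
# Illustrative sketch (added for exposition, never called; the function
# name and values are made up): build the fixed header of a Map-Request
# RLOC-probe by hand and decode it with the lisp_control_header class
# above.
#
def lisp_control_header_example():
    typeval = (LISP_MAP_REQUEST << 4) | 0x02    # type nibble plus P-bit
    packet = struct.pack("BBBBQ", typeval, 0, 0, 1, 0xdeadbeef)

    header = lisp_control_header()
    if (header.decode(packet) == False): return(False)
    return(header.type == LISP_MAP_REQUEST and header.is_rloc_probe())
#enddef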
#
# The Map-Register message format is:
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# |Type=3 |P|S|I| Reserved | kid |e|F|T|a|m|M| Record Count |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Nonce . . . |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | . . . Nonce |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Key ID | Algorithm ID | Authentication Data Length |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# ~ Authentication Data ~
# +-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | | Record TTL |
# | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# R | Locator Count | EID mask-len | ACT |A| Reserved |
# e +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# c | Rsvd | Map-Version Number | EID-Prefix-AFI |
# o +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# r | EID-Prefix |
# d +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | /| Priority | Weight | M Priority | M Weight |
# | L +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | o | Unused Flags |L|p|R| Loc-AFI |
# | c +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | \| Locator |
# +-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | |
# | |
# +- ... xTR router-ID ... -+
# | |
# | |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | |
# +- ... xTR site-ID ... -+
# | |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
# kid is one of 8 values that describes the encryption key-id used for
# encrypting Map-Register messages. When the Map-Register is encrypted, the
# entire message, not including the first 4 bytes, is chacha20 encrypted. The
# e-bit must be set by the ETR to indicate that the Map-Register was encrypted.
#
class lisp_map_register():
def __init__(self):
self.proxy_reply_requested = False
self.lisp_sec_present = False
self.xtr_id_present = False
self.map_notify_requested = False
self.mobile_node = False
self.merge_register_requested = False
self.use_ttl_for_timeout = False
self.map_register_refresh = False
self.record_count = 0
self.nonce = 0
self.alg_id = 0
self.key_id = 0
self.auth_len = 0
self.auth_data = 0
self.xtr_id = 0
self.site_id = 0
self.record_count = 0
self.sport = 0
self.encrypt_bit = 0
self.encryption_key_id = None
#enddef
def print_map_register(self):
xtr_id = lisp_hex_string(self.xtr_id)
line = ("{} -> flags: {}{}{}{}{}{}{}{}{}, record-count: " +
"{}, nonce: 0x{}, key/alg-id: {}/{}{}, auth-len: {}, xtr-id: " +
"0x{}, site-id: {}")
lprint(line.format(bold("Map-Register", False), \
"P" if self.proxy_reply_requested else "p",
"S" if self.lisp_sec_present else "s",
"I" if self.xtr_id_present else "i",
"T" if self.use_ttl_for_timeout else "t",
"R" if self.merge_register_requested else "r",
"M" if self.mobile_node else "m",
"N" if self.map_notify_requested else "n",
"F" if self.map_register_refresh else "f",
"E" if self.encrypt_bit else "e",
self.record_count, lisp_hex_string(self.nonce), self.key_id,
self.alg_id, " (sha1)" if (self.key_id == LISP_SHA_1_96_ALG_ID) \
else (" (sha2)" if (self.key_id == LISP_SHA_256_128_ALG_ID) else \
""), self.auth_len, xtr_id, self.site_id))
#enddef
def encode(self):
first_long = (LISP_MAP_REGISTER << 28) | self.record_count
if (self.proxy_reply_requested): first_long |= 0x08000000
if (self.lisp_sec_present): first_long |= 0x04000000
if (self.xtr_id_present): first_long |= 0x02000000
if (self.map_register_refresh): first_long |= 0x1000
if (self.use_ttl_for_timeout): first_long |= 0x800
if (self.merge_register_requested): first_long |= 0x400
if (self.mobile_node): first_long |= 0x200
if (self.map_notify_requested): first_long |= 0x100
if (self.encryption_key_id != None):
first_long |= 0x2000
first_long |= self.encryption_key_id << 14
#endif
#
        # Append zeroed authentication data so we can compute the hash later.
#
if (self.alg_id == LISP_NONE_ALG_ID):
self.auth_len = 0
else:
if (self.alg_id == LISP_SHA_1_96_ALG_ID):
self.auth_len = LISP_SHA1_160_AUTH_DATA_LEN
#endif
if (self.alg_id == LISP_SHA_256_128_ALG_ID):
self.auth_len = LISP_SHA2_256_AUTH_DATA_LEN
#endif
#endif
packet = struct.pack("I", socket.htonl(first_long))
packet += struct.pack("QBBH", self.nonce, self.key_id, self.alg_id,
socket.htons(self.auth_len))
packet = self.zero_auth(packet)
return(packet)
#enddef
def zero_auth(self, packet):
offset = struct.calcsize("I") + struct.calcsize("QHH")
auth_data = ""
auth_len = 0
if (self.alg_id == LISP_NONE_ALG_ID): return(packet)
if (self.alg_id == LISP_SHA_1_96_ALG_ID):
auth_data = struct.pack("QQI", 0, 0, 0)
auth_len = struct.calcsize("QQI")
#endif
if (self.alg_id == LISP_SHA_256_128_ALG_ID):
auth_data = struct.pack("QQQQ", 0, 0, 0, 0)
auth_len = struct.calcsize("QQQQ")
#endif
packet = packet[0:offset] + auth_data + packet[offset+auth_len::]
return(packet)
#enddef
def encode_auth(self, packet):
offset = struct.calcsize("I") + struct.calcsize("QHH")
auth_len = self.auth_len
auth_data = self.auth_data
packet = packet[0:offset] + auth_data + packet[offset + auth_len::]
return(packet)
#enddef
def decode(self, packet):
orig_packet = packet
packet_format = "I"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return([None, None])
first_long = struct.unpack(packet_format, packet[:format_size])
first_long = socket.ntohl(first_long[0])
packet = packet[format_size::]
packet_format = "QBBH"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return([None, None])
self.nonce, self.key_id, self.alg_id, self.auth_len = \
struct.unpack(packet_format, packet[:format_size])
self.auth_len = socket.ntohs(self.auth_len)
self.proxy_reply_requested = True if (first_long & 0x08000000) \
else False
self.lisp_sec_present = True if (first_long & 0x04000000) else False
self.xtr_id_present = True if (first_long & 0x02000000) else False
self.use_ttl_for_timeout = True if (first_long & 0x800) else False
self.map_register_refresh = True if (first_long & 0x1000) else False
self.merge_register_requested = True if (first_long & 0x400) else False
self.mobile_node = True if (first_long & 0x200) else False
self.map_notify_requested = True if (first_long & 0x100) else False
self.record_count = first_long & 0xff
#
# Decode e-bit and key-id for Map-Register decryption.
#
self.encrypt_bit = True if first_long & 0x2000 else False
if (self.encrypt_bit):
self.encryption_key_id = (first_long >> 14) & 0x7
#endif
#
# Decode xTR-ID and site-ID if sender set the xtr_id_present bit.
#
if (self.xtr_id_present):
if (self.decode_xtr_id(orig_packet) == False): return([None, None])
#endif
packet = packet[format_size::]
#
# Parse authentication and zero out the auth field in the packet.
#
if (self.auth_len != 0):
if (len(packet) < self.auth_len): return([None, None])
if (self.alg_id not in (LISP_NONE_ALG_ID, LISP_SHA_1_96_ALG_ID,
LISP_SHA_256_128_ALG_ID)):
lprint("Invalid authentication alg-id: {}".format(self.alg_id))
return([None, None])
#endif
auth_len = self.auth_len
if (self.alg_id == LISP_SHA_1_96_ALG_ID):
format_size = struct.calcsize("QQI")
if (auth_len < format_size):
lprint("Invalid sha1-96 authentication length")
return([None, None])
#endif
auth1, auth2, auth3 = struct.unpack("QQI", packet[:auth_len])
auth4 = ""
elif (self.alg_id == LISP_SHA_256_128_ALG_ID):
format_size = struct.calcsize("QQQQ")
if (auth_len < format_size):
lprint("Invalid sha2-256 authentication length")
return([None, None])
#endif
auth1, auth2, auth3, auth4 = struct.unpack("QQQQ",
packet[:auth_len])
else:
lprint("Unsupported authentication alg-id value {}".format( \
self.alg_id))
return([None, None])
#endif
self.auth_data = lisp_concat_auth_data(self.alg_id, auth1, auth2,
auth3, auth4)
orig_packet = self.zero_auth(orig_packet)
packet = packet[self.auth_len::]
#endif
return([orig_packet, packet])
#enddef
def encode_xtr_id(self, packet):
xtr_id_upper = self.xtr_id >> 64
xtr_id_lower = self.xtr_id & 0xffffffffffffffff
xtr_id_upper = byte_swap_64(xtr_id_upper)
xtr_id_lower = byte_swap_64(xtr_id_lower)
site_id = byte_swap_64(self.site_id)
packet += struct.pack("QQQ", xtr_id_upper, xtr_id_lower, site_id)
return(packet)
#enddef
def decode_xtr_id(self, packet):
format_size = struct.calcsize("QQQ")
if (len(packet) < format_size): return([None, None])
packet = packet[len(packet)-format_size::]
xtr_id_upper, xtr_id_lower, site_id = struct.unpack("QQQ",
packet[:format_size])
xtr_id_upper = byte_swap_64(xtr_id_upper)
xtr_id_lower = byte_swap_64(xtr_id_lower)
self.xtr_id = (xtr_id_upper << 64) | xtr_id_lower
self.site_id = byte_swap_64(site_id)
return(True)
#enddef
#endclass
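#
# Illustrative sketch (added for exposition, never called; the function
# name and nonce are made up): encode a Map-Register with no
# authentication but with the e-bit and encryption key-id 3 set, then
# decode it to show where those bits land in the first long.
#
def lisp_map_register_example():
    reg = lisp_map_register()
    reg.nonce = 0x123456789abcdef0
    reg.alg_id = LISP_NONE_ALG_ID
    reg.encryption_key_id = 3
    packet = reg.encode()

    decoded = lisp_map_register()
    orig_packet, eid_records = decoded.decode(packet)
    if (orig_packet == None): return(False)
    return(decoded.encrypt_bit and decoded.encryption_key_id == 3)
#enddef
#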
# The Map-Notify/Map-Notify-Ack message format is:
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# |Type=4/5| Reserved | Record Count |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Nonce . . . |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | . . . Nonce |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Key ID | Algorithm ID | Authentication Data Length |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# ~ Authentication Data ~
# +-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | | Record TTL |
# | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# R | Locator Count | EID mask-len | ACT |A| Reserved |
# e +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# c | Rsvd | Map-Version Number | EID-Prefix-AFI |
# o +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# r | EID-Prefix |
# d +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | /| Priority | Weight | M Priority | M Weight |
# | L +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | o | Unused Flags |L|p|R| Loc-AFI |
# | c +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | \| Locator |
# +-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
class lisp_map_notify():
def __init__(self, lisp_sockets):
self.etr = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.etr_port = 0
self.retransmit_timer = None
self.lisp_sockets = lisp_sockets
self.retry_count = 0
self.record_count = 0
self.alg_id = LISP_NONE_ALG_ID
self.key_id = 0
self.auth_len = 0
self.auth_data = ""
self.nonce = 0
self.nonce_key = ""
self.packet = None
self.site = ""
self.map_notify_ack = False
self.eid_records = ""
self.eid_list = []
#enddef
def print_notify(self):
auth_data = binascii.hexlify(self.auth_data)
if (self.alg_id == LISP_SHA_1_96_ALG_ID and len(auth_data) != 40):
auth_data = self.auth_data
elif (self.alg_id == LISP_SHA_256_128_ALG_ID and len(auth_data) != 64):
auth_data = self.auth_data
#endif
line = ("{} -> record-count: {}, nonce: 0x{}, key/alg-id: " +
"{}{}{}, auth-len: {}, auth-data: {}")
lprint(line.format(bold("Map-Notify-Ack", False) if \
self.map_notify_ack else bold("Map-Notify", False),
self.record_count, lisp_hex_string(self.nonce), self.key_id,
self.alg_id, " (sha1)" if (self.key_id == LISP_SHA_1_96_ALG_ID) \
else (" (sha2)" if (self.key_id == LISP_SHA_256_128_ALG_ID) else \
""), self.auth_len, auth_data))
#enddef
def zero_auth(self, packet):
if (self.alg_id == LISP_NONE_ALG_ID): return(packet)
if (self.alg_id == LISP_SHA_1_96_ALG_ID):
auth_data = struct.pack("QQI", 0, 0, 0)
#endif
if (self.alg_id == LISP_SHA_256_128_ALG_ID):
auth_data = struct.pack("QQQQ", 0, 0, 0, 0)
#endif
packet += auth_data
return(packet)
#enddef
def encode(self, eid_records, password):
if (self.map_notify_ack):
first_long = (LISP_MAP_NOTIFY_ACK << 28) | self.record_count
else:
first_long = (LISP_MAP_NOTIFY << 28) | self.record_count
#endif
packet = struct.pack("I", socket.htonl(first_long))
packet += struct.pack("QBBH", self.nonce, self.key_id, self.alg_id,
socket.htons(self.auth_len))
if (self.alg_id == LISP_NONE_ALG_ID):
self.packet = packet + eid_records
return(self.packet)
#endif
#
# Run authentication hash across packet.
#
packet = self.zero_auth(packet)
packet += eid_records
hashval = lisp_hash_me(packet, self.alg_id, password, False)
offset = struct.calcsize("I") + struct.calcsize("QHH")
auth_len = self.auth_len
self.auth_data = hashval
packet = packet[0:offset] + hashval + packet[offset + auth_len::]
self.packet = packet
return(packet)
#enddef
def decode(self, packet):
orig_packet = packet
packet_format = "I"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
first_long = struct.unpack(packet_format, packet[:format_size])
first_long = socket.ntohl(first_long[0])
self.map_notify_ack = ((first_long >> 28) == LISP_MAP_NOTIFY_ACK)
self.record_count = first_long & 0xff
packet = packet[format_size::]
packet_format = "QBBH"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
self.nonce, self.key_id, self.alg_id, self.auth_len = \
struct.unpack(packet_format, packet[:format_size])
self.nonce_key = lisp_hex_string(self.nonce)
self.auth_len = socket.ntohs(self.auth_len)
packet = packet[format_size::]
self.eid_records = packet[self.auth_len::]
if (self.auth_len == 0): return(self.eid_records)
#
# Parse authentication and zero out the auth field in the packet.
#
if (len(packet) < self.auth_len): return(None)
auth_len = self.auth_len
if (self.alg_id == LISP_SHA_1_96_ALG_ID):
auth1, auth2, auth3 = struct.unpack("QQI", packet[:auth_len])
auth4 = ""
#endif
if (self.alg_id == LISP_SHA_256_128_ALG_ID):
auth1, auth2, auth3, auth4 = struct.unpack("QQQQ",
packet[:auth_len])
#endif
self.auth_data = lisp_concat_auth_data(self.alg_id, auth1, auth2,
auth3, auth4)
format_size = struct.calcsize("I") + struct.calcsize("QHH")
packet = self.zero_auth(orig_packet[:format_size])
format_size += auth_len
packet += orig_packet[format_size::]
return(packet)
#enddef
#endclass
#
# Map-Request message format is:
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# |Type=1 |A|M|P|S|p|s|m|I|Reserved |L|D| IRC | Record Count |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Nonce . . . |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | . . . Nonce |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Source-EID-AFI | Source EID Address ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | ITR-RLOC-AFI 1 | ITR-RLOC Address 1 ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | ITR-RLOC-AFI n | ITR-RLOC Address n ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# / |N| Reserved | EID mask-len | EID-prefix-AFI |
# Rec +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# \ | EID-prefix ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Map-Reply Record ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Mapping Protocol Data |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | xTR-ID |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
# When a Map-Request is signed, the hash is over the IPv6 CGA based EID,
# the Map-Request Nonce, and the EID-record. The signature is placed in
# the Source-EID as a LCAF JSON Type string of { "source-eid" : "<cga>",
# "signature-eid" : "<cga-of-signer>", "signature" : "<sig"> }.
#
# Generating private/public key-pairs via:
#
# openssl genpkey -algorithm RSA -out privkey.pem \
# -pkeyopt rsa_keygen_bits:2048
# openssl rsa -pubout -in privkey.pem -out pubkey.pem
#
# And use ecdsa.VerifyingKey.from_pem() after reading in the file.
#
# xTR-ID is appended to the end of a Map-Request when a subscription request
# is piggybacked (when self.subscribe_bit is True).
#
class lisp_map_request():
def __init__(self):
self.auth_bit = False
self.map_data_present = False
self.rloc_probe = False
self.smr_bit = False
self.pitr_bit = False
self.smr_invoked_bit = False
self.mobile_node = False
self.xtr_id_present = False
self.local_xtr = False
self.dont_reply_bit = False
self.itr_rloc_count = 0
self.record_count = 0
self.nonce = 0
self.signature_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.source_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.target_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.target_group = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.itr_rlocs = []
self.keys = None
self.privkey_filename = None
self.map_request_signature = None
self.subscribe_bit = False
self.xtr_id = None
#enddef
def print_prefix(self):
if (self.target_group.is_null()):
return(green(self.target_eid.print_prefix(), False))
#endif
return(green(self.target_eid.print_sg(self.target_group), False))
#enddef
def print_map_request(self):
xtr_id = ""
if (self.xtr_id != None and self.subscribe_bit):
xtr_id = "subscribe, xtr-id: 0x{}, ".format(lisp_hex_string( \
self.xtr_id))
#endif
line = ("{} -> flags: {}{}{}{}{}{}{}{}{}{}, itr-rloc-" +
"count: {} (+1), record-count: {}, nonce: 0x{}, source-eid: " +
"afi {}, {}{}, target-eid: afi {}, {}, {}ITR-RLOCs:")
lprint(line.format(bold("Map-Request", False), \
"A" if self.auth_bit else "a",
"D" if self.map_data_present else "d",
"R" if self.rloc_probe else "r",
"S" if self.smr_bit else "s",
"P" if self.pitr_bit else "p",
"I" if self.smr_invoked_bit else "i",
"M" if self.mobile_node else "m",
"X" if self.xtr_id_present else "x",
"L" if self.local_xtr else "l",
"D" if self.dont_reply_bit else "d", self.itr_rloc_count,
self.record_count, lisp_hex_string(self.nonce),
self.source_eid.afi, green(self.source_eid.print_address(), False),
" (with sig)" if self.map_request_signature != None else "",
self.target_eid.afi, green(self.print_prefix(), False), xtr_id))
keys = self.keys
for itr in self.itr_rlocs:
lprint(" itr-rloc: afi {} {}{}".format(itr.afi,
red(itr.print_address_no_iid(), False),
"" if (keys == None) else ", " + keys[1].print_keys()))
keys = None
#endfor
#enddef
def sign_map_request(self, privkey):
sig_eid = self.signature_eid.print_address()
source_eid = self.source_eid.print_address()
target_eid = self.target_eid.print_address()
sig_data = lisp_hex_string(self.nonce) + source_eid + target_eid
self.map_request_signature = privkey.sign(sig_data)
sig = binascii.b2a_base64(self.map_request_signature)
sig = { "source-eid" : source_eid, "signature-eid" : sig_eid,
"signature" : sig }
return(json.dumps(sig))
#enddef
def verify_map_request_sig(self, pubkey):
sseid = green(self.signature_eid.print_address(), False)
if (pubkey == None):
lprint("Public-key not found for signature-EID {}".format(sseid))
return(False)
#endif
source_eid = self.source_eid.print_address()
target_eid = self.target_eid.print_address()
sig_data = lisp_hex_string(self.nonce) + source_eid + target_eid
pubkey = binascii.a2b_base64(pubkey)
good = True
try:
key = ecdsa.VerifyingKey.from_pem(pubkey)
except:
lprint("Invalid public-key in mapping system for sig-eid {}". \
format(self.signature_eid.print_address_no_iid()))
good = False
#endtry
if (good):
try:
good = key.verify(self.map_request_signature, sig_data)
except:
good = False
#endtry
#endif
passfail = bold("passed" if good else "failed", False)
lprint("Signature verification {} for EID {}".format(passfail, sseid))
return(good)
#enddef
def encode(self, probe_dest, probe_port):
first_long = (LISP_MAP_REQUEST << 28) | self.record_count
first_long = first_long | (self.itr_rloc_count << 8)
if (self.auth_bit): first_long |= 0x08000000
if (self.map_data_present): first_long |= 0x04000000
if (self.rloc_probe): first_long |= 0x02000000
if (self.smr_bit): first_long |= 0x01000000
if (self.pitr_bit): first_long |= 0x00800000
if (self.smr_invoked_bit): first_long |= 0x00400000
if (self.mobile_node): first_long |= 0x00200000
if (self.xtr_id_present): first_long |= 0x00100000
if (self.local_xtr): first_long |= 0x00004000
if (self.dont_reply_bit): first_long |= 0x00002000
packet = struct.pack("I", socket.htonl(first_long))
packet += struct.pack("Q", self.nonce)
#
# Check if Map-Request is going to be signed. If so, encode json-string
# in source-EID field. Otherwise, just encode source-EID with instance-
# id in source-EID field.
#
encode_sig = False
filename = self.privkey_filename
if (filename != None and os.path.exists(filename)):
f = open(filename, "r"); key = f.read(); f.close()
try:
key = ecdsa.SigningKey.from_pem(key)
except:
return(None)
#endtry
json_string = self.sign_map_request(key)
encode_sig = True
elif (self.map_request_signature != None):
sig = binascii.b2a_base64(self.map_request_signature)
json_string = { "source-eid" : self.source_eid.print_address(),
"signature-eid" : self.signature_eid.print_address(),
"signature" : sig }
json_string = json.dumps(json_string)
encode_sig = True
#endif
if (encode_sig):
lcaf_type = LISP_LCAF_JSON_TYPE
lcaf_afi = socket.htons(LISP_AFI_LCAF)
lcaf_len = socket.htons(len(json_string) + 2)
json_len = socket.htons(len(json_string))
packet += struct.pack("HBBBBHH", lcaf_afi, 0, 0, lcaf_type, 0,
lcaf_len, json_len)
packet += json_string
packet += struct.pack("H", 0)
else:
if (self.source_eid.instance_id != 0):
packet += struct.pack("H", socket.htons(LISP_AFI_LCAF))
packet += self.source_eid.lcaf_encode_iid()
else:
packet += struct.pack("H", socket.htons(self.source_eid.afi))
packet += self.source_eid.pack_address()
#endif
#endif
#
        # For RLOC-probes, see if keys have already been negotiated for the
        # RLOC. If so, use them so a new DH exchange does not happen.
#
if (probe_dest):
if (probe_port == 0): probe_port = LISP_DATA_PORT
addr_str = probe_dest.print_address_no_iid() + ":" + \
str(probe_port)
if (lisp_crypto_keys_by_rloc_encap.has_key(addr_str)):
self.keys = lisp_crypto_keys_by_rloc_encap[addr_str]
#endif
#endif
#
# If security is enabled, put security parameters in the first
# ITR-RLOC.
#
for itr in self.itr_rlocs:
if (lisp_data_plane_security and self.itr_rlocs.index(itr) == 0):
if (self.keys == None or self.keys[1] == None):
keys = lisp_keys(1)
self.keys = [None, keys, None, None]
#endif
keys = self.keys[1]
keys.add_key_by_nonce(self.nonce)
packet += keys.encode_lcaf(itr)
else:
packet += struct.pack("H", socket.htons(itr.afi))
packet += itr.pack_address()
#endif
#endfor
mask_len = 0 if self.target_eid.is_binary() == False else \
self.target_eid.mask_len
subscribe = 0
if (self.subscribe_bit):
subscribe = 0x80
self.xtr_id_present = True
if (self.xtr_id == None):
self.xtr_id = random.randint(0, (2**128)-1)
#endif
#endif
packet_format = "BB"
packet += struct.pack(packet_format, subscribe, mask_len)
if (self.target_group.is_null() == False):
packet += struct.pack("H", socket.htons(LISP_AFI_LCAF))
packet += self.target_eid.lcaf_encode_sg(self.target_group)
elif (self.target_eid.instance_id != 0 or
self.target_eid.is_geo_prefix()):
packet += struct.pack("H", socket.htons(LISP_AFI_LCAF))
packet += self.target_eid.lcaf_encode_iid()
else:
packet += struct.pack("H", socket.htons(self.target_eid.afi))
packet += self.target_eid.pack_address()
#endif
#
# If this is a subscription request, append xTR-ID to end of packet.
#
if (self.subscribe_bit): packet = self.encode_xtr_id(packet)
return(packet)
#enddef
def lcaf_decode_json(self, packet):
packet_format = "BBBBHH"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
rsvd1, flags, lcaf_type, rsvd2, lcaf_len, json_len = \
struct.unpack(packet_format, packet[:format_size])
if (lcaf_type != LISP_LCAF_JSON_TYPE): return(packet)
#
# Do lcaf-length and json-length checks first.
#
lcaf_len = socket.ntohs(lcaf_len)
json_len = socket.ntohs(json_len)
packet = packet[format_size::]
if (len(packet) < lcaf_len): return(None)
if (lcaf_len != json_len + 2): return(None)
#
# Pull out JSON string from packet.
#
try:
json_string = json.loads(packet[0:json_len])
except:
return(None)
#endtry
packet = packet[json_len::]
#
        # Get the AFI of the JSON-encoded address; we are expecting an AFI
        # of 0.
#
packet_format = "H"
format_size = struct.calcsize(packet_format)
afi = struct.unpack(packet_format, packet[:format_size])[0]
packet = packet[format_size::]
if (afi != 0): return(packet)
#
# Store JSON data internally.
#
if (json_string.has_key("source-eid") == False): return(packet)
eid = json_string["source-eid"]
afi = LISP_AFI_IPV4 if eid.count(".") == 3 else LISP_AFI_IPV6 if \
eid.count(":") == 7 else None
if (afi == None):
lprint("Bad JSON 'source-eid' value: {}".format(eid))
return(None)
#endif
self.source_eid.afi = afi
self.source_eid.store_address(eid)
if (json_string.has_key("signature-eid") == False): return(packet)
eid = json_string["signature-eid"]
if (eid.count(":") != 7):
lprint("Bad JSON 'signature-eid' value: {}".format(eid))
return(None)
#endif
self.signature_eid.afi = LISP_AFI_IPV6
self.signature_eid.store_address(eid)
if (json_string.has_key("signature") == False): return(packet)
sig = binascii.a2b_base64(json_string["signature"])
self.map_request_signature = sig
return(packet)
#enddef
def decode(self, packet, source, port):
packet_format = "I"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
first_long = struct.unpack(packet_format, packet[:format_size])
first_long = first_long[0]
packet = packet[format_size::]
packet_format = "Q"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
nonce = struct.unpack(packet_format, packet[:format_size])
packet = packet[format_size::]
first_long = socket.ntohl(first_long)
self.auth_bit = True if (first_long & 0x08000000) else False
self.map_data_present = True if (first_long & 0x04000000) else False
self.rloc_probe = True if (first_long & 0x02000000) else False
self.smr_bit = True if (first_long & 0x01000000) else False
self.pitr_bit = True if (first_long & 0x00800000) else False
self.smr_invoked_bit = True if (first_long & 0x00400000) else False
self.mobile_node = True if (first_long & 0x00200000) else False
self.xtr_id_present = True if (first_long & 0x00100000) else False
self.local_xtr = True if (first_long & 0x00004000) else False
self.dont_reply_bit = True if (first_long & 0x00002000) else False
self.itr_rloc_count = ((first_long >> 8) & 0x1f) + 1
self.record_count = first_long & 0xff
self.nonce = nonce[0]
#
# Decode xTR-ID if sender set the xtr_id_present bit.
#
if (self.xtr_id_present):
if (self.decode_xtr_id(packet) == False): return(None)
#endif
format_size = struct.calcsize("H")
if (len(packet) < format_size): return(None)
afi = struct.unpack("H", packet[:format_size])
self.source_eid.afi = socket.ntohs(afi[0])
packet = packet[format_size::]
if (self.source_eid.afi == LISP_AFI_LCAF):
save_packet = packet
packet = self.source_eid.lcaf_decode_iid(packet)
if (packet == None):
packet = self.lcaf_decode_json(save_packet)
if (packet == None): return(None)
#endif
elif (self.source_eid.afi != LISP_AFI_NONE):
packet = self.source_eid.unpack_address(packet)
if (packet == None): return(None)
#endif
self.source_eid.mask_len = self.source_eid.host_mask_len()
no_crypto = (os.getenv("LISP_NO_CRYPTO") != None)
self.itr_rlocs = []
while (self.itr_rloc_count != 0):
format_size = struct.calcsize("H")
if (len(packet) < format_size): return(None)
afi = struct.unpack("H", packet[:format_size])[0]
itr = lisp_address(LISP_AFI_NONE, "", 32, 0)
itr.afi = socket.ntohs(afi)
#
# If Security Type LCAF, get security parameters and store in
# lisp_keys().
#
if (itr.afi != LISP_AFI_LCAF):
if (len(packet) < itr.addr_length()): return(None)
packet = itr.unpack_address(packet[format_size::])
if (packet == None): return(None)
if (no_crypto):
self.itr_rlocs.append(itr)
self.itr_rloc_count -= 1
continue
#endif
addr_str = lisp_build_crypto_decap_lookup_key(itr, port)
#
                # Decide if we should remove security key state because the
                # ITR has stopped doing the key exchange it previously did.
#
if (lisp_nat_traversal and itr.is_private_address() and \
source): itr = source
rloc_keys = lisp_crypto_keys_by_rloc_decap
if (rloc_keys.has_key(addr_str)): rloc_keys.pop(addr_str)
#
# If "ipc-data-plane = yes" is configured, we need to tell the
# data-plane from the lisp-etr process there is no longer a
# decryption key.
#
lisp_write_ipc_decap_key(addr_str, None)
else:
orig_packet = packet
decode_key = lisp_keys(1)
packet = decode_key.decode_lcaf(orig_packet, 0)
if (packet == None): return(None)
#
# Other side may not do ECDH.
#
cs_list = [LISP_CS_25519_CBC, LISP_CS_25519_GCM,
LISP_CS_25519_CHACHA]
if (decode_key.cipher_suite in cs_list):
if (decode_key.cipher_suite == LISP_CS_25519_CBC or
decode_key.cipher_suite == LISP_CS_25519_GCM):
key = lisp_keys(1, do_poly=False, do_chacha=False)
#endif
if (decode_key.cipher_suite == LISP_CS_25519_CHACHA):
key = lisp_keys(1, do_poly=True, do_chacha=True)
#endif
else:
key = lisp_keys(1, do_poly=False, do_curve=False,
do_chacha=False)
#endif
packet = key.decode_lcaf(orig_packet, 0)
if (packet == None): return(None)
if (len(packet) < format_size): return(None)
afi = struct.unpack("H", packet[:format_size])[0]
itr.afi = socket.ntohs(afi)
if (len(packet) < itr.addr_length()): return(None)
packet = itr.unpack_address(packet[format_size::])
if (packet == None): return(None)
if (no_crypto):
self.itr_rlocs.append(itr)
self.itr_rloc_count -= 1
continue
#endif
addr_str = lisp_build_crypto_decap_lookup_key(itr, port)
stored_key = None
if (lisp_nat_traversal and itr.is_private_address() and \
source): itr = source
if (lisp_crypto_keys_by_rloc_decap.has_key(addr_str)):
keys = lisp_crypto_keys_by_rloc_decap[addr_str]
stored_key = keys[1] if keys and keys[1] else None
#endif
new = True
if (stored_key):
if (stored_key.compare_keys(key)):
self.keys = [None, stored_key, None, None]
lprint("Maintain stored decap-keys for RLOC {}". \
format(red(addr_str, False)))
else:
new = False
remote = bold("Remote decap-rekeying", False)
lprint("{} for RLOC {}".format(remote, red(addr_str,
False)))
key.copy_keypair(stored_key)
key.uptime = stored_key.uptime
stored_key = None
#endif
#endif
if (stored_key == None):
self.keys = [None, key, None, None]
if (lisp_i_am_etr == False and lisp_i_am_rtr == False):
key.local_public_key = None
lprint("{} for {}".format(bold("Ignoring decap-keys",
False), red(addr_str, False)))
elif (key.remote_public_key != None):
if (new):
lprint("{} for RLOC {}".format( \
bold("New decap-keying", False),
red(addr_str, False)))
#endif
key.compute_shared_key("decap")
key.add_key_by_rloc(addr_str, False)
#endif
#endif
#endif
self.itr_rlocs.append(itr)
self.itr_rloc_count -= 1
#endwhile
format_size = struct.calcsize("BBH")
if (len(packet) < format_size): return(None)
subscribe, mask_len, afi = struct.unpack("BBH", packet[:format_size])
self.subscribe_bit = (subscribe & 0x80)
self.target_eid.afi = socket.ntohs(afi)
packet = packet[format_size::]
self.target_eid.mask_len = mask_len
if (self.target_eid.afi == LISP_AFI_LCAF):
packet, target_group = self.target_eid.lcaf_decode_eid(packet)
if (packet == None): return(None)
if (target_group): self.target_group = target_group
else:
packet = self.target_eid.unpack_address(packet)
if (packet == None): return(None)
packet = packet[format_size::]
#endif
return(packet)
#enddef
def print_eid_tuple(self):
return(lisp_print_eid_tuple(self.target_eid, self.target_group))
#enddef
def encode_xtr_id(self, packet):
xtr_id_upper = self.xtr_id >> 64
xtr_id_lower = self.xtr_id & 0xffffffffffffffff
xtr_id_upper = byte_swap_64(xtr_id_upper)
xtr_id_lower = byte_swap_64(xtr_id_lower)
packet += struct.pack("QQ", xtr_id_upper, xtr_id_lower)
return(packet)
#enddef
def decode_xtr_id(self, packet):
format_size = struct.calcsize("QQ")
if (len(packet) < format_size): return(None)
packet = packet[len(packet)-format_size::]
xtr_id_upper, xtr_id_lower = struct.unpack("QQ", packet[:format_size])
xtr_id_upper = byte_swap_64(xtr_id_upper)
xtr_id_lower = byte_swap_64(xtr_id_lower)
self.xtr_id = (xtr_id_upper << 64) | xtr_id_lower
return(True)
#enddef
#endclass
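#
# Illustrative sketch (added for exposition, never called; the function
# name and xTR-ID value are made up): show how the 128-bit xTR-ID is
# appended to the end of a subscription Map-Request and recovered by
# decode_xtr_id().
#
def lisp_map_request_xtr_id_example():
    mr = lisp_map_request()
    mr.xtr_id = 0x00112233445566778899aabbccddeeff
    packet = mr.encode_xtr_id("")

    decoded = lisp_map_request()
    if (decoded.decode_xtr_id(packet) == False): return(False)
    return(decoded.xtr_id == mr.xtr_id)
#enddef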
#
# Map-Reply Message Format
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# |Type=2 |P|E|S| Reserved | Hop Count | Record Count |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Nonce . . . |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | . . . Nonce |
# +-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | | Record TTL |
# | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# R |N|Locator Count | EID mask-len | ACT |A| Reserved |
# e +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# c | Rsvd | Map-Version Number | EID-AFI |
# o +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# r | EID-prefix |
# d +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | /| Priority | Weight | M Priority | M Weight |
# | L +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | o | Unused Flags |L|p|R| Loc-AFI |
# | c +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | \| Locator |
# +-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Mapping Protocol Data |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
class lisp_map_reply():
def __init__(self):
self.rloc_probe = False
self.echo_nonce_capable = False
self.security = False
self.record_count = 0
self.hop_count = 0
self.nonce = 0
self.keys = None
#enddef
def print_map_reply(self):
line = "{} -> flags: {}{}{}, hop-count: {}, record-count: {}, " + \
"nonce: 0x{}"
lprint(line.format(bold("Map-Reply", False), \
"R" if self.rloc_probe else "r",
"E" if self.echo_nonce_capable else "e",
"S" if self.security else "s", self.hop_count, self.record_count,
lisp_hex_string(self.nonce)))
#enddef
def encode(self):
first_long = (LISP_MAP_REPLY << 28) | self.record_count
first_long |= self.hop_count << 8
if (self.rloc_probe): first_long |= 0x08000000
if (self.echo_nonce_capable): first_long |= 0x04000000
if (self.security): first_long |= 0x02000000
packet = struct.pack("I", socket.htonl(first_long))
packet += struct.pack("Q", self.nonce)
return(packet)
#enddef
def decode(self, packet):
packet_format = "I"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
first_long = struct.unpack(packet_format, packet[:format_size])
first_long = first_long[0]
packet = packet[format_size::]
packet_format = "Q"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
nonce = struct.unpack(packet_format, packet[:format_size])
packet = packet[format_size::]
first_long = socket.ntohl(first_long)
self.rloc_probe = True if (first_long & 0x08000000) else False
self.echo_nonce_capable = True if (first_long & 0x04000000) else False
self.security = True if (first_long & 0x02000000) else False
self.hop_count = (first_long >> 8) & 0xff
self.record_count = first_long & 0xff
self.nonce = nonce[0]
if (lisp_crypto_keys_by_nonce.has_key(self.nonce)):
self.keys = lisp_crypto_keys_by_nonce[self.nonce]
self.keys[1].delete_key_by_nonce(self.nonce)
#endif
return(packet)
#enddef
#endclass
#
# This is the structure of an EID record in a Map-Request, Map-Reply, and
# Map-Register.
#
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Record TTL |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Locator Count | EID mask-len | ACT |A|I|E| Reserved |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Rsvd | Map-Version Number | EID-Prefix-AFI |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | EID-Prefix |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
# When E is set, the entire set of locator records is encrypted with the
# chacha cipher.
#
# And this for an EID-record in a Map-Referral.
#
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Record TTL |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Referral Count| EID mask-len | ACT |A|I|E| Reserved |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# |SigCnt | Map Version Number | EID-AFI |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | EID-prefix ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
class lisp_eid_record():
def __init__(self):
self.record_ttl = 0
self.rloc_count = 0
self.action = 0
self.authoritative = False
self.ddt_incomplete = False
self.signature_count = 0
self.map_version = 0
self.eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.group = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.record_ttl = 0
#enddef
def print_prefix(self):
if (self.group.is_null()):
return(green(self.eid.print_prefix(), False))
#endif
return(green(self.eid.print_sg(self.group), False))
#enddef
def print_ttl(self):
ttl = self.record_ttl
if (self.record_ttl & 0x80000000):
ttl = str(self.record_ttl & 0x7fffffff) + " secs"
elif ((ttl % 60) == 0):
ttl = str(ttl/60) + " hours"
else:
ttl = str(ttl) + " mins"
#endif
return(ttl)
#enddef
def store_ttl(self):
ttl = self.record_ttl * 60
if (self.record_ttl & 0x80000000): ttl = self.record_ttl & 0x7fffffff
return(ttl)
#enddef
def print_record(self, indent, ddt):
incomplete = ""
sig_count = ""
action_str = bold("invalid-action", False)
if (ddt):
if (self.action < len(lisp_map_referral_action_string)):
action_str = lisp_map_referral_action_string[self.action]
action_str = bold(action_str, False)
incomplete = (", " + bold("ddt-incomplete", False)) if \
self.ddt_incomplete else ""
sig_count = (", sig-count: " + str(self.signature_count)) if \
(self.signature_count != 0) else ""
#endif
else:
if (self.action < len(lisp_map_reply_action_string)):
action_str = lisp_map_reply_action_string[self.action]
if (self.action != LISP_NO_ACTION):
action_str = bold(action_str, False)
#endif
#endif
#endif
afi = LISP_AFI_LCAF if (self.eid.afi < 0) else self.eid.afi
line = ("{}EID-record -> record-ttl: {}, rloc-count: {}, action: " +
"{}, {}{}{}, map-version: {}, afi: {}, [iid]eid/ml: {}")
lprint(line.format(indent, self.print_ttl(), self.rloc_count,
action_str, "auth" if (self.authoritative is True) else "non-auth",
incomplete, sig_count, self.map_version, afi,
green(self.print_prefix(), False)))
#enddef
def encode(self):
action = self.action << 13
if (self.authoritative): action |= 0x1000
if (self.ddt_incomplete): action |= 0x800
#
# Decide on AFI value.
#
afi = self.eid.afi if (self.eid.instance_id == 0) else LISP_AFI_LCAF
if (afi < 0): afi = LISP_AFI_LCAF
sg = (self.group.is_null() == False)
if (sg): afi = LISP_AFI_LCAF
sig_mv = (self.signature_count << 12) | self.map_version
mask_len = 0 if self.eid.is_binary() == False else self.eid.mask_len
packet = struct.pack("IBBHHH", socket.htonl(self.record_ttl),
self.rloc_count, mask_len, socket.htons(action),
socket.htons(sig_mv), socket.htons(afi))
#
# Check if we are encoding an (S,G) entry.
#
if (sg):
packet += self.eid.lcaf_encode_sg(self.group)
return(packet)
#endif
#
# Check if we are encoding an geo-prefix in an EID-record.
#
if (self.eid.afi == LISP_AFI_GEO_COORD and self.eid.instance_id == 0):
packet = packet[0:-2]
packet += self.eid.address.encode_geo()
return(packet)
#endif
#
# Check if instance-ID needs to be encoded in the EID record.
#
if (afi == LISP_AFI_LCAF):
packet += self.eid.lcaf_encode_iid()
return(packet)
#endif
#
# Just encode the AFI for the EID.
#
packet += self.eid.pack_address()
return(packet)
#enddef
def decode(self, packet):
packet_format = "IBBHHH"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
self.record_ttl, self.rloc_count, self.eid.mask_len, action, \
self.map_version, self.eid.afi = \
struct.unpack(packet_format, packet[:format_size])
self.record_ttl = socket.ntohl(self.record_ttl)
action = socket.ntohs(action)
self.action = (action >> 13) & 0x7
self.authoritative = True if ((action >> 12) & 1) else False
self.ddt_incomplete = True if ((action >> 11) & 1) else False
self.map_version = socket.ntohs(self.map_version)
self.signature_count = self.map_version >> 12
self.map_version = self.map_version & 0xfff
self.eid.afi = socket.ntohs(self.eid.afi)
self.eid.instance_id = 0
packet = packet[format_size::]
#
# Check if instance-ID LCAF is encoded in the EID-record.
#
if (self.eid.afi == LISP_AFI_LCAF):
packet, group = self.eid.lcaf_decode_eid(packet)
if (group): self.group = group
self.group.instance_id = self.eid.instance_id
return(packet)
#endif
packet = self.eid.unpack_address(packet)
return(packet)
#enddef
def print_eid_tuple(self):
return(lisp_print_eid_tuple(self.eid, self.group))
#enddef
#endclass
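#
# Illustrative sketch, not used by the implementation: the record-TTL
# convention that print_ttl() and store_ttl() above both follow. A TTL value
# with the high-order bit set is interpreted as seconds; otherwise the value
# is in minutes. The function name is made up for this example.
#
def lisp_example_record_ttl_to_seconds(record_ttl):
    if (record_ttl & 0x80000000): return(record_ttl & 0x7fffffff)
    return(record_ttl * 60)
#enddef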
#
# Encapsulated Control Message Format
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# / | IPv4 or IPv6 Header |
# OH | (uses RLOC addresses) |
# \ | |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# / | Source Port = xxxx | Dest Port = 4342 |
# UDP +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# \ | UDP Length | UDP Checksum |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# LH |Type=8 |S|D|E|M| Reserved |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# / | IPv4 or IPv6 Header |
# IH | (uses RLOC or EID addresses) |
# \ | |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# / | Source Port = xxxx | Dest Port = yyyy |
# UDP +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# \ | UDP Length | UDP Checksum |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# LCM | LISP Control Message |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
LISP_UDP_PROTOCOL = 17
LISP_DEFAULT_ECM_TTL = 128
class lisp_ecm():
def __init__(self, sport):
self.security = False
self.ddt = False
self.to_etr = False
self.to_ms = False
self.length = 0
self.ttl = LISP_DEFAULT_ECM_TTL
self.protocol = LISP_UDP_PROTOCOL
self.ip_checksum = 0
self.source = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.dest = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.udp_sport = sport
self.udp_dport = LISP_CTRL_PORT
self.udp_checksum = 0
self.udp_length = 0
self.afi = LISP_AFI_NONE
#enddef
def print_ecm(self):
line = ("{} -> flags: {}{}{}{}, " + \
"inner IP: {} -> {}, inner UDP: {} -> {}")
lprint(line.format(bold("ECM", False), "S" if self.security else "s",
"D" if self.ddt else "d", "E" if self.to_etr else "e",
"M" if self.to_ms else "m",
green(self.source.print_address(), False),
green(self.dest.print_address(), False), self.udp_sport,
self.udp_dport))
def encode(self, packet, inner_source, inner_dest):
self.udp_length = len(packet) + 8
self.source = inner_source
self.dest = inner_dest
if (inner_dest.is_ipv4()):
self.afi = LISP_AFI_IPV4
self.length = self.udp_length + 20
#endif
if (inner_dest.is_ipv6()):
self.afi = LISP_AFI_IPV6
self.length = self.udp_length
#endif
#
# Encode ECM header first, then the IPv4 or IPv6 header, then the
# UDP header.
#
first_long = (LISP_ECM << 28)
if (self.security): first_long |= 0x08000000
if (self.ddt): first_long |= 0x04000000
if (self.to_etr): first_long |= 0x02000000
if (self.to_ms): first_long |= 0x01000000
ecm = struct.pack("I", socket.htonl(first_long))
ip = ""
if (self.afi == LISP_AFI_IPV4):
ip = struct.pack("BBHHHBBH", 0x45, 0, socket.htons(self.length),
0, 0, self.ttl, self.protocol, socket.htons(self.ip_checksum))
ip += self.source.pack_address()
ip += self.dest.pack_address()
ip = lisp_ip_checksum(ip)
#endif
if (self.afi == LISP_AFI_IPV6):
ip = struct.pack("BBHHBB", 0x60, 0, 0, socket.htons(self.length),
self.protocol, self.ttl)
ip += self.source.pack_address()
ip += self.dest.pack_address()
#endif
s = socket.htons(self.udp_sport)
d = socket.htons(self.udp_dport)
l = socket.htons(self.udp_length)
c = socket.htons(self.udp_checksum)
udp = struct.pack("HHHH", s, d, l, c)
return(ecm + ip + udp)
#enddef
def decode(self, packet):
#
# Decode ECM header.
#
packet_format = "I"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
first_long = struct.unpack(packet_format, packet[:format_size])
first_long = socket.ntohl(first_long[0])
self.security = True if (first_long & 0x08000000) else False
self.ddt = True if (first_long & 0x04000000) else False
self.to_etr = True if (first_long & 0x02000000) else False
self.to_ms = True if (first_long & 0x01000000) else False
packet = packet[format_size::]
#
# Decode inner IPv4/IPv6 and UDP header.
#
if (len(packet) < 1): return(None)
version = struct.unpack("B", packet[0:1])[0]
version = version >> 4
if (version == 4):
format_size = struct.calcsize("HHIBBH")
if (len(packet) < format_size): return(None)
x, l, x, t, p, c = struct.unpack("HHIBBH", packet[:format_size])
self.length = socket.ntohs(l)
self.ttl = t
self.protocol = p
self.ip_checksum = socket.ntohs(c)
self.source.afi = self.dest.afi = LISP_AFI_IPV4
#
# Zero out IPv4 header checksum.
#
p = struct.pack("H", 0)
offset1 = struct.calcsize("HHIBB")
offset2 = struct.calcsize("H")
packet = packet[:offset1] + p + packet[offset1+offset2:]
packet = packet[format_size::]
packet = self.source.unpack_address(packet)
if (packet == None): return(None)
packet = self.dest.unpack_address(packet)
if (packet == None): return(None)
#endif
if (version == 6):
format_size = struct.calcsize("IHBB")
if (len(packet) < format_size): return(None)
x, l, p, t = struct.unpack("IHBB", packet[:format_size])
self.length = socket.ntohs(l)
self.protocol = p
self.ttl = t
self.source.afi = self.dest.afi = LISP_AFI_IPV6
packet = packet[format_size::]
packet = self.source.unpack_address(packet)
if (packet == None): return(None)
packet = self.dest.unpack_address(packet)
if (packet == None): return(None)
#endif
self.source.mask_len = self.source.host_mask_len()
self.dest.mask_len = self.dest.host_mask_len()
format_size = struct.calcsize("HHHH")
if (len(packet) < format_size): return(None)
s, d, l, c = struct.unpack("HHHH", packet[:format_size])
self.udp_sport = socket.ntohs(s)
self.udp_dport = socket.ntohs(d)
self.udp_length = socket.ntohs(l)
self.udp_checksum = socket.ntohs(c)
packet = packet[format_size::]
return(packet)
#enddef
#endclass
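#
# Illustrative sketch only, not called by lisp_ecm: how the S/D/E/M flag bits
# map into the first longword that lisp_ecm.encode() above emits. LISP_ECM is
# the message type defined elsewhere in this file; the function name and
# argument names are made up.
#
def lisp_example_ecm_first_long(security, ddt, to_etr, to_ms):
    first_long = (LISP_ECM << 28)
    if (security): first_long |= 0x08000000    # S bit
    if (ddt): first_long |= 0x04000000         # D bit
    if (to_etr): first_long |= 0x02000000      # E bit
    if (to_ms): first_long |= 0x01000000       # M bit
    return(first_long)
#enddef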
#
# This is the structure of an RLOC record in a Map-Request, Map-Reply, and
# Map-Register's EID record.
#
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# /| Priority | Weight | M Priority | M Weight |
# L +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# o | Unused Flags |L|p|R| Loc-AFI |
# c +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# \| Locator |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
# AFI-List LISP Canonical Address Format:
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = 16387 | Rsvd1 | Flags |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Type = 1 | Rsvd2 | 2 + 4 + 2 + 16 |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = 1 | IPv4 Address ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | ... IPv4 Address | AFI = 2 |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | IPv6 Address ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | ... IPv6 Address ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | ... IPv6 Address ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | ... IPv6 Address |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
# Geo Coordinate LISP Canonical Address Format:
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = 16387 | Rsvd1 | Flags |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Type = 5 | Rsvd2 | Length |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# |U|N|E|A|M|R|K| Reserved | Location Uncertainty |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Lat Degrees | Latitude Milliseconds |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Long Degrees | Longitude Milliseconds |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Altitude |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Radius | Reserved |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = x | Address ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
# Explicit Locator Path (ELP) Canonical Address Format:
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = 16387 | Rsvd1 | Flags |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Type = 10 | Rsvd2 | n |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = x | Rsvd3 |L|P|S|
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Reencap Hop 1 ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = x | Rsvd3 |L|P|S|
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Reencap Hop k ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
# Replication List Entry Address Format:
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = 16387 | Rsvd1 | Flags |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Type = 13 | Rsvd2 | 4 + n |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Rsvd3 | Rsvd4 | Level Value |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = x | RTR/ETR #1 ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = 17 | RTR/ETR #1 RLOC Name ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Rsvd3 | Rsvd4 | Level Value |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = x | RTR/ETR #n ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = 17 | RTR/ETR #n RLOC Name ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
# Security Key Canonical Address Format:
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = 16387 | Rsvd1 | Flags |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Type = 11 | Rsvd2 | 6 + n |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Key Count | Rsvd3 |A| Cipher Suite| Rsvd4 |R|
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Key Length | Public Key Material ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | ... Public Key Material |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = x | Locator Address ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
class lisp_rloc_record():
def __init__(self):
self.priority = 0
self.weight = 0
self.mpriority = 0
self.mweight = 0
self.local_bit = False
self.probe_bit = False
self.reach_bit = False
self.rloc = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.geo = None
self.elp = None
self.rle = None
self.json = None
self.rloc_name = None
self.keys = None
#enddef
def print_rloc_name(self, cour=False):
if (self.rloc_name == None): return("")
rloc_name = self.rloc_name
if (cour): rloc_name = lisp_print_cour(rloc_name)
return('rloc-name: {}'.format(blue(rloc_name, cour)))
#enddef
def print_record(self, indent):
rloc_str = self.print_rloc_name()
if (rloc_str != ""): rloc_str = ", " + rloc_str
geo_str = ""
if (self.geo):
name = ""
if (self.geo.geo_name): name = "'{}' ".format(self.geo.geo_name)
geo_str = ", geo: {}{}".format(name, self.geo.print_geo())
#endif
elp_str = ""
if (self.elp):
name = ""
if (self.elp.elp_name): name = "'{}' ".format(self.elp.elp_name)
elp_str = ", elp: {}{}".format(name, self.elp.print_elp(True))
#endif
rle_str = ""
if (self.rle):
name = ""
if (self.rle.rle_name): name = "'{}' ".format(self.rle.rle_name)
rle_str = ", rle: {}{}".format(name, self.rle.print_rle(False, True))
#endif
json_str = ""
if (self.json):
name = ""
if (self.json.json_name):
name = "'{}' ".format(self.json.json_name)
#endif
json_str = ", json: {}".format(self.json.print_json(False))
#endif
sec_str = ""
if (self.rloc.is_null() == False and self.keys and self.keys[1]):
sec_str = ", " + self.keys[1].print_keys()
#endif
line = ("{}RLOC-record -> flags: {}, {}/{}/{}/{}, afi: {}, rloc: "
+ "{}{}{}{}{}{}{}")
lprint(line.format(indent, self.print_flags(), self.priority,
self.weight, self.mpriority, self.mweight, self.rloc.afi,
red(self.rloc.print_address_no_iid(), False), rloc_str, geo_str,
elp_str, rle_str, json_str, sec_str))
#enddef
def print_flags(self):
return("{}{}{}".format("L" if self.local_bit else "l", "P" \
if self.probe_bit else "p", "R" if self.reach_bit else "r"))
#enddef
def store_rloc_entry(self, rloc_entry):
rloc = rloc_entry.rloc if (rloc_entry.translated_rloc.is_null()) \
else rloc_entry.translated_rloc
self.rloc.copy_address(rloc)
if (rloc_entry.rloc_name):
self.rloc_name = rloc_entry.rloc_name
#endif
if (rloc_entry.geo):
self.geo = rloc_entry.geo
else:
name = rloc_entry.geo_name
if (name and lisp_geo_list.has_key(name)):
self.geo = lisp_geo_list[name]
#endif
#endif
if (rloc_entry.elp):
self.elp = rloc_entry.elp
else:
name = rloc_entry.elp_name
if (name and lisp_elp_list.has_key(name)):
self.elp = lisp_elp_list[name]
#endif
#endif
if (rloc_entry.rle):
self.rle = rloc_entry.rle
else:
name = rloc_entry.rle_name
if (name and lisp_rle_list.has_key(name)):
self.rle = lisp_rle_list[name]
#endif
#endif
if (rloc_entry.json):
self.json = rloc_entry.json
else:
name = rloc_entry.json_name
if (name and lisp_json_list.has_key(name)):
self.json = lisp_json_list[name]
#endif
#endif
self.priority = rloc_entry.priority
self.weight = rloc_entry.weight
self.mpriority = rloc_entry.mpriority
self.mweight = rloc_entry.mweight
#enddef
def encode_lcaf(self):
lcaf_afi = socket.htons(LISP_AFI_LCAF)
gpkt = ""
if (self.geo):
gpkt = self.geo.encode_geo()
#endif
epkt = ""
if (self.elp):
elp_recs = ""
for elp_node in self.elp.elp_nodes:
afi = socket.htons(elp_node.address.afi)
flags = 0
if (elp_node.eid): flags |= 0x4
if (elp_node.probe): flags |= 0x2
if (elp_node.strict): flags |= 0x1
flags = socket.htons(flags)
elp_recs += struct.pack("HH", flags, afi)
elp_recs += elp_node.address.pack_address()
#endfor
elp_len = socket.htons(len(elp_recs))
epkt = struct.pack("HBBBBH", lcaf_afi, 0, 0, LISP_LCAF_ELP_TYPE,
0, elp_len)
epkt += elp_recs
#endif
rpkt = ""
if (self.rle):
rle_recs = ""
for rle_node in self.rle.rle_nodes:
afi = socket.htons(rle_node.address.afi)
rle_recs += struct.pack("HBBH", 0, 0, rle_node.level, afi)
rle_recs += rle_node.address.pack_address()
if (rle_node.rloc_name):
rle_recs += struct.pack("H", socket.htons(LISP_AFI_NAME))
rle_recs += rle_node.rloc_name + "\0"
#endif
#endfor
rle_len = socket.htons(len(rle_recs))
rpkt = struct.pack("HBBBBH", lcaf_afi, 0, 0, LISP_LCAF_RLE_TYPE,
0, rle_len)
rpkt += rle_recs
#endif
jpkt = ""
if (self.json):
lcaf_len = socket.htons(len(self.json.json_string) + 2)
json_len = socket.htons(len(self.json.json_string))
jpkt = struct.pack("HBBBBHH", lcaf_afi, 0, 0, LISP_LCAF_JSON_TYPE,
0, lcaf_len, json_len)
jpkt += self.json.json_string
jpkt += struct.pack("H", 0)
#endif
spkt = ""
if (self.rloc.is_null() == False and self.keys and self.keys[1]):
spkt = self.keys[1].encode_lcaf(self.rloc)
#endif
npkt = ""
if (self.rloc_name):
npkt += struct.pack("H", socket.htons(LISP_AFI_NAME))
npkt += self.rloc_name + "\0"
#endif
apkt_len = len(gpkt) + len(epkt) + len(rpkt) + len(spkt) + 2 + \
len(jpkt) + self.rloc.addr_length() + len(npkt)
apkt_len = socket.htons(apkt_len)
apkt = struct.pack("HBBBBHH", lcaf_afi, 0, 0, LISP_LCAF_AFI_LIST_TYPE,
0, apkt_len, socket.htons(self.rloc.afi))
apkt += self.rloc.pack_address()
return(apkt + npkt + gpkt + epkt + rpkt + spkt + jpkt)
#enddef
def encode(self):
flags = 0
if (self.local_bit): flags |= 0x0004
if (self.probe_bit): flags |= 0x0002
if (self.reach_bit): flags |= 0x0001
packet = struct.pack("BBBBHH", self.priority, self.weight,
self.mpriority, self.mweight, socket.htons(flags),
socket.htons(self.rloc.afi))
if (self.geo or self.elp or self.rle or self.keys or self.rloc_name \
or self.json):
packet = packet[0:-2] + self.encode_lcaf()
else:
packet += self.rloc.pack_address()
#endif
return(packet)
#enddef
def decode_lcaf(self, packet, nonce):
packet_format = "HBBBBH"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
afi, rsvd1, flags, lcaf_type, rsvd2, lcaf_len = \
struct.unpack(packet_format, packet[:format_size])
lcaf_len = socket.ntohs(lcaf_len)
packet = packet[format_size::]
if (lcaf_len > len(packet)): return(None)
#
# Process AFI-List LCAF.
#
if (lcaf_type == LISP_LCAF_AFI_LIST_TYPE):
while (lcaf_len > 0):
packet_format = "H"
format_size = struct.calcsize(packet_format)
if (lcaf_len < format_size): return(None)
packet_len = len(packet)
afi = struct.unpack(packet_format, packet[:format_size])[0]
afi = socket.ntohs(afi)
if (afi == LISP_AFI_LCAF):
packet = self.decode_lcaf(packet, nonce)
if (packet == None): return(None)
else:
packet = packet[format_size::]
self.rloc_name = None
if (afi == LISP_AFI_NAME):
packet, rloc_name = lisp_decode_dist_name(packet)
self.rloc_name = rloc_name
else:
self.rloc.afi = afi
packet = self.rloc.unpack_address(packet)
if (packet == None): return(None)
self.rloc.mask_len = self.rloc.host_mask_len()
#endif
#endif
lcaf_len -= packet_len - len(packet)
#endwhile
elif (lcaf_type == LISP_LCAF_GEO_COORD_TYPE):
#
# Process Geo-Coordinate LCAF.
#
geo = lisp_geo("")
packet = geo.decode_geo(packet, lcaf_len, rsvd2)
if (packet == None): return(None)
self.geo = geo
elif (lcaf_type == LISP_LCAF_JSON_TYPE):
#
# Process JSON LCAF.
#
packet_format = "H"
format_size = struct.calcsize(packet_format)
if (lcaf_len < format_size): return(None)
json_len = struct.unpack(packet_format, packet[:format_size])[0]
json_len = socket.ntohs(json_len)
if (lcaf_len < format_size + json_len): return(None)
packet = packet[format_size::]
self.json = lisp_json("", packet[0:json_len])
packet = packet[json_len::]
elif (lcaf_type == LISP_LCAF_ELP_TYPE):
#
# Process ELP LCAF.
#
elp = lisp_elp(None)
elp.elp_nodes = []
while (lcaf_len > 0):
flags, afi = struct.unpack("HH", packet[:4])
afi = socket.ntohs(afi)
if (afi == LISP_AFI_LCAF): return(None)
elp_node = lisp_elp_node()
elp.elp_nodes.append(elp_node)
flags = socket.ntohs(flags)
elp_node.eid = (flags & 0x4)
elp_node.probe = (flags & 0x2)
elp_node.strict = (flags & 0x1)
elp_node.address.afi = afi
elp_node.address.mask_len = elp_node.address.host_mask_len()
packet = elp_node.address.unpack_address(packet[4::])
lcaf_len -= elp_node.address.addr_length() + 4
#endwhile
elp.select_elp_node()
self.elp = elp
elif (lcaf_type == LISP_LCAF_RLE_TYPE):
#
# Process RLE LCAF.
#
rle = lisp_rle(None)
rle.rle_nodes = []
while (lcaf_len > 0):
x, y, level, afi = struct.unpack("HBBH", packet[:6])
afi = socket.ntohs(afi)
if (afi == LISP_AFI_LCAF): return(None)
rle_node = lisp_rle_node()
rle.rle_nodes.append(rle_node)
rle_node.level = level
rle_node.address.afi = afi
rle_node.address.mask_len = rle_node.address.host_mask_len()
packet = rle_node.address.unpack_address(packet[6::])
lcaf_len -= rle_node.address.addr_length() + 6
if (lcaf_len >= 2):
afi = struct.unpack("H", packet[:2])[0]
if (socket.ntohs(afi) == LISP_AFI_NAME):
packet = packet[2::]
packet, rle_node.rloc_name = \
lisp_decode_dist_name(packet)
if (packet == None): return(None)
lcaf_len -= len(rle_node.rloc_name) + 1 + 2
#endif
#endif
#endwhile
self.rle = rle
self.rle.build_forwarding_list()
elif (lcaf_type == LISP_LCAF_SECURITY_TYPE):
#
# Get lisp_keys() data structure so we can parse keys in the Map-
# Reply RLOC-record. Then get the RLOC address.
#
orig_packet = packet
decode_key = lisp_keys(1)
packet = decode_key.decode_lcaf(orig_packet, lcaf_len)
if (packet == None): return(None)
#
# Other side may not do ECDH.
#
cs_list = [LISP_CS_25519_CBC, LISP_CS_25519_CHACHA]
if (decode_key.cipher_suite in cs_list):
if (decode_key.cipher_suite == LISP_CS_25519_CBC):
key = lisp_keys(1, do_poly=False, do_chacha=False)
#endif
if (decode_key.cipher_suite == LISP_CS_25519_CHACHA):
key = lisp_keys(1, do_poly=True, do_chacha=True)
#endif
else:
key = lisp_keys(1, do_poly=False, do_chacha=False)
#endif
packet = key.decode_lcaf(orig_packet, lcaf_len)
if (packet == None): return(None)
if (len(packet) < 2): return(None)
afi = struct.unpack("H", packet[:2])[0]
self.rloc.afi = socket.ntohs(afi)
if (len(packet) < self.rloc.addr_length()): return(None)
packet = self.rloc.unpack_address(packet[2::])
if (packet == None): return(None)
self.rloc.mask_len = self.rloc.host_mask_len()
#
# Some RLOC records may not have RLOC addresses but other LCAF
# types. Don't process security keys because we need RLOC addresses
# to index into security data structures.
#
if (self.rloc.is_null()): return(packet)
rloc_name_str = self.rloc_name
if (rloc_name_str): rloc_name_str = blue(self.rloc_name, False)
#
# If we found no stored key, store the newly created lisp_keys()
# to the RLOC list if and only if a remote public-key was supplied
# in the Map-Reply.
#
stored_key = self.keys[1] if self.keys else None
if (stored_key == None):
if (key.remote_public_key == None):
string = bold("No remote encap-public-key supplied", False)
lprint(" {} for {}".format(string, rloc_name_str))
key = None
else:
string = bold("New encap-keying with new state", False)
lprint(" {} for {}".format(string, rloc_name_str))
key.compute_shared_key("encap")
#endif
#endif
#
# If we have stored-key, the other side received the local public
# key that is stored in variable 'stored_key'. If the remote side
# did not supply a public-key, it doesn't want to do lisp-crypto.
# If it did supply a public key, check to see if the same as
# last time, and if so, do nothing, else we do a rekeying.
#
if (stored_key):
if (key.remote_public_key == None):
key = None
remote = bold("Remote encap-unkeying occurred", False)
lprint(" {} for {}".format(remote, rloc_name_str))
elif (stored_key.compare_keys(key)):
key = stored_key
lprint(" Maintain stored encap-keys for {}".format( \
rloc_name_str))
else:
if (stored_key.remote_public_key == None):
string = "New encap-keying for existing state"
else:
string = "Remote encap-rekeying"
#endif
lprint(" {} for {}".format(bold(string, False),
rloc_name_str))
stored_key.remote_public_key = key.remote_public_key
stored_key.compute_shared_key("encap")
key = stored_key
#endif
#endif
self.keys = [None, key, None, None]
else:
#
# All other LCAFs we skip over and ignore.
#
packet = packet[lcaf_len::]
#endif
return(packet)
#enddef
def decode(self, packet, nonce):
packet_format = "BBBBHH"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
self.priority, self.weight, self.mpriority, self.mweight, flags, \
afi = struct.unpack(packet_format, packet[:format_size])
flags = socket.ntohs(flags)
afi = socket.ntohs(afi)
self.local_bit = True if (flags & 0x0004) else False
self.probe_bit = True if (flags & 0x0002) else False
self.reach_bit = True if (flags & 0x0001) else False
if (afi == LISP_AFI_LCAF):
packet = packet[format_size-2::]
packet = self.decode_lcaf(packet, nonce)
else:
self.rloc.afi = afi
packet = packet[format_size::]
packet = self.rloc.unpack_address(packet)
#endif
self.rloc.mask_len = self.rloc.host_mask_len()
return(packet)
#enddef
def end_of_rlocs(self, packet, rloc_count):
for i in range(rloc_count):
packet = self.decode(packet, None)
if (packet == None): return(None)
#endfor
return(packet)
#enddef
#endclass
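#
# Illustrative sketch, not part of the implementation: the flags field of an
# RLOC-record as packed by lisp_rloc_record.encode() and unpacked by decode()
# above. The function name is made up for exposition.
#
def lisp_example_rloc_record_flags(local_bit, probe_bit, reach_bit):
    flags = 0
    if (local_bit): flags |= 0x0004    # L bit, locator is local to the sender
    if (probe_bit): flags |= 0x0002    # p bit, this is an RLOC-probe reply
    if (reach_bit): flags |= 0x0001    # R bit, locator is reachable
    return(flags)
#enddef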
#
# Map-Referral Message Format
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# |Type=6 | Reserved | Record Count |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Nonce . . . |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | . . . Nonce |
# +-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | | Record TTL |
# | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# R | Referral Count| EID mask-len | ACT |A|I| Reserved |
# e +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# c |SigCnt | Map Version Number | EID-AFI |
# o +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# r | EID-prefix ... |
# d +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | /| Priority | Weight | M Priority | M Weight |
# | L +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | o | Unused Flags |R| Loc/LCAF-AFI |
# | c +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | \| Locator ... |
# +-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
class lisp_map_referral():
def __init__(self):
self.record_count = 0
self.nonce = 0
#enddef
def print_map_referral(self):
lprint("{} -> record-count: {}, nonce: 0x{}".format( \
bold("Map-Referral", False), self.record_count,
lisp_hex_string(self.nonce)))
#enddef
def encode(self):
first_long = (LISP_MAP_REFERRAL << 28) | self.record_count
packet = struct.pack("I", socket.htonl(first_long))
packet += struct.pack("Q", self.nonce)
return(packet)
#enddef
def decode(self, packet):
packet_format = "I"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
first_long = struct.unpack(packet_format, packet[:format_size])
first_long = socket.ntohl(first_long[0])
self.record_count = first_long & 0xff
packet = packet[format_size::]
packet_format = "Q"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
self.nonce = struct.unpack(packet_format, packet[:format_size])[0]
packet = packet[format_size::]
return(packet)
#enddef
#endclass
#
# This is a DDT cache type data structure that holds information configured
# in the "lisp ddt-authoritative-prefix" and "lisp delegate" commands. The
# self.delegation_set[] is a list of lisp_ddt_node()s.
#
class lisp_ddt_entry():
def __init__(self):
self.eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.group = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.uptime = lisp_get_timestamp()
self.delegation_set = []
self.source_cache = None
self.map_referrals_sent = 0
#enddef
def is_auth_prefix(self):
if (len(self.delegation_set) != 0): return(False)
if (self.is_star_g()): return(False)
return(True)
#enddef
def is_ms_peer_entry(self):
if (len(self.delegation_set) == 0): return(False)
return(self.delegation_set[0].is_ms_peer())
#enddef
def print_referral_type(self):
if (len(self.delegation_set) == 0): return("unknown")
ddt_node = self.delegation_set[0]
return(ddt_node.print_node_type())
#enddef
def print_eid_tuple(self):
return(lisp_print_eid_tuple(self.eid, self.group))
#enddef
def add_cache(self):
if (self.group.is_null()):
lisp_ddt_cache.add_cache(self.eid, self)
else:
ddt = lisp_ddt_cache.lookup_cache(self.group, True)
if (ddt == None):
ddt = lisp_ddt_entry()
ddt.eid.copy_address(self.group)
ddt.group.copy_address(self.group)
lisp_ddt_cache.add_cache(self.group, ddt)
#endif
if (self.eid.is_null()): self.eid.make_default_route(ddt.group)
ddt.add_source_entry(self)
#endif
#enddef
def add_source_entry(self, source_ddt):
if (self.source_cache == None): self.source_cache = lisp_cache()
self.source_cache.add_cache(source_ddt.eid, source_ddt)
#enddef
def lookup_source_cache(self, source, exact):
if (self.source_cache == None): return(None)
return(self.source_cache.lookup_cache(source, exact))
#enddef
def is_star_g(self):
if (self.group.is_null()): return(False)
return(self.eid.is_exact_match(self.group))
#enddef
#endclass
class lisp_ddt_node():
def __init__(self):
self.delegate_address = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.public_key = ""
self.map_server_peer = False
self.map_server_child = False
self.priority = 0
self.weight = 0
#enddef
def print_node_type(self):
if (self.is_ddt_child()): return("ddt-child")
if (self.is_ms_child()): return("map-server-child")
if (self.is_ms_peer()): return("map-server-peer")
#enddef
def is_ddt_child(self):
if (self.map_server_child): return(False)
if (self.map_server_peer): return(False)
return(True)
#enddef
def is_ms_child(self):
return(self.map_server_child)
#enddef
def is_ms_peer(self):
return(self.map_server_peer)
#enddef
#endclass
#
# This is a Map-Request queue used on a Map-Resolver when waiting for a
# Map-Referral to be returned by a DDT-node or a Map-Server.
#
class lisp_ddt_map_request():
def __init__(self, lisp_sockets, packet, eid, group, nonce):
self.uptime = lisp_get_timestamp()
self.lisp_sockets = lisp_sockets
self.packet = packet
self.eid = eid
self.group = group
self.nonce = nonce
self.mr_source = None
self.sport = 0
self.itr = None
self.retry_count = 0
self.send_count = 0
self.retransmit_timer = None
self.last_request_sent_to = None
self.from_pitr = False
self.tried_root = False
self.last_cached_prefix = [None, None]
#enddef
def print_ddt_map_request(self):
lprint("Queued Map-Request from {}ITR {}->{}, nonce 0x{}".format( \
"P" if self.from_pitr else "",
red(self.itr.print_address(), False),
green(self.eid.print_address(), False), self.nonce))
#enddef
def queue_map_request(self):
self.retransmit_timer = threading.Timer(LISP_DDT_MAP_REQUEST_INTERVAL,
lisp_retransmit_ddt_map_request, [self])
self.retransmit_timer.start()
lisp_ddt_map_requestQ[str(self.nonce)] = self
#enddef
def dequeue_map_request(self):
self.retransmit_timer.cancel()
if (lisp_ddt_map_requestQ.has_key(str(self.nonce))):
lisp_ddt_map_requestQ.pop(str(self.nonce))
#endif
#enddef
def print_eid_tuple(self):
return(lisp_print_eid_tuple(self.eid, self.group))
#enddef
#endclass
#
# -------------------------------------------------------------------
# Type (Action field) Incomplete Referral-set TTL values
# -------------------------------------------------------------------
# 0 NODE-REFERRAL NO YES 1440
#
# 1 MS-REFERRAL NO YES 1440
#
# 2 MS-ACK * * 1440
#
# 3 MS-NOT-REGISTERED * * 1
#
# 4 DELEGATION-HOLE NO NO 15
#
# 5 NOT-AUTHORITATIVE YES NO 0
# -------------------------------------------------------------------
#
LISP_DDT_ACTION_SITE_NOT_FOUND = -2
LISP_DDT_ACTION_NULL = -1
LISP_DDT_ACTION_NODE_REFERRAL = 0
LISP_DDT_ACTION_MS_REFERRAL = 1
LISP_DDT_ACTION_MS_ACK = 2
LISP_DDT_ACTION_MS_NOT_REG = 3
LISP_DDT_ACTION_DELEGATION_HOLE = 4
LISP_DDT_ACTION_NOT_AUTH = 5
LISP_DDT_ACTION_MAX = LISP_DDT_ACTION_NOT_AUTH
lisp_map_referral_action_string = [
"node-referral", "ms-referral", "ms-ack", "ms-not-registered",
"delegation-hole", "not-authoritative"]
#
# Info-Request/Reply
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# |Type=7 |R| Reserved |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Nonce . . . |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | . . . Nonce |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Key ID | Authentication Data Length |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# ~ Authentication Data ~
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | TTL |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Reserved | EID mask-len | EID-prefix-AFI |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | EID-prefix |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
# Info-Request specific information following the EID-prefix:
#
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = 0 | <Nothing Follows AFI=0> |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
# Info-Reply specific information following the EID-prefix:
#
# +->+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | | AFI = 16387 | Rsvd1 | Flags |
# | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | | Type = 7 | Rsvd2 | 4 + n |
# | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# N | MS UDP Port Number | ETR UDP Port Number |
# A +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# T | AFI = x | Global ETR RLOC Address ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# L | AFI = x | MS RLOC Address ... |
# C +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# A | AFI = x | Private ETR RLOC Address ... |
# F +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | | AFI = x | RTR RLOC Address 1 ... |
# | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | | AFI = x | RTR RLOC Address n ... |
# +->+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
# This encoding will not use authentication so we respond to anyone who
# sends an Info-Request. And the EID-prefix will have AFI=0.
#
class lisp_info():
def __init__(self):
self.info_reply = False
self.nonce = 0
self.private_etr_rloc = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.global_etr_rloc = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.global_ms_rloc = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.ms_port = 0
self.etr_port = 0
self.rtr_list = []
self.hostname = lisp_hostname
#enddef
def print_info(self):
if (self.info_reply):
req_or_reply = "Info-Reply"
rloc = (", ms-port: {}, etr-port: {}, global-rloc: {}, " + \
"ms-rloc: {}, private-rloc: {}, RTR-list: ").format( \
self.ms_port, self.etr_port,
red(self.global_etr_rloc.print_address_no_iid(), False),
red(self.global_ms_rloc.print_address_no_iid(), False),
red(self.private_etr_rloc.print_address_no_iid(), False))
if (len(self.rtr_list) == 0): rloc += "empty, "
for rtr in self.rtr_list:
rloc += red(rtr.print_address_no_iid(), False) + ", "
#endfor
rloc = rloc[0:-2]
else:
req_or_reply = "Info-Request"
hostname = "<none>" if self.hostname == None else self.hostname
rloc = ", hostname: {}".format(blue(hostname, False))
#endif
lprint("{} -> nonce: 0x{}{}".format(bold(req_or_reply, False),
lisp_hex_string(self.nonce), rloc))
#enddef
def encode(self):
first_long = (LISP_NAT_INFO << 28)
if (self.info_reply): first_long |= (1 << 27)
#
# Encode first-long, nonce, key-id longword, TTL and EID mask-len/
# EID-prefix AFI. There is no auth data field since auth len is 0.
#
packet = struct.pack("I", socket.htonl(first_long))
packet += struct.pack("Q", self.nonce)
packet += struct.pack("III", 0, 0, 0)
#
# Add hostname as a null-terminated string with AFI 17 (LISP_AFI_NAME).
#
if (self.info_reply == False):
if (self.hostname == None):
packet += struct.pack("H", 0)
else:
packet += struct.pack("H", socket.htons(LISP_AFI_NAME))
packet += self.hostname + "\0"
#endif
return(packet)
#endif
#
# If Info-Reply, encode Type 7 LCAF.
#
afi = socket.htons(LISP_AFI_LCAF)
lcaf_type = LISP_LCAF_NAT_TYPE
lcaf_len = socket.htons(16)
ms_port = socket.htons(self.ms_port)
etr_port = socket.htons(self.etr_port)
packet += struct.pack("HHBBHHHH", afi, 0, lcaf_type, 0, lcaf_len,
ms_port, etr_port, socket.htons(self.global_etr_rloc.afi))
packet += self.global_etr_rloc.pack_address()
packet += struct.pack("HH", 0, socket.htons(self.private_etr_rloc.afi))
packet += self.private_etr_rloc.pack_address()
if (len(self.rtr_list) == 0): packet += struct.pack("H", 0)
#
# Encode RTR list.
#
for rtr in self.rtr_list:
packet += struct.pack("H", socket.htons(rtr.afi))
packet += rtr.pack_address()
#endfor
return(packet)
#enddef
def decode(self, packet):
orig_packet = packet
packet_format = "I"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
first_long = struct.unpack(packet_format, packet[:format_size])
first_long = first_long[0]
packet = packet[format_size::]
packet_format = "Q"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
nonce = struct.unpack(packet_format, packet[:format_size])
first_long = socket.ntohl(first_long)
self.nonce = nonce[0]
self.info_reply = first_long & 0x08000000
self.hostname = None
packet = packet[format_size::]
#
# Parse key-id, auth-len, auth-data, and EID-record. We don't support
# any of these. On encode, we set 3 longs worth of 0.
#
packet_format = "HH"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
#
# If an LCAF value appears in the key-id field, then this is an
# old style Echo-Reply (that NX-OS implemented).
#
key_id, auth_len = struct.unpack(packet_format, packet[:format_size])
if (auth_len != 0): return(None)
packet = packet[format_size::]
packet_format = "IBBH"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
ttl, rsvd, ml, eid_afi = struct.unpack(packet_format,
packet[:format_size])
if (eid_afi != 0): return(None)
packet = packet[format_size::]
#
# Check if name supplied.
#
if (self.info_reply == False):
packet_format = "H"
format_size = struct.calcsize(packet_format)
if (len(packet) >= format_size):
afi = struct.unpack(packet_format, packet[:format_size])[0]
if (socket.ntohs(afi) == LISP_AFI_NAME):
packet = packet[format_size::]
packet, self.hostname = lisp_decode_dist_name(packet)
#endif
#endif
return(orig_packet)
#endif
#
# Process Info-Reply.
#
packet_format = "HHBBHHH"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
afi, x, lcaf_type, rsvd, lcaf_len, ms_port, etr_port = \
struct.unpack(packet_format, packet[:format_size])
if (socket.ntohs(afi) != LISP_AFI_LCAF): return(None)
self.ms_port = socket.ntohs(ms_port)
self.etr_port = socket.ntohs(etr_port)
packet = packet[format_size::]
#
# Get addresses one AFI at a time.
#
packet_format = "H"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
#
# Get global ETR RLOC address.
#
afi = struct.unpack(packet_format, packet[:format_size])[0]
packet = packet[format_size::]
if (afi != 0):
self.global_etr_rloc.afi = socket.ntohs(afi)
packet = self.global_etr_rloc.unpack_address(packet)
if (packet == None): return(None)
self.global_etr_rloc.mask_len = \
self.global_etr_rloc.host_mask_len()
#endif
#
# Get global MS RLOC address.
#
if (len(packet) < format_size): return(orig_packet)
afi = struct.unpack(packet_format, packet[:format_size])[0]
packet = packet[format_size::]
if (afi != 0):
self.global_ms_rloc.afi = socket.ntohs(afi)
packet = self.global_ms_rloc.unpack_address(packet)
if (packet == None): return(orig_packet)
self.global_ms_rloc.mask_len = self.global_ms_rloc.host_mask_len()
#endif
#
# Get private ETR RLOC address.
#
if (len(packet) < format_size): return(orig_packet)
afi = struct.unpack(packet_format, packet[:format_size])[0]
packet = packet[format_size::]
if (afi != 0):
self.private_etr_rloc.afi = socket.ntohs(afi)
packet = self.private_etr_rloc.unpack_address(packet)
if (packet == None): return(orig_packet)
self.private_etr_rloc.mask_len = \
self.private_etr_rloc.host_mask_len()
#endif
#
# Get RTR list if any.
#
while (len(packet) >= format_size):
afi = struct.unpack(packet_format, packet[:format_size])[0]
packet = packet[format_size::]
if (afi == 0): continue
rtr = lisp_address(socket.ntohs(afi), "", 0, 0)
packet = rtr.unpack_address(packet)
if (packet == None): return(orig_packet)
rtr.mask_len = rtr.host_mask_len()
self.rtr_list.append(rtr)
#endwhile
return(orig_packet)
#enddef
#endclass
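#
# Illustrative sketch, not used by lisp_info: the first longword of an
# Info-Request versus an Info-Reply as built in lisp_info.encode() above.
# The type is LISP_NAT_INFO and the R bit distinguishes a reply. The
# function name is made up for exposition.
#
def lisp_example_info_first_long(info_reply):
    first_long = (LISP_NAT_INFO << 28)
    if (info_reply): first_long |= (1 << 27)    # R bit set means Info-Reply
    return(first_long)
#enddef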
class lisp_nat_info():
def __init__(self, addr_str, hostname, port):
self.address = addr_str
self.hostname = hostname
self.port = port
self.uptime = lisp_get_timestamp()
#enddef
def timed_out(self):
elapsed = time.time() - self.uptime
return(elapsed >= (LISP_INFO_INTERVAL * 2))
#enddef
#endclass
class lisp_info_source():
def __init__(self, hostname, addr_str, port):
self.address = lisp_address(LISP_AFI_IPV4, addr_str, 32, 0)
self.port = port
self.uptime = lisp_get_timestamp()
self.nonce = None
self.hostname = hostname
self.no_timeout = False
#enddef
def cache_address_for_info_source(self):
key = self.address.print_address_no_iid() + self.hostname
lisp_info_sources_by_address[key] = self
#enddef
def cache_nonce_for_info_source(self, nonce):
self.nonce = nonce
lisp_info_sources_by_nonce[nonce] = self
#enddef
#endclass
#------------------------------------------------------------------------------
#
# lisp_concat_auth_data
#
# Take each longword, byte-swap it when running on a little-endian (x86)
# system, and convert it to an ASCII hex string, zero-filling any longword
# whose hex value leads with 0.
#
def lisp_concat_auth_data(alg_id, auth1, auth2, auth3, auth4):
if (lisp_is_x86()):
if (auth1 != ""): auth1 = byte_swap_64(auth1)
if (auth2 != ""): auth2 = byte_swap_64(auth2)
if (auth3 != ""):
if (alg_id == LISP_SHA_1_96_ALG_ID): auth3 = socket.ntohl(auth3)
else: auth3 = byte_swap_64(auth3)
#endif
if (auth4 != ""): auth4 = byte_swap_64(auth4)
#endif
if (alg_id == LISP_SHA_1_96_ALG_ID):
auth1 = lisp_hex_string(auth1)
auth1 = auth1.zfill(16)
auth2 = lisp_hex_string(auth2)
auth2 = auth2.zfill(16)
auth3 = lisp_hex_string(auth3)
auth3 = auth3.zfill(8)
auth_data = auth1 + auth2 + auth3
#endif
if (alg_id == LISP_SHA_256_128_ALG_ID):
auth1 = lisp_hex_string(auth1)
auth1 = auth1.zfill(16)
auth2 = lisp_hex_string(auth2)
auth2 = auth2.zfill(16)
auth3 = lisp_hex_string(auth3)
auth3 = auth3.zfill(16)
auth4 = lisp_hex_string(auth4)
auth4 = auth4.zfill(16)
auth_data = auth1 + auth2 + auth3 + auth4
#endif
return(auth_data)
#enddef
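#
# Illustrative example of the zero-fill step lisp_concat_auth_data() above
# depends on: a longword whose hex string would lead with 0 must be
# left-padded so the concatenated authentication string keeps its full width.
# The function name and the 16-hex-digit (64-bit) width are used here for
# exposition only.
#
def lisp_example_zfill_longword(value):
    hex_str = "%x" % value       # e.g. 0xbeef encodes as "beef"
    return(hex_str.zfill(16))    # left-pad to 16 hex digits, "000000000000beef"
#enddef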
#
# lisp_open_listen_socket
#
# Open either internal socket or network socket. If network socket, it will
# open it with a local address of 0::0 which means the one socket can be
# used for IPv4 or IPv6. This is goodness and reduces the number of threads
# required.
#
def lisp_open_listen_socket(local_addr, port):
if (port.isdigit()):
if (local_addr.find(".") != -1):
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
#endif
if (local_addr.find(":") != -1):
if (lisp_is_raspbian()): return(None)
sock = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)
#endif
sock.bind((local_addr, int(port)))
else:
name = port
if (os.path.exists(name)):
os.system("rm " + name)
time.sleep(1)
#endif
sock = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
sock.bind(name)
#endif
return(sock)
#enddef
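#
# Illustrative usage of lisp_open_listen_socket(), not executed here. The
# port argument doubles as a selector: a digit string opens a network socket,
# anything else is treated as an AF_UNIX pathname. The argument values below
# are examples only.
#
#   lisp_ipc_socket = lisp_open_listen_socket("", "lisp-etr")
#   lisp_ctrl_socket = lisp_open_listen_socket("0.0.0.0", "4342")
#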
#
# lisp_open_send_socket
#
# Open socket for sending to port 4342.
#
def lisp_open_send_socket(internal_name, afi):
if (internal_name == ""):
if (afi == LISP_AFI_IPV4):
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
#endif
if (afi == LISP_AFI_IPV6):
if (lisp_is_raspbian()): return(None)
sock = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)
#endif
else:
if (os.path.exists(internal_name)): os.system("rm " + internal_name)
sock = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
sock.bind(internal_name)
#endif
return(sock)
#enddef
#
# lisp_close_socket
#
# Close network and internal sockets.
#
def lisp_close_socket(sock, internal_name):
sock.close()
if (os.path.exists(internal_name)): os.system("rm " + internal_name)
return
#enddef
#
# lisp_is_running
#
# Test if one of "lisp-itr", "lisp-etr", "lisp-mr", "lisp-ms", "lisp-ddt", or
# "lisp-core" is running.
#
def lisp_is_running(node):
return(True if (os.path.exists(node)) else False)
#enddef
#
# lisp_packet_ipc
#
# Build IPC message for a LISP control packet destined for UDP port 4342. This
# packet goes to the lisp-core process, which then relays it over IPC to the
# appropriate LISP component process.
#
def lisp_packet_ipc(packet, source, sport):
return(("packet@" + str(len(packet)) + "@" + source + "@" + str(sport) + \
"@" + packet))
#enddef
#
# lisp_control_packet_ipc
#
# Build IPC message for a packet that needs to be sourced from UDP port 4342.
# Always sent by a LISP component process to the lisp-core process.
#
def lisp_control_packet_ipc(packet, source, dest, dport):
return("control-packet@" + dest + "@" + str(dport) + "@" + packet)
#enddef
#
# lisp_data_packet_ipc
#
# Build IPC message for a MAC, IPv4, or IPv6 data packet.
#
def lisp_data_packet_ipc(packet, source):
return("data-packet@" + str(len(packet)) + "@" + source + "@@" + packet)
#enddef
#
# lisp_command_ipc
#
# Build IPC message for a command message. Note this command IPC message must
# have the same number of parameters as the "packet@" IPC, so an intentional
# double @ is put in after the source to indicate a null port.
#
def lisp_command_ipc(packet, source):
return("command@" + str(len(packet)) + "@" + source + "@@" + packet)
#enddef
#
# lisp_api_ipc
#
# Build IPC message for an API message. Note this API IPC message must have
# the same number of parameters as the "packet@" IPC, so an intentional
# double @ is put in after the source to indicate a null port.
#
def lisp_api_ipc(source, data):
return("api@" + str(len(data)) + "@" + source + "@@" + data)
#enddef
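#
# Illustrative sketch of how the IPC strings built above are pulled back
# apart on the receive side (see lisp_receive() further below). The field
# layout is opcode@length@source@port@payload, where command and api
# messages carry an empty port field. The function name is made up and it is
# not called by the implementation.
#
def lisp_example_parse_ipc(ipc_message):
    fields = ipc_message.split("@")
    opcode = fields[0]               # "packet", "command", "api", ...
    length = fields[1]               # payload length as a string
    source = fields[2]               # sending LISP component
    port = fields[3]                 # "" for command and api messages
    payload = "@".join(fields[4:])   # payload may itself contain 0x40 bytes
    return([opcode, length, source, port, payload])
#enddef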
#
# lisp_ipc
#
# Send IPC message to internal AF_UNIX socket if LISP component is running. We
# need to send in segments since the socket interface will not accept messages
# beyond a certain size, and socket.setsockopt() won't allow us to increase
# SO_SNDBUF.
#
def lisp_ipc(packet, send_socket, node):
#
# Can't send an IPC message to a process that is not running.
#
if (lisp_is_running(node) == False):
lprint("Suppress sending IPC to {}".format(node))
return
#endif
ipc_len = 1500 if (packet.find("control-packet") == -1) else 9000
offset = 0
length = len(packet)
retry_count = 0
sleep_time = .001
while (length > 0):
segment_len = min(length, ipc_len)
segment = packet[offset:segment_len+offset]
try:
send_socket.sendto(segment, node)
lprint("Send IPC {}-out-of-{} byte to {} succeeded".format( \
len(segment), len(packet), node))
retry_count = 0
sleep_time = .001
except socket.error, e:
if (retry_count == 12):
lprint("Giving up on {}, consider it down".format(node))
break
#endif
lprint("Send IPC {}-out-of-{} byte to {} failed: {}".format( \
len(segment), len(packet), node, e))
retry_count += 1
time.sleep(sleep_time)
lprint("Retrying after {} ms ...".format(sleep_time * 1000))
sleep_time *= 2
continue
#endtry
offset += segment_len
length -= segment_len
#endwhile
return
#enddef
#
# lisp_format_packet
#
# Put whitespace between every 4 bytes of a packet dump.
#
def lisp_format_packet(packet):
packet = binascii.hexlify(packet)
offset = 0
new = ""
length = len(packet) * 2
while (offset < length):
new += packet[offset:offset+8] + " "
offset += 8
length -= 4
#endwhile
return(new)
#enddef
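#
# For illustration (not executed): lisp_format_packet("\x00\x01\x02\x03" +
# "\x04\x05") returns "00010203 0405 ", that is, the hex dump grouped 4 bytes
# (8 hex digits) at a time with a trailing space.
#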
#
# lisp_send
#
# Send packet out.
#
def lisp_send(lisp_sockets, dest, port, packet):
lisp_socket = lisp_sockets[0] if dest.is_ipv4() else lisp_sockets[1]
#
# Remove square brackets. Use an IPv4 socket when address is IPv4, even
# when embedded in ::ffff:<ipv4-address>. This is a special case when
# an RTR sits behind a NAT and is sending a Map-Request. The ECM and
# Map-Request need to use the same ephemeral port and the Map-Reply
# needs to come to the ephemeral listening socket lisp_sockets[0];
#
# Also, on getchip and raspberry-pi OSes, there is no support for IPv6
# sockets, so we need to use the IPv4 embedded address and the IPv4
# socket.
#
address = dest.print_address_no_iid()
if (address.find("::ffff:") != -1 and address.count(".") == 3):
if (lisp_i_am_rtr): lisp_socket = lisp_sockets[0]
if (lisp_socket == None):
lisp_socket = lisp_sockets[0]
address = address.split("::ffff:")[-1]
#endif
#endif
lprint("{} {} bytes {} {}, packet: {}".format(bold("Send", False),
len(packet), bold("to " + address, False), port,
lisp_format_packet(packet)))
#
# If Map-Request/Reply RLOC-probe set TTL for outgoing packet to 255.
#
set_ttl = (LISP_RLOC_PROBE_TTL == 255)
if (set_ttl):
lisp_type = struct.unpack("B", packet[0])[0]
set_ttl = (lisp_type in [0x12, 0x28])
if (set_ttl): lisp_set_ttl(lisp_socket, LISP_RLOC_PROBE_TTL)
#endif
try: lisp_socket.sendto(packet, (address, port))
except socket.error, e:
lprint("socket.sendto() failed: {}".format(e))
#endtry
#
# Set back to default TTL.
#
if (set_ttl): lisp_set_ttl(lisp_socket, 64)
return
#enddef
#
# lisp_receive_segments
#
# Process 1500-byte segments when the received IPC packet is larger than what
# the socket interface can deliver in one read.
#
def lisp_receive_segments(lisp_socket, packet, source, total_length):
#
# If the total length is equal to the segment length, we only have one
# segment, which is the whole packet. Return it.
#
segment_len = total_length - len(packet)
if (segment_len == 0): return([True, packet])
lprint("Received {}-out-of-{} byte segment from {}".format(len(packet),
total_length, source))
#
# Otherwise, receive each segment and assemble it to return entire packet
# to caller.
#
length = segment_len
while (length > 0):
try: segment = lisp_socket.recvfrom(9000)
except: return([False, None])
segment = segment[0]
#
# The sender gave up and sent a new message that made it to us, so the
# last partial packet must be dropped.
#
if (segment.find("packet@") == 0):
seg = segment.split("@")
lprint("Received new message ({}-out-of-{}) while receiving " + \
"fragments, old message discarded", len(segment),
seg[1] if len(seg) > 2 else "?")
return([False, segment])
#endif
length -= len(segment)
packet += segment
lprint("Received {}-out-of-{} byte segment from {}".format( \
len(segment), total_length, source))
#endwhile
return([True, packet])
#enddef
#
# lisp_bit_stuff
#
# For every element in the array, insert a 0x40 ("@"). This is a bit-stuffing
# procedure. Only look at array elements with index 2 and above.
#
def lisp_bit_stuff(payload):
lprint("Bit-stuffing, found {} segments".format(len(payload)))
packet = ""
for segment in payload: packet += segment + "\x40"
return(packet[:-1])
#enddef
#
# lisp_receive
#
# Wait for packet to come in. This function call will block. For command
# IPCs, we need to loop to assemble all segments.
#
# For an internal socket, the format of a recvfrom() 'packet-data' is:
#
# "command" @ <total-length> @ <source> @ <packet-buffer>
# "packet" @ <total-length> @ <source> @ <command-buffer>
#
# So when an array of length 4 does not exist, we are receiving a fragment.
#
# For an external network socket, the format of a recvfrom() is:
#
# packet_data[0] = <packet-buffer>
# packet_data[1] = [<source>, <port>]
#
def lisp_receive(lisp_socket, internal):
while (True):
#
# Read from socket. Return if we received an error.
#
try: packet_data = lisp_socket.recvfrom(9000)
except: return(["", "", "", ""])
#
# This is a packet received on the network. If it was fragmented at the
# sender, then IP did it so it is assembled into a complete datagram
# in this system.
#
if (internal == False):
packet = packet_data[0]
source = lisp_convert_6to4(packet_data[1][0])
port = packet_data[1][1]
if (port == LISP_DATA_PORT):
do_log = lisp_data_plane_logging
packet_str = lisp_format_packet(packet[0:60]) + " ..."
else:
do_log = True
packet_str = lisp_format_packet(packet)
#endif
if (do_log):
lprint("{} {} bytes {} {}, packet: {}".format(bold("Receive",
False), len(packet), bold("from " + source, False), port,
packet_str))
#endif
return(["packet", source, port, packet])
#endif
#
# This is an IPC message that can be fragmented by lisp-core or the
# sending socket interface.
#
assembled = False
data = packet_data[0]
loop = False
while (assembled == False):
data = data.split("@")
if (len(data) < 4):
lprint("Possible fragment (length {}), from old message, " + \
"discarding", len(data[0]))
loop = True
break
#endif
opcode = data[0]
try:
total_length = int(data[1])
except:
error_str = bold("Internal packet reassembly error", False)
lprint("{}: {}".format(error_str, packet_data))
loop = True
break
#endtry
source = data[2]
port = data[3]
#
# If any of the data payload has a 0x40 byte (which is "@" in
# ascii), we will confuse the IPC separator with real data.
# So go to the payload and put in 0x40 where split() separated
# the data. This particularly happens with Map-Notify messages
# since the first byte of the message is 0x40.
#
if (len(data) > 5):
packet = lisp_bit_stuff(data[4::])
else:
packet = data[4]
#endif
#
# Check for reassembly. Once reassembled, then we can process one
# large packet.
#
assembled, packet = lisp_receive_segments(lisp_socket, packet,
source, total_length)
if (packet == None): return(["", "", "", ""])
#
# We did not finish assembling a message but the sender sent a new
# one.
#
if (assembled == False):
data = packet
continue
#endif
if (port == ""): port = "no-port"
if (opcode == "command" and lisp_i_am_core == False):
index = packet.find(" {")
command = packet if index == -1 else packet[:index]
command = ": '" + command + "'"
else:
command = ""
#endif
lprint("{} {} bytes {} {}, {}{}".format(bold("Receive", False),
len(packet), bold("from " + source, False), port, opcode,
command if (opcode in ["command", "api"]) else ": ... " if \
(opcode == "data-packet") else \
": " + lisp_format_packet(packet)))
#endif
#endwhile
if (loop): continue
return([opcode, source, port, packet])
#endwhile
#enddef
#
# lisp_parse_packet
#
# Parse LISP control message.
#
def lisp_parse_packet(lisp_sockets, packet, source, udp_sport, ttl=-1):
trigger_flag = False
header = lisp_control_header()
if (header.decode(packet) == None):
lprint("Could not decode control header")
return(trigger_flag)
#endif
#
# Store source in internal lisp_address() format.
#
from_ipc = source
if (source.find("lisp") == -1):
s = lisp_address(LISP_AFI_NONE, "", 0, 0)
s.string_to_afi(source)
s.store_address(source)
source = s
#endif
if (header.type == LISP_MAP_REQUEST):
lisp_process_map_request(lisp_sockets, packet, None, 0, source,
udp_sport, False, ttl)
elif (header.type == LISP_MAP_REPLY):
lisp_process_map_reply(lisp_sockets, packet, source, ttl)
elif (header.type == LISP_MAP_REGISTER):
lisp_process_map_register(lisp_sockets, packet, source, udp_sport)
elif (header.type == LISP_MAP_NOTIFY):
if (from_ipc == "lisp-etr"):
lisp_process_multicast_map_notify(packet, source)
else:
if (lisp_is_running("lisp-rtr")):
lisp_process_multicast_map_notify(packet, source)
#endif
lisp_process_map_notify(lisp_sockets, packet, source)
#endif
elif (header.type == LISP_MAP_NOTIFY_ACK):
lisp_process_map_notify_ack(packet, source)
elif (header.type == LISP_MAP_REFERRAL):
lisp_process_map_referral(lisp_sockets, packet, source)
elif (header.type == LISP_NAT_INFO and header.is_info_reply()):
x, y, trigger_flag = lisp_process_info_reply(source, packet, True)
elif (header.type == LISP_NAT_INFO and header.is_info_reply() == False):
addr_str = source.print_address_no_iid()
lisp_process_info_request(lisp_sockets, packet, addr_str, udp_sport,
None)
elif (header.type == LISP_ECM):
lisp_process_ecm(lisp_sockets, packet, source, udp_sport)
else:
lprint("Invalid LISP control packet type {}".format(header.type))
#endif
return(trigger_flag)
#enddef
#
# lisp_process_rloc_probe_request
#
# Process Map-Request with RLOC-probe bit set.
#
def lisp_process_rloc_probe_request(lisp_sockets, map_request, source, port,
ttl):
p = bold("RLOC-probe", False)
if (lisp_i_am_etr):
lprint("Received {} Map-Request, send RLOC-probe Map-Reply".format(p))
lisp_etr_process_map_request(lisp_sockets, map_request, source, port,
ttl)
return
#endif
if (lisp_i_am_rtr):
lprint("Received {} Map-Request, send RLOC-probe Map-Reply".format(p))
lisp_rtr_process_map_request(lisp_sockets, map_request, source, port,
ttl)
return
#endif
lprint("Ignoring received {} Map-Request, not an ETR or RTR".format(p))
return
#enddef
#
# lisp_process_smr
#
def lisp_process_smr(map_request):
lprint("Received SMR-based Map-Request")
return
#enddef
#
# lisp_process_smr_invoked_request
#
def lisp_process_smr_invoked_request(map_request):
lprint("Received SMR-invoked Map-Request")
return
#enddef
#
# lisp_build_map_reply
#
# Build a Map-Reply and return a packet to the caller.
#
def lisp_build_map_reply(eid, group, rloc_set, nonce, action, ttl, rloc_probe,
keys, enc, auth, mr_ttl=-1):
map_reply = lisp_map_reply()
map_reply.rloc_probe = rloc_probe
map_reply.echo_nonce_capable = enc
map_reply.hop_count = 0 if (mr_ttl == -1) else mr_ttl
map_reply.record_count = 1
map_reply.nonce = nonce
packet = map_reply.encode()
map_reply.print_map_reply()
eid_record = lisp_eid_record()
eid_record.rloc_count = len(rloc_set)
eid_record.authoritative = auth
eid_record.record_ttl = ttl
eid_record.action = action
eid_record.eid = eid
eid_record.group = group
packet += eid_record.encode()
eid_record.print_record(" ", False)
local_rlocs = lisp_get_all_addresses() + lisp_get_all_translated_rlocs()
for rloc_entry in rloc_set:
rloc_record = lisp_rloc_record()
addr_str = rloc_entry.rloc.print_address_no_iid()
if (addr_str in local_rlocs):
rloc_record.local_bit = True
rloc_record.probe_bit = rloc_probe
rloc_record.keys = keys
if (rloc_entry.priority == 254 and lisp_i_am_rtr):
rloc_record.rloc_name = "RTR"
#endif
#endif
rloc_record.store_rloc_entry(rloc_entry)
rloc_record.reach_bit = True
rloc_record.print_record(" ")
packet += rloc_record.encode()
#endfor
return(packet)
#enddef
#
# lisp_build_map_referral
#
# Build a Map-Referral and return a packet to the caller.
#
def lisp_build_map_referral(eid, group, ddt_entry, action, ttl, nonce):
map_referral = lisp_map_referral()
map_referral.record_count = 1
map_referral.nonce = nonce
packet = map_referral.encode()
map_referral.print_map_referral()
eid_record = lisp_eid_record()
rloc_count = 0
if (ddt_entry == None):
eid_record.eid = eid
eid_record.group = group
else:
rloc_count = len(ddt_entry.delegation_set)
eid_record.eid = ddt_entry.eid
eid_record.group = ddt_entry.group
ddt_entry.map_referrals_sent += 1
#endif
eid_record.rloc_count = rloc_count
eid_record.authoritative = True
#
# Use action passed into this function. But if NULL, select the action
# based on the first ddt-node child type.
#
incomplete = False
if (action == LISP_DDT_ACTION_NULL):
if (rloc_count == 0):
action = LISP_DDT_ACTION_NODE_REFERRAL
else:
ddt_node = ddt_entry.delegation_set[0]
if (ddt_node.is_ddt_child()):
action = LISP_DDT_ACTION_NODE_REFERRAL
#endif
if (ddt_node.is_ms_child()):
action = LISP_DDT_ACTION_MS_REFERRAL
#endif
#endif
#endif
#
# Conditions when the incomplete bit should be set in the Map-Referral.
#
if (action == LISP_DDT_ACTION_NOT_AUTH): incomplete = True
if (action in (LISP_DDT_ACTION_MS_REFERRAL, LISP_DDT_ACTION_MS_ACK)):
incomplete = (lisp_i_am_ms and ddt_node.is_ms_peer() == False)
#endif
eid_record.action = action
eid_record.ddt_incomplete = incomplete
eid_record.record_ttl = ttl
packet += eid_record.encode()
eid_record.print_record(" ", True)
if (rloc_count == 0): return(packet)
for ddt_node in ddt_entry.delegation_set:
rloc_record = lisp_rloc_record()
rloc_record.rloc = ddt_node.delegate_address
rloc_record.priority = ddt_node.priority
rloc_record.weight = ddt_node.weight
rloc_record.mpriority = 255
rloc_record.mweight = 0
rloc_record.reach_bit = True
packet += rloc_record.encode()
rloc_record.print_record(" ")
#endfor
return(packet)
#enddef
#
# lisp_etr_process_map_request
#
# Do ETR processing of a Map-Request.
#
def lisp_etr_process_map_request(lisp_sockets, map_request, source, sport,
ttl):
if (map_request.target_group.is_null()):
db = lisp_db_for_lookups.lookup_cache(map_request.target_eid, False)
else:
db = lisp_db_for_lookups.lookup_cache(map_request.target_group, False)
if (db): db = db.lookup_source_cache(map_request.target_eid, False)
#endif
eid_str = map_request.print_prefix()
if (db == None):
lprint("Database-mapping entry not found for requested EID {}". \
format(green(eid_str, False)))
return
#endif
prefix_str = db.print_eid_tuple()
lprint("Found database-mapping EID-prefix {} for requested EID {}". \
format(green(prefix_str, False), green(eid_str, False)))
#
# Get ITR-RLOC to return Map-Reply to.
#
itr_rloc = map_request.itr_rlocs[0]
if (itr_rloc.is_private_address() and lisp_nat_traversal):
itr_rloc = source
#endif
nonce = map_request.nonce
enc = lisp_nonce_echoing
keys = map_request.keys
db.map_replies_sent += 1
packet = lisp_build_map_reply(db.eid, db.group, db.rloc_set, nonce,
LISP_NO_ACTION, 1440, map_request.rloc_probe, keys, enc, True, ttl)
#
# If we are sending a RLOC-probe Map-Reply to an RTR, data encapsulate it.
# If we are getting RLOC-probe Map-Requests from an xTR behind a NAT, and
# we are an ETR not behind a NAT, we want to return the RLOC-probe Map-Reply
# to the swapped control ports.
#
# We could be getting a RLOC-probe from an xTR that is behind the same
# NAT as us. So do not data encapsulate the RLOC-probe reply.
#
# There is a special hack here. If the sport is 0, this RLOC-probe
# request is coming from an RTR. If we are doing gleaning on the RTR,
# this xTR needs to data encapsulate the RLOC-probe reply. The lisp_rtr_
# list will not be set because a gleaned xTR does not have NAT-traversal
# enabled.
#
if (map_request.rloc_probe and len(lisp_sockets) == 4):
public = (itr_rloc.is_private_address() == False)
rtr = itr_rloc.print_address_no_iid()
if ((public and lisp_rtr_list.has_key(rtr)) or sport == 0):
lisp_encapsulate_rloc_probe(lisp_sockets, itr_rloc, None, packet)
return
#endif
#endif
#
# Send to lisp-core process to send packet from UDP port 4342.
#
lisp_send_map_reply(lisp_sockets, packet, itr_rloc, sport)
return
#enddef
#
# lisp_rtr_process_map_request
#
# Do RTR processing of a Map-Request.
#
def lisp_rtr_process_map_request(lisp_sockets, map_request, source, sport,
ttl):
#
# Get ITR-RLOC to return Map-Reply to.
#
itr_rloc = map_request.itr_rlocs[0]
if (itr_rloc.is_private_address()): itr_rloc = source
nonce = map_request.nonce
eid = map_request.target_eid
group = map_request.target_group
rloc_set = []
for myrloc in [lisp_myrlocs[0], lisp_myrlocs[1]]:
if (myrloc == None): continue
rloc = lisp_rloc()
rloc.rloc.copy_address(myrloc)
rloc.priority = 254
rloc_set.append(rloc)
#endfor
enc = lisp_nonce_echoing
keys = map_request.keys
packet = lisp_build_map_reply(eid, group, rloc_set, nonce, LISP_NO_ACTION,
1440, True, keys, enc, True, ttl)
lisp_send_map_reply(lisp_sockets, packet, itr_rloc, sport)
return
#enddef
#
# lisp_get_private_rloc_set
#
# If the source-EID and target-EID of a Map-Request are behind the same NAT,
# that is, have the same global RLOC address, then return just the private
# addresses in the Map-Reply so the xTRs have shortest RLOC paths between
# each other and don't have to hair-pin through the NAT/firewall device.
#
def lisp_get_private_rloc_set(target_site_eid, seid, group):
rloc_set = target_site_eid.registered_rlocs
source_site_eid = lisp_site_eid_lookup(seid, group, False)
if (source_site_eid == None): return(rloc_set)
#
# Get global RLOC address from target site.
#
target_rloc = None
new_set = []
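#
# Save the target site's private RLOCs in new_set while searching for
# its first global (non-RTR, non-private) RLOC.
#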
for rloc_entry in rloc_set:
if (rloc_entry.is_rtr()): continue
if (rloc_entry.rloc.is_private_address()):
new_rloc = copy.deepcopy(rloc_entry)
new_set.append(new_rloc)
continue
#endif
target_rloc = rloc_entry
break
#endfor
if (target_rloc == None): return(rloc_set)
target_rloc = target_rloc.rloc.print_address_no_iid()
#
# Get global RLOC address from source site.
#
source_rloc = None
for rloc_entry in source_site_eid.registered_rlocs:
if (rloc_entry.is_rtr()): continue
if (rloc_entry.rloc.is_private_address()): continue
source_rloc = rloc_entry
break
#endfor
if (source_rloc == None): return(rloc_set)
source_rloc = source_rloc.rloc.print_address_no_iid()
#
# If the xTRs are behind the same NAT, then we return private addresses.
#
site_id = target_site_eid.site_id
if (site_id == 0):
if (source_rloc == target_rloc):
lprint("Return private RLOCs for sites behind {}".format( \
target_rloc))
return(new_set)
#endif
return(rloc_set)
#endif
#
# If the xTRs are not behind the same NAT, but are configured in the
# same site-id, they can reach each other with private addresses. So
# return them in the RLOC-set.
#
if (site_id == source_site_eid.site_id):
lprint("Return private RLOCs for sites in site-id {}".format(site_id))
return(new_set)
#endif
return(rloc_set)
#enddef
#
# lisp_get_partial_rloc_set
#
# If the Map-Request source is found in the RLOC-set, return all RLOCs that
# do not have the same priority as the Map-Request source (an RTR supporting
# NAT-traversal) RLOC. Otherwise, return the private RLOCs plus the
# priority-254 RTR RLOCs to the requesting ITR/PITR.
#
def lisp_get_partial_rloc_set(registered_rloc_set, mr_source, multicast):
rtr_list = []
rloc_set = []
#
# Search the RTR list to see if the Map-Requestor is an RTR. If so,
# return the RLOC-set to the RTR so it can replicate directly to ETRs.
# Otherwise, return the RTR-list locator-set to the requesting ITR/PITR.
#
rtr_is_requestor = False
behind_nat = False
for rloc_entry in registered_rloc_set:
if (rloc_entry.priority != 254): continue
behind_nat |= True
if (rloc_entry.rloc.is_exact_match(mr_source) == False): continue
rtr_is_requestor = True
break
#endfor
#
# If we find an RTR in the RLOC-set, then the site's RLOC-set is behind
# a NAT. Otherwise, do not return a partial RLOC-set. This RLOC-set is in
# public space.
#
if (behind_nat == False): return(registered_rloc_set)
#
# An RTR can be behind a NAT when deployed in a cloud infrastructure.
# When the MS is in the same cloud infrastructure, the source address
# of the Map-Request (ECM) is not translated. So we are forced to put
# the private address in the rtr-list the MS advertises. But we should
# not return the private address in any Map-Replies. We use the private
# address in the rtr-list for the sole purpose of identifying the RTR so
# we can return the RLOC-set of the ETRs.
#
ignore_private = (os.getenv("LISP_RTR_BEHIND_NAT") != None)
#
# Create two small lists. A list of RTRs which are unicast priority of
# 254 and a rloc-set which are records that are not priority 254.
#
for rloc_entry in registered_rloc_set:
if (ignore_private and rloc_entry.rloc.is_private_address()): continue
if (multicast == False and rloc_entry.priority == 255): continue
if (multicast and rloc_entry.mpriority == 255): continue
if (rloc_entry.priority == 254):
rtr_list.append(rloc_entry)
else:
rloc_set.append(rloc_entry)
#endif
#endif
#
# The RTR is sending the Map-Request.
#
if (rtr_is_requestor): return(rloc_set)
#
# An ITR is sending the Map-Request.
#
# Check the case where an ETR included a local RLOC and may be behind
# the same NAT as the requester. In this case, the requester can encap
# directly to the private RLOC. If it is not reachable, the ITR can encap
# to the RTR. The ITR will cache a subset of the RLOC-set in this entry
# (so it can check the global RLOC first and not encap to itself).
#
rloc_set = []
for rloc_entry in registered_rloc_set:
if (rloc_entry.rloc.is_private_address()): rloc_set.append(rloc_entry)
#endfor
rloc_set += rtr_list
return(rloc_set)
#enddef
#
# lisp_store_pubsub_state
#
# Take information from Map-Request to create a pubsub cache. We remember
# the map-server lookup EID-prefix. So when the RLOC-set changes for this
# EID-prefix, we trigger a Map-Notify message to the ITR's RLOC and port
# number.
#
def lisp_store_pubsub_state(reply_eid, itr_rloc, mr_sport, nonce, ttl, xtr_id):
pubsub = lisp_pubsub(itr_rloc, mr_sport, nonce, ttl, xtr_id)
pubsub.add(reply_eid)
return
#enddef
#
# lisp_convert_reply_to_notify
#
# In lisp_ms_process_map_request(), a proxy map-reply is built to return to
# a requesting ITR. If the requesting ITR set the N-bit in the Map-Request,
# it is requesting a subscription, so return a Map-Notify so it knows the
# request has been acked.
#
# This function takes a fully built Map-Reply, changes the first 4 bytes to
# make the message a Map-Notify and inserts 4-bytes of Key-ID, Alg-ID, and
# Authentication Length of 0. Then we have converted the Map-Reply into a
# Map-Notify.
#
def lisp_convert_reply_to_notify(packet):
#
# Get data we need from Map-Reply for Map-Notify.
#
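# The record count is the low-order byte of the first 32-bit word and
# the nonce is the next 8 bytes.
#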
record_count = struct.unpack("I", packet[0:4])[0]
record_count = socket.ntohl(record_count) & 0xff
nonce = packet[4:12]
packet = packet[12::]
#
# Build Map-Notify header.
#
first_long = (LISP_MAP_NOTIFY << 28) | record_count
header = struct.pack("I", socket.htonl(first_long))
auth = struct.pack("I", 0)
#
# Concat fields of Map-Notify.
#
packet = header + nonce + auth + packet
return(packet)
#enddef
#
# lisp_notify_subscribers
#
# There has been an RLOC-set change, inform all subscribers who have subscribed
# to this EID-prefix.
#
def lisp_notify_subscribers(lisp_sockets, eid_record, eid, site):
eid_str = eid.print_prefix()
if (lisp_pubsub_cache.has_key(eid_str) == False): return
for pubsub in lisp_pubsub_cache[eid_str].values():
itr = pubsub.itr
port = pubsub.port
itr_str = red(itr.print_address_no_iid(), False)
sub_str = bold("subscriber", False)
xtr_id = "0x" + lisp_hex_string(pubsub.xtr_id)
nonce = "0x" + lisp_hex_string(pubsub.nonce)
lprint(" Notify {} {}:{} xtr-id {} for {}, nonce {}".format( \
sub_str, itr_str, port, xtr_id, green(eid_str, False), nonce))
lisp_build_map_notify(lisp_sockets, eid_record, [eid_str], 1, itr,
port, pubsub.nonce, 0, 0, 0, site, False)
pubsub.map_notify_count += 1
#endfor
return
#enddef
#
# lisp_process_pubsub
#
# Take a fully built Map-Reply and send a Map-Notify as a pubsub ack.
#
def lisp_process_pubsub(lisp_sockets, packet, reply_eid, itr_rloc, port, nonce,
ttl, xtr_id):
#
# Store subscriber state.
#
lisp_store_pubsub_state(reply_eid, itr_rloc, port, nonce, ttl, xtr_id)
eid = green(reply_eid.print_prefix(), False)
itr = red(itr_rloc.print_address_no_iid(), False)
mn = bold("Map-Notify", False)
xtr_id = "0x" + lisp_hex_string(xtr_id)
lprint("{} pubsub request for {} to ack ITR {} xtr-id: {}".format(mn,
eid, itr, xtr_id))
#
# Convert Map-Reply to Map-Notify header and send out.
#
packet = lisp_convert_reply_to_notify(packet)
lisp_send_map_notify(lisp_sockets, packet, itr_rloc, port)
return
#enddef
#
# lisp_ms_process_map_request
#
# Do Map-Server processing of a Map-Request. Returns various LISP-DDT internal
# and external action values.
#
def lisp_ms_process_map_request(lisp_sockets, packet, map_request, mr_source,
mr_sport, ecm_source):
#
# Look up EID in site cache. If we find it and it has registered for
# proxy-replying, this map-server will send the Map-Reply. Otherwise,
# send to one of the ETRs at the registered site.
#
eid = map_request.target_eid
group = map_request.target_group
eid_str = lisp_print_eid_tuple(eid, group)
itr_rloc = map_request.itr_rlocs[0]
xtr_id = map_request.xtr_id
nonce = map_request.nonce
action = LISP_NO_ACTION
pubsub = map_request.subscribe_bit
#
# Check if we are verifying Map-Request signatures. If so, do a mapping
# database lookup on the source-EID to get public-key.
#
sig_good = True
is_crypto_hash = (lisp_get_eid_hash(eid) != None)
if (is_crypto_hash):
sig = map_request.map_request_signature
if (sig == None):
sig_good = False
lprint(("EID-crypto-hash signature verification {}, " + \
"no signature found").format(bold("failed", False)))
else:
sig_eid = map_request.signature_eid
hash_eid, pubkey, sig_good = lisp_lookup_public_key(sig_eid)
if (sig_good):
sig_good = map_request.verify_map_request_sig(pubkey)
else:
lprint("Public-key lookup failed for sig-eid {}, hash-eid {}".\
format(sig_eid.print_address(), hash_eid.print_address()))
#endif
pf = bold("passed", False) if sig_good else bold("failed", False)
lprint("EID-crypto-hash signature verification {}".format(pf))
#endif
#endif
if (pubsub and sig_good == False):
pubsub = False
lprint("Suppress creating pubsub state due to signature failure")
#endif
#
# There are two cases here that need attention. If the Map-Request was
# an IPv6 Map-Request but the ECM came to us in a IPv4 packet, we need
# to return the Map-Reply in IPv4. And if the Map-Request came to us
# through a NAT, sending the Map-Reply to the Map-Request port won't
# get translated by the NAT. So we have to return the Map-Reply to the
# ECM port. Hopefully, the RTR is listening on the ECM port and using
# the Map-Request port as the ECM port as well. This is typically only
# a problem on the RTR, when behind a NAT. For an ITR, it usually
# doesn't send Map-Requests since NAT-traversal logic installs default
# map-cache entries.
#
reply_dest = itr_rloc if (itr_rloc.afi == ecm_source.afi) else ecm_source
site_eid = lisp_site_eid_lookup(eid, group, False)
if (site_eid == None or site_eid.is_star_g()):
notfound = bold("Site not found", False)
lprint("{} for requested EID {}".format(notfound,
green(eid_str, False)))
#
# Send negative Map-Reply with TTL 15 minutes.
#
lisp_send_negative_map_reply(lisp_sockets, eid, group, nonce, itr_rloc,
mr_sport, 15, xtr_id, pubsub)
return([eid, group, LISP_DDT_ACTION_SITE_NOT_FOUND])
#endif
prefix_str = site_eid.print_eid_tuple()
site_name = site_eid.site.site_name
#
# If the request is for a non crypto-EID, signatures are configured to
# be required, and no signature is in the Map-Request, bail.
#
if (is_crypto_hash == False and site_eid.require_signature):
sig = map_request.map_request_signature
sig_eid = map_request.signature_eid
if (sig == None or sig_eid.is_null()):
lprint("Signature required for site {}".format(site_name))
sig_good = False
else:
sig_eid = map_request.signature_eid
hash_eid, pubkey, sig_good = lisp_lookup_public_key(sig_eid)
if (sig_good):
sig_good = map_request.verify_map_request_sig(pubkey)
else:
lprint("Public-key lookup failed for sig-eid {}, hash-eid {}".\
format(sig_eid.print_address(), hash_eid.print_address()))
#endif
pf = bold("passed", False) if sig_good else bold("failed", False)
lprint("Required signature verification {}".format(pf))
#endif
#endif
#
# Check if site-eid is registered.
#
if (sig_good and site_eid.registered == False):
lprint("Site '{}' with EID-prefix {} is not registered for EID {}". \
format(site_name, green(prefix_str, False), green(eid_str, False)))
#
# We do not want to return a coarser EID-prefix to the Map-Resolver. The
# accept-more-specifics (AMS) site entry may be one.
#
if (site_eid.accept_more_specifics == False):
eid = site_eid.eid
group = site_eid.group
#endif
#
# Send forced-TTLs even for native-forward entries.
#
ttl = 1
if (site_eid.force_ttl != None):
ttl = site_eid.force_ttl | 0x80000000
#endif
#
# Send negative Map-Reply with TTL 1 minute.
#
lisp_send_negative_map_reply(lisp_sockets, eid, group, nonce, itr_rloc,
mr_sport, ttl, xtr_id, pubsub)
return([eid, group, LISP_DDT_ACTION_MS_NOT_REG])
#endif
#
# Should we proxy-reply?
#
nat = False
pr_str = ""
check_policy = False
if (site_eid.force_nat_proxy_reply):
pr_str = ", nat-forced"
nat = True
check_policy = True
elif (site_eid.force_proxy_reply):
pr_str = ", forced"
check_policy = True
elif (site_eid.proxy_reply_requested):
pr_str = ", requested"
check_policy = True
elif (map_request.pitr_bit and site_eid.pitr_proxy_reply_drop):
pr_str = ", drop-to-pitr"
action = LISP_DROP_ACTION
elif (site_eid.proxy_reply_action != ""):
action = site_eid.proxy_reply_action
pr_str = ", forced, action {}".format(action)
action = LISP_DROP_ACTION if (action == "drop") else \
LISP_NATIVE_FORWARD_ACTION
#endif
#
# Apply policy to determine if we send a negative map-reply with action
# "policy-denied" or we send a map-reply with the policy set parameters.
#
policy_drop = False
policy = None
if (check_policy and lisp_policies.has_key(site_eid.policy)):
p = lisp_policies[site_eid.policy]
if (p.match_policy_map_request(map_request, mr_source)): policy = p
if (policy):
ps = bold("matched", False)
lprint("Map-Request {} policy '{}', set-action '{}'".format(ps,
p.policy_name, p.set_action))
else:
ps = bold("no match", False)
lprint("Map-Request {} for policy '{}', implied drop".format(ps,
p.policy_name))
policy_drop = True
#endif
#endif
if (pr_str != ""):
lprint("Proxy-replying for EID {}, found site '{}' EID-prefix {}{}". \
format(green(eid_str, False), site_name, green(prefix_str, False),
pr_str))
rloc_set = site_eid.registered_rlocs
ttl = 1440
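#
# For a nat-forced proxy-reply, first try returning the site's private
# RLOCs when both xTRs are behind the same NAT or share a site-id.
# Otherwise return a partial RLOC-set and use a shorter 15-minute TTL.
#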
if (nat):
if (site_eid.site_id != 0):
seid = map_request.source_eid
rloc_set = lisp_get_private_rloc_set(site_eid, seid, group)
#endif
if (rloc_set == site_eid.registered_rlocs):
m = (site_eid.group.is_null() == False)
new_set = lisp_get_partial_rloc_set(rloc_set, reply_dest, m)
if (new_set != rloc_set):
ttl = 15
rloc_set = new_set
#endif
#endif
#endif
#
# Force TTL if configured. To denote seconds in TTL field of EID-record
# set high-order bit in ttl value.
#
if (site_eid.force_ttl != None):
ttl = site_eid.force_ttl | 0x80000000
#endif
#
# Does policy say what the TTL should be? And should we drop the
# Map-Request and return a negative Map-Reply?
#
if (policy):
if (policy.set_record_ttl):
ttl = policy.set_record_ttl
lprint("Policy set-record-ttl to {}".format(ttl))
#endif
if (policy.set_action == "drop"):
lprint("Policy set-action drop, send negative Map-Reply")
action = LISP_POLICY_DENIED_ACTION
rloc_set = []
else:
rloc = policy.set_policy_map_reply()
if (rloc): rloc_set = [rloc]
#endif
#endif
if (policy_drop):
lprint("Implied drop action, send negative Map-Reply")
action = LISP_POLICY_DENIED_ACTION
rloc_set = []
#endif
enc = site_eid.echo_nonce_capable
#
# Don't tell spoofer any prefix information about the target EID.
#
if (sig_good):
reply_eid = site_eid.eid
reply_group = site_eid.group
else:
reply_eid = eid
reply_group = group
action = LISP_AUTH_FAILURE_ACTION
rloc_set = []
#endif
#
# If this Map-Request is also a subscription request, return same
# information in a Map-Notify.
#
packet = lisp_build_map_reply(reply_eid, reply_group, rloc_set,
nonce, action, ttl, False, None, enc, False)
if (pubsub):
lisp_process_pubsub(lisp_sockets, packet, reply_eid, itr_rloc,
mr_sport, nonce, ttl, xtr_id)
else:
lisp_send_map_reply(lisp_sockets, packet, itr_rloc, mr_sport)
#endif
return([site_eid.eid, site_eid.group, LISP_DDT_ACTION_MS_ACK])
#endif
#
# If there are no registered RLOCs, return.
#
rloc_count = len(site_eid.registered_rlocs)
if (rloc_count == 0):
lprint(("Requested EID {} found site '{}' with EID-prefix {} with " + \
"no registered RLOCs").format(green(eid_str, False), site_name,
green(prefix_str, False)))
return([site_eid.eid, site_eid.group, LISP_DDT_ACTION_MS_ACK])
#endif
#
# Forward to ETR at registered site. We have to put in an ECM.
#
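#
# Hash the source-EID (or the target-EID when there is no source-EID)
# with the target-EID to pick one of the registered RLOCs so requests
# are load-split across the site's ETRs.
#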
hash_address = map_request.target_eid if map_request.source_eid.is_null() \
else map_request.source_eid
hashval = map_request.target_eid.hash_address(hash_address)
hashval %= rloc_count
etr = site_eid.registered_rlocs[hashval]
if (etr.rloc.is_null()):
lprint(("Suppress forwarding Map-Request for EID {} at site '{}' " + \
"EID-prefix {}, no RLOC address").format(green(eid_str, False),
site_name, green(prefix_str, False)))
else:
lprint(("Forwarding Map-Request for EID {} to ETR {} at site '{}' " + \
"EID-prefix {}").format(green(eid_str, False),
red(etr.rloc.print_address(), False), site_name,
green(prefix_str, False)))
#
# Send ECM.
#
lisp_send_ecm(lisp_sockets, packet, map_request.source_eid, mr_sport,
map_request.target_eid, etr.rloc, to_etr=True)
#endif
return([site_eid.eid, site_eid.group, LISP_DDT_ACTION_MS_ACK])
#enddef
#
# lisp_ddt_process_map_request
#
# Do DDT-node processing of a Map-Request received from a Map-Resolver.
#
def lisp_ddt_process_map_request(lisp_sockets, map_request, ecm_source, port):
#
# Lookup target EID address in DDT cache.
#
eid = map_request.target_eid
group = map_request.target_group
eid_str = lisp_print_eid_tuple(eid, group)
nonce = map_request.nonce
action = LISP_DDT_ACTION_NULL
#
# First check to see if EID is registered locally if we are a Map-Server.
# Otherwise, do DDT lookup.
#
ddt_entry = None
if (lisp_i_am_ms):
site_eid = lisp_site_eid_lookup(eid, group, False)
if (site_eid == None): return
if (site_eid.registered):
action = LISP_DDT_ACTION_MS_ACK
ttl = 1440
else:
eid, group, action = lisp_ms_compute_neg_prefix(eid, group)
action = LISP_DDT_ACTION_MS_NOT_REG
ttl = 1
#endif
else:
ddt_entry = lisp_ddt_cache_lookup(eid, group, False)
if (ddt_entry == None):
action = LISP_DDT_ACTION_NOT_AUTH
ttl = 0
lprint("DDT delegation entry not found for EID {}".format( \
green(eid_str, False)))
elif (ddt_entry.is_auth_prefix()):
#
# Check auth-prefix. That means there are no referrals.
#
action = LISP_DDT_ACTION_DELEGATION_HOLE
ttl = 15
ddt_entry_str = ddt_entry.print_eid_tuple()
lprint(("DDT delegation entry not found but auth-prefix {} " + \
"found for EID {}").format(ddt_entry_str,
green(eid_str, False)))
if (group.is_null()):
eid = lisp_ddt_compute_neg_prefix(eid, ddt_entry,
lisp_ddt_cache)
else:
group = lisp_ddt_compute_neg_prefix(group, ddt_entry,
lisp_ddt_cache)
eid = lisp_ddt_compute_neg_prefix(eid, ddt_entry,
ddt_entry.source_cache)
#endif
ddt_entry = None
else:
ddt_entry_str = ddt_entry.print_eid_tuple()
lprint("DDT delegation entry {} found for EID {}".format( \
ddt_entry_str, green(eid_str, False)))
ttl = 1440
#endif
#endif
#
# Build and return a Map-Referral message to the source of the Map-Request.
#
packet = lisp_build_map_referral(eid, group, ddt_entry, action, ttl, nonce)
nonce = map_request.nonce >> 32
if (map_request.nonce != 0 and nonce != 0xdfdf0e1d): port = LISP_CTRL_PORT
lisp_send_map_referral(lisp_sockets, packet, ecm_source, port)
return
#enddef
#
# lisp_find_negative_mask_len
#
# XOR the two addresses so we can find the first bit that is different. Then
# count the number of bits from the left that bit position is. That is the
# new mask-length. Compare to the neg-prefix mask-length we have found so
# far. If the new one is longer than the stored one so far, replace it.
#
# This function assumes the address size and the address-family are the same
# for 'eid' and 'entry_prefix'. Caller must make sure of that.
#
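# For example, for eid 10.1.1.1 and entry_prefix 10.2.0.0 the first bit
# that differs is bit 14 (counting from 0 at the high-order end), so 14
# becomes the candidate mask-length.
#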
def lisp_find_negative_mask_len(eid, entry_prefix, neg_prefix):
diff_address = eid.hash_address(entry_prefix)
address_size = eid.addr_length() * 8
mask_len = 0
#
# The first set bit is the one that is different.
#
for mask_len in range(address_size):
bit_test = 1 << (address_size - mask_len - 1)
if (diff_address & bit_test): break
#endfor
if (mask_len > neg_prefix.mask_len): neg_prefix.mask_len = mask_len
return
#enddef
#
# lisp_neg_prefix_walk
#
# Callback routine to decide which prefixes should be considered by function
# lisp_find_negative_mask_len().
#
# 'entry' in this routine could be a lisp_ddt_entry() or a lisp_site_eid().
#
def lisp_neg_prefix_walk(entry, parms):
eid, auth_prefix, neg_prefix = parms
if (auth_prefix == None):
if (entry.eid.instance_id != eid.instance_id):
return([True, parms])
#endif
if (entry.eid.afi != eid.afi): return([True, parms])
else:
if (entry.eid.is_more_specific(auth_prefix) == False):
return([True, parms])
#endif
#endif
#
# Find bits that match.
#
lisp_find_negative_mask_len(eid, entry.eid, neg_prefix)
return([True, parms])
#enddef
#
# lisp_ddt_compute_neg_prefix
#
# Walk the DDT cache to compute the least specific prefix within the auth-
# prefix found.
#
def lisp_ddt_compute_neg_prefix(eid, ddt_entry, cache):
#
# Do not compute negative prefixes for distinguished-names or geo-prefixes.
#
if (eid.is_binary() == False): return(eid)
neg_prefix = lisp_address(eid.afi, "", 0, 0)
neg_prefix.copy_address(eid)
neg_prefix.mask_len = 0
auth_prefix_str = ddt_entry.print_eid_tuple()
auth_prefix = ddt_entry.eid
#
# Walk looking for the shortest prefix that DOES not match any site EIDs
# configured.
#
eid, auth_prefix, neg_prefix = cache.walk_cache(lisp_neg_prefix_walk,
(eid, auth_prefix, neg_prefix))
#
# Store high-order bits that are covered by the mask-length.
#
neg_prefix.mask_address(neg_prefix.mask_len)
lprint(("Least specific prefix computed from ddt-cache for EID {} " + \
"using auth-prefix {} is {}").format(green(eid.print_address(), False),
auth_prefix_str, neg_prefix.print_prefix()))
return(neg_prefix)
#enddef
#
# lisp_ms_compute_neg_prefix
#
# From the site cache and the DDT cache, compute a negative EID-prefix to not
# be shorter than a configured authoritative-prefix.
#
def lisp_ms_compute_neg_prefix(eid, group):
neg_prefix = lisp_address(eid.afi, "", 0, 0)
neg_prefix.copy_address(eid)
neg_prefix.mask_len = 0
gneg_prefix = lisp_address(group.afi, "", 0, 0)
gneg_prefix.copy_address(group)
gneg_prefix.mask_len = 0
auth_prefix = None
#
# Look for auth-prefix in DDT cache. If not found, we return the host
# based EID in a negative Map-Referral, action non-authoritative.
#
if (group.is_null()):
ddt_entry = lisp_ddt_cache.lookup_cache(eid, False)
if (ddt_entry == None):
neg_prefix.mask_len = neg_prefix.host_mask_len()
gneg_prefix.mask_len = gneg_prefix.host_mask_len()
return([neg_prefix, gneg_prefix, LISP_DDT_ACTION_NOT_AUTH])
#endif
cache = lisp_sites_by_eid
if (ddt_entry.is_auth_prefix()): auth_prefix = ddt_entry.eid
else:
ddt_entry = lisp_ddt_cache.lookup_cache(group, False)
if (ddt_entry == None):
neg_prefix.mask_len = neg_prefix.host_mask_len()
gneg_prefix.mask_len = gneg_prefix.host_mask_len()
return([neg_prefix, gneg_prefix, LISP_DDT_ACTION_NOT_AUTH])
#endif
if (ddt_entry.is_auth_prefix()): auth_prefix = ddt_entry.group
group, auth_prefix, gneg_prefix = lisp_sites_by_eid.walk_cache( \
lisp_neg_prefix_walk, (group, auth_prefix, gneg_prefix))
gneg_prefix.mask_address(gneg_prefix.mask_len)
lprint(("Least specific prefix computed from site-cache for " + \
"group EID {} using auth-prefix {} is {}").format( \
group.print_address(), auth_prefix.print_prefix() if \
(auth_prefix != None) else "'not found'",
gneg_prefix.print_prefix()))
cache = ddt_entry.source_cache
#endif
#
# Return the auth-prefix if we found it in the DDT cache.
#
action = LISP_DDT_ACTION_DELEGATION_HOLE if (auth_prefix != None) else \
LISP_DDT_ACTION_NOT_AUTH
#
# Walk looking for the shortest prefix that DOES not match any site EIDs
# configured.
#
eid, auth_prefix, neg_prefix = cache.walk_cache(lisp_neg_prefix_walk,
(eid, auth_prefix, neg_prefix))
#
# Store high-order bits that are covered by the mask-length.
#
neg_prefix.mask_address(neg_prefix.mask_len)
lprint(("Least specific prefix computed from site-cache for EID {} " + \
"using auth-prefix {} is {}").format( \
green(eid.print_address(), False),
auth_prefix.print_prefix() if (auth_prefix != None) else \
"'not found'", neg_prefix.print_prefix()))
return([neg_prefix, gneg_prefix, action])
#enddef
#
# lisp_ms_send_map_referral
#
# This function is for a Map-Server to send a Map-Referral to a requesting
# node.
#
def lisp_ms_send_map_referral(lisp_sockets, map_request, ecm_source, port,
action, eid_prefix, group_prefix):
eid = map_request.target_eid
group = map_request.target_group
nonce = map_request.nonce
if (action == LISP_DDT_ACTION_MS_ACK): ttl = 1440
#
# Build Map-Server specific Map-Referral.
#
map_referral = lisp_map_referral()
map_referral.record_count = 1
map_referral.nonce = nonce
packet = map_referral.encode()
map_referral.print_map_referral()
incomplete = False
#
# Figure out what action code, EID-prefix, and ttl to return in the EID-
# record. Temporarily return the requested prefix until we have
# lisp_ms_compute_neg_prefix() working.
#
if (action == LISP_DDT_ACTION_SITE_NOT_FOUND):
eid_prefix, group_prefix, action = lisp_ms_compute_neg_prefix(eid,
group)
ttl = 15
#endif
if (action == LISP_DDT_ACTION_MS_NOT_REG): ttl = 1
if (action == LISP_DDT_ACTION_MS_ACK): ttl = 1440
if (action == LISP_DDT_ACTION_DELEGATION_HOLE): ttl = 15
if (action == LISP_DDT_ACTION_NOT_AUTH): ttl = 0
is_ms_peer = False
rloc_count = 0
ddt_entry = lisp_ddt_cache_lookup(eid, group, False)
if (ddt_entry != None):
rloc_count = len(ddt_entry.delegation_set)
is_ms_peer = ddt_entry.is_ms_peer_entry()
ddt_entry.map_referrals_sent += 1
#endif
#
# Conditions when the incomplete bit should be set in the Map-Referral.
#
if (action == LISP_DDT_ACTION_NOT_AUTH): incomplete = True
if (action in (LISP_DDT_ACTION_MS_REFERRAL, LISP_DDT_ACTION_MS_ACK)):
incomplete = (is_ms_peer == False)
#endif
#
# Store info in EID-record.
#
eid_record = lisp_eid_record()
eid_record.rloc_count = rloc_count
eid_record.authoritative = True
eid_record.action = action
eid_record.ddt_incomplete = incomplete
eid_record.eid = eid_prefix
eid_record.group= group_prefix
eid_record.record_ttl = ttl
packet += eid_record.encode()
eid_record.print_record(" ", True)
#
# Build referral-set.
#
if (rloc_count != 0):
for ddt_node in ddt_entry.delegation_set:
rloc_record = lisp_rloc_record()
rloc_record.rloc = ddt_node.delegate_address
rloc_record.priority = ddt_node.priority
rloc_record.weight = ddt_node.weight
rloc_record.mpriority = 255
rloc_record.mweight = 0
rloc_record.reach_bit = True
packet += rloc_record.encode()
rloc_record.print_record(" ")
#endfor
#endif
#
# Build packet and send Map-Referral message to the source of the
# Map-Request.
#
if (map_request.nonce != 0): port = LISP_CTRL_PORT
lisp_send_map_referral(lisp_sockets, packet, ecm_source, port)
return
#enddef
#
# lisp_send_negative_map_reply
#
# Send a negative Map-Reply. This is one with a specific action code and zero
# RLOCs in the locator-set.
#
def lisp_send_negative_map_reply(sockets, eid, group, nonce, dest, port, ttl,
xtr_id, pubsub):
lprint("Build negative Map-Reply EID-prefix {}, nonce 0x{} to ITR {}". \
format(lisp_print_eid_tuple(eid, group), lisp_hex_string(nonce),
red(dest.print_address(), False)))
action = LISP_NATIVE_FORWARD_ACTION if group.is_null() else \
LISP_DROP_ACTION
#
# If this is a crypto-EID, return LISP_SEND_MAP_REQUEST_ACTION.
#
if (lisp_get_eid_hash(eid) != None):
action = LISP_SEND_MAP_REQUEST_ACTION
#endif
packet = lisp_build_map_reply(eid, group, [], nonce, action, ttl, False,
None, False, False)
#
# Send Map-Notify if this Map-Request is a subscribe-request.
#
if (pubsub):
lisp_process_pubsub(sockets, packet, eid, dest, port, nonce, ttl,
xtr_id)
else:
lisp_send_map_reply(sockets, packet, dest, port)
#endif
return
#enddef
#
# lisp_retransmit_ddt_map_request
#
# Have the Map-Resolver transmit a DDT Map-Request.
#
def lisp_retransmit_ddt_map_request(mr):
seid_str = mr.mr_source.print_address()
deid_str = mr.print_eid_tuple()
nonce = mr.nonce
#
# Get the referral-node we sent the Map-Request to last time. We need
# to increment its no-response counter.
#
if (mr.last_request_sent_to):
last_node = mr.last_request_sent_to.print_address()
ref = lisp_referral_cache_lookup(mr.last_cached_prefix[0],
mr.last_cached_prefix[1], True)
if (ref and ref.referral_set.has_key(last_node)):
ref.referral_set[last_node].no_responses += 1
#endif
#endif
#
# Did we reach the max number of retries? We are giving up since no
# Map-Referral or Map-Reply has been received.
#
if (mr.retry_count == LISP_MAX_MAP_NOTIFY_RETRIES):
lprint("DDT Map-Request retry limit reached for EID {}, nonce 0x{}". \
format(green(deid_str, False), lisp_hex_string(nonce)))
mr.dequeue_map_request()
return
#endif
mr.retry_count += 1
s = green(seid_str, False)
d = green(deid_str, False)
lprint("Retransmit DDT {} from {}ITR {} EIDs: {} -> {}, nonce 0x{}". \
format(bold("Map-Request", False), "P" if mr.from_pitr else "",
red(mr.itr.print_address(), False), s, d,
lisp_hex_string(nonce)))
#
# Do referral lookup and send the DDT Map-Request again.
#
lisp_send_ddt_map_request(mr, False)
#
# Restart retransmit timer.
#
mr.retransmit_timer = threading.Timer(LISP_DDT_MAP_REQUEST_INTERVAL,
lisp_retransmit_ddt_map_request, [mr])
mr.retransmit_timer.start()
return
#enddef
#
# lisp_get_referral_node
#
# Get a referral-node of highest priority that is in the up state. Returns
# class lisp_referral_node().
#
def lisp_get_referral_node(referral, source_eid, dest_eid):
#
# Build list of high-priority up referral-nodes.
#
ref_set = []
for ref_node in referral.referral_set.values():
if (ref_node.updown == False): continue
if (len(ref_set) == 0 or ref_set[0].priority == ref_node.priority):
ref_set.append(ref_node)
elif (ref_set[0].priority > ref_node.priority):
ref_set = []
ref_set.append(ref_node)
#endif
#endfor
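#
# Hash the source and destination EIDs to deterministically pick one of
# the equally-preferred referral-nodes.
#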
ref_count = len(ref_set)
if (ref_count == 0): return(None)
hashval = dest_eid.hash_address(source_eid)
hashval = hashval % ref_count
return(ref_set[hashval])
#enddef
#
# lisp_send_ddt_map_request
#
# Send a DDT Map-Request based on an EID lookup in the referral cache.
#
def lisp_send_ddt_map_request(mr, send_to_root):
lisp_sockets = mr.lisp_sockets
nonce = mr.nonce
itr = mr.itr
mr_source = mr.mr_source
eid_str = mr.print_eid_tuple()
#
# Check if the maximum allowable Map-Requests have been sent for this
# map-request-queue entry.
#
if (mr.send_count == 8):
lprint("Giving up on map-request-queue entry {}, nonce 0x{}".format( \
green(eid_str, False), lisp_hex_string(nonce)))
mr.dequeue_map_request()
return
#endif
#
# The caller may want us to use the root versus a best-match lookup. We
# only do this once per Map-Request queue entry.
#
if (send_to_root):
lookup_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
lookup_group = lisp_address(LISP_AFI_NONE, "", 0, 0)
mr.tried_root = True
lprint("Jumping up to root for EID {}".format(green(eid_str, False)))
else:
lookup_eid = mr.eid
lookup_group = mr.group
#endif
#
# Do longest match on EID into DDT referral cache.
#
referral = lisp_referral_cache_lookup(lookup_eid, lookup_group, False)
if (referral == None):
lprint("No referral cache entry found")
lisp_send_negative_map_reply(lisp_sockets, lookup_eid, lookup_group,
nonce, itr, mr.sport, 15, None, False)
return
#endif
ref_str = referral.print_eid_tuple()
lprint("Found referral cache entry {}, referral-type: {}".format(ref_str,
referral.print_referral_type()))
ref_node = lisp_get_referral_node(referral, mr_source, mr.eid)
if (ref_node == None):
lprint("No reachable referral-nodes found")
mr.dequeue_map_request()
lisp_send_negative_map_reply(lisp_sockets, referral.eid,
referral.group, nonce, itr, mr.sport, 1, None, False)
return
#endif
lprint("Send DDT Map-Request to {} {} for EID {}, nonce 0x{}". \
format(ref_node.referral_address.print_address(),
referral.print_referral_type(), green(eid_str, False),
lisp_hex_string(nonce)))
#
# Encapsulate Map-Request and send out.
#
to_ms = (referral.referral_type == LISP_DDT_ACTION_MS_REFERRAL or
referral.referral_type == LISP_DDT_ACTION_MS_ACK)
lisp_send_ecm(lisp_sockets, mr.packet, mr_source, mr.sport, mr.eid,
ref_node.referral_address, to_ms=to_ms, ddt=True)
#
# Do some stats.
#
mr.last_request_sent_to = ref_node.referral_address
mr.last_sent = lisp_get_timestamp()
mr.send_count += 1
ref_node.map_requests_sent += 1
return
#enddef
#
# lisp_mr_process_map_request
#
# Do Map-Resolver processing of a Map-Request received from an ITR. We need to
# forward this Map-Request to the longest matched referral from the
# referral-cache.
#
def lisp_mr_process_map_request(lisp_sockets, packet, map_request, ecm_source,
sport, mr_source):
eid = map_request.target_eid
group = map_request.target_group
deid_str = map_request.print_eid_tuple()
seid_str = mr_source.print_address()
nonce = map_request.nonce
s = green(seid_str, False)
d = green(deid_str, False)
lprint("Received Map-Request from {}ITR {} EIDs: {} -> {}, nonce 0x{}". \
format("P" if map_request.pitr_bit else "",
red(ecm_source.print_address(), False), s, d,
lisp_hex_string(nonce)))
#
# Queue the Map-Request. We need to reliably transmit it.
#
mr = lisp_ddt_map_request(lisp_sockets, packet, eid, group, nonce)
mr.packet = packet
mr.itr = ecm_source
mr.mr_source = mr_source
mr.sport = sport
mr.from_pitr = map_request.pitr_bit
mr.queue_map_request()
lisp_send_ddt_map_request(mr, False)
return
#enddef
#
# lisp_process_map_request
#
# Process a received Map-Request as an ETR, Map-Server, Map-Resolver, and/or
# DDT-node, depending on which roles this process is running.
#
def lisp_process_map_request(lisp_sockets, packet, ecm_source, ecm_port,
mr_source, mr_port, ddt_request, ttl):
orig_packet = packet
map_request = lisp_map_request()
packet = map_request.decode(packet, mr_source, mr_port)
if (packet == None):
lprint("Could not decode Map-Request packet")
return
#endif
map_request.print_map_request()
#
# If RLOC-probe request, process separately.
#
if (map_request.rloc_probe):
lisp_process_rloc_probe_request(lisp_sockets, map_request,
mr_source, mr_port, ttl)
return
#endif
#
# Process SMR.
#
if (map_request.smr_bit):
lisp_process_smr(map_request)
#endif
#
# Process SMR-invoked Map-Request.
#
if (map_request.smr_invoked_bit):
lisp_process_smr_invoked_request(map_request)
#endif
#
# Do ETR processing of the Map-Request if we found a database-mapping.
#
if (lisp_i_am_etr):
lisp_etr_process_map_request(lisp_sockets, map_request, mr_source,
mr_port, ttl)
#endif
#
# Do Map-Server processing of the Map-Request.
#
if (lisp_i_am_ms):
packet = orig_packet
eid, group, ddt_action = lisp_ms_process_map_request(lisp_sockets,
orig_packet, map_request, mr_source, mr_port, ecm_source)
if (ddt_request):
lisp_ms_send_map_referral(lisp_sockets, map_request, ecm_source,
ecm_port, ddt_action, eid, group)
#endif
return
#endif
#
# Map-Request is from an ITR destined to a Map-Resolver.
#
if (lisp_i_am_mr and not ddt_request):
lisp_mr_process_map_request(lisp_sockets, orig_packet, map_request,
ecm_source, mr_port, mr_source)
#endif
#
# Do DDT-node processing of the Map-Request.
#
if (lisp_i_am_ddt or ddt_request):
packet = orig_packet
lisp_ddt_process_map_request(lisp_sockets, map_request, ecm_source,
ecm_port)
#endif
return
#enddef
#
# lisp_store_mr_stats
#
# Store counter and timing stats for the map-resolver that just sent us a
# negative Map-Reply.
#
def lisp_store_mr_stats(source, nonce):
mr = lisp_get_map_resolver(source, None)
if (mr == None): return
#
# Count and record timestamp.
#
mr.neg_map_replies_received += 1
mr.last_reply = lisp_get_timestamp()
#
# For every 100 replies, reset the total_rtt so we can get a new average.
#
if ((mr.neg_map_replies_received % 100) == 0): mr.total_rtt = 0
#
# If Map-Reply matches stored nonce, then we can do an RTT calculation.
#
if (mr.last_nonce == nonce):
mr.total_rtt += (time.time() - mr.last_used)
mr.last_nonce = 0
#endif
if ((mr.neg_map_replies_received % 10) == 0): mr.last_nonce = 0
return
#enddef
#
# lisp_process_map_reply
#
# Process received Map-Reply.
#
def lisp_process_map_reply(lisp_sockets, packet, source, ttl):
global lisp_map_cache
map_reply = lisp_map_reply()
packet = map_reply.decode(packet)
if (packet == None):
lprint("Could not decode Map-Reply packet")
return
#endif
map_reply.print_map_reply()
#
# Process each EID record in Map-Reply message.
#
rloc_key_change = None
for i in range(map_reply.record_count):
eid_record = lisp_eid_record()
packet = eid_record.decode(packet)
if (packet == None):
lprint("Could not decode EID-record in Map-Reply packet")
return
#endif
eid_record.print_record(" ", False)
#
# If negative Map-Reply, see if from a Map-Resolver, do some counting
# and timing stats.
#
if (eid_record.rloc_count == 0):
lisp_store_mr_stats(source, map_reply.nonce)
#endif
multicast = (eid_record.group.is_null() == False)
#
# If this is a (0.0.0.0/0, G) with drop-action, we don't want to
# cache more-specific (S,G) entry. It is a startup timing problem.
#
if (lisp_decent_push_configured):
action = eid_record.action
if (multicast and action == LISP_DROP_ACTION):
if (eid_record.eid.is_local()): continue
#endif
#endif
#
# Some RLOC-probe Map-Replies may have no EID value in the EID-record.
# Like from RTRs or PETRs.
#
if (eid_record.eid.is_null()): continue
#
# Do not lose state for other RLOCs that may be stored in an already
# cached map-cache entry.
#
if (multicast):
mc = lisp_map_cache_lookup(eid_record.eid, eid_record.group)
else:
mc = lisp_map_cache.lookup_cache(eid_record.eid, True)
#endif
new_mc = (mc == None)
#
# Do not let map-cache entries from Map-Replies override gleaned
# entries.
#
if (mc == None):
glean, x, y = lisp_allow_gleaning(eid_record.eid, eid_record.group,
None)
if (glean): continue
else:
if (mc.gleaned): continue
#endif
#
# Process each RLOC record in EID record.
#
rloc_set = []
for j in range(eid_record.rloc_count):
rloc_record = lisp_rloc_record()
rloc_record.keys = map_reply.keys
packet = rloc_record.decode(packet, map_reply.nonce)
if (packet == None):
lprint("Could not decode RLOC-record in Map-Reply packet")
return
#endif
rloc_record.print_record(" ")
old_rloc = None
if (mc): old_rloc = mc.get_rloc(rloc_record.rloc)
if (old_rloc):
rloc = old_rloc
else:
rloc = lisp_rloc()
#endif
#
# Copy RLOC data from record, add to locator-set. Check to see
# if the RLOC has been translated by a NAT. If so, go get the
# translated port and store in rloc entry.
#
port = rloc.store_rloc_from_record(rloc_record, map_reply.nonce,
source)
rloc.echo_nonce_capable = map_reply.echo_nonce_capable
if (rloc.echo_nonce_capable):
addr_str = rloc.rloc.print_address_no_iid()
if (lisp_get_echo_nonce(None, addr_str) == None):
lisp_echo_nonce(addr_str)
#endif
#endif
#
# Process state for RLOC-probe reply from this specific RLOC. And
# update RLOC state for map-cache entry. Ignore an RLOC with a
# different address-family than the received packet. The ITR really
# doesn't know it can reach the RLOC unless it probes for that
# address-family.
#
if (map_reply.rloc_probe and rloc_record.probe_bit):
if (rloc.rloc.afi == source.afi):
lisp_process_rloc_probe_reply(rloc.rloc, source, port,
map_reply.nonce, map_reply.hop_count, ttl)
#endif
#endif
#
# Append to rloc-set array to be stored in map-cache entry.
#
rloc_set.append(rloc)
#
# Did keys change for this RLOC? Flag it if so.
#
if (lisp_data_plane_security and rloc.rloc_recent_rekey()):
rloc_key_change = rloc
#endif
#endfor
#
# If the map-cache entry is for an xTR behind a NAT, we'll find an
# RTR RLOC (which is priority 254). Store private RLOCs that may
# come along with the RTR RLOC because the destination RLOC could
# be behind the same NAT as this ITR. This ITR, however could be
# behind another NAT or in public space. We want to mark the
# private address RLOC unreachable for the two later cases.
#
if (map_reply.rloc_probe == False and lisp_nat_traversal):
new_set = []
log_set = []
for rloc in rloc_set:
#
# Set initial state for private RLOCs to UNREACH and test
# with RLOC-probes if up behind same NAT.
#
if (rloc.rloc.is_private_address()):
rloc.priority = 1
rloc.state = LISP_RLOC_UNREACH_STATE
new_set.append(rloc)
log_set.append(rloc.rloc.print_address_no_iid())
continue
#endif
#
# An RTR should not put RTR RLOCs in its map-cache, but xTRs do.
# Non-RTR RLOCs should only go in the RTR map-cache.
#
if (rloc.priority == 254 and lisp_i_am_rtr == False):
new_set.append(rloc)
log_set.append(rloc.rloc.print_address_no_iid())
#endif
if (rloc.priority != 254 and lisp_i_am_rtr):
new_set.append(rloc)
log_set.append(rloc.rloc.print_address_no_iid())
#endif
#endfor
if (log_set != []):
rloc_set = new_set
lprint("NAT-traversal optimized RLOC-set: {}".format(log_set))
#endif
#endif
#
# If any RLOC-records do not have RLOCs, don't put them in the map-
# cache.
#
new_set = []
for rloc in rloc_set:
if (rloc.json != None): continue
new_set.append(rloc)
#endfor
if (new_set != []):
count = len(rloc_set) - len(new_set)
lprint("Pruning {} no-address RLOC-records for map-cache".format( \
count))
rloc_set = new_set
#endif
#
# If this is an RLOC-probe reply and the RLOCs are registered with
# merge semantics, this Map-Reply may not include the other RLOCs.
# In this case, do not wipe out the other RLOCs. Get them from the
# existing entry.
#
if (map_reply.rloc_probe and mc != None): rloc_set = mc.rloc_set
#
# If we are overwriting the rloc-set cached in the map-cache entry,
# then remove the old rloc pointers from the RLOC-probe list.
#
rloc_set_change = new_mc
if (mc and rloc_set != mc.rloc_set):
mc.delete_rlocs_from_rloc_probe_list()
rloc_set_change = True
#endif
#
# Add to map-cache. If this is a replace, save uptime.
#
uptime = mc.uptime if (mc) else None
if (mc == None):
mc = lisp_mapping(eid_record.eid, eid_record.group, rloc_set)
mc.mapping_source = source
#
# If this is a multicast map-cache entry in an RTR, set map-cache
# TTL small so Map-Requests can be sent more often to capture
# RLE changes.
#
if (lisp_i_am_rtr and eid_record.group.is_null() == False):
mc.map_cache_ttl = LISP_MCAST_TTL
else:
mc.map_cache_ttl = eid_record.store_ttl()
#endif
mc.action = eid_record.action
mc.add_cache(rloc_set_change)
#endif
add_or_replace = "Add"
if (uptime):
mc.uptime = uptime
mc.refresh_time = lisp_get_timestamp()
add_or_replace = "Replace"
#endif
lprint("{} {} map-cache with {} RLOCs".format(add_or_replace,
green(mc.print_eid_tuple(), False), len(rloc_set)))
#
# If there were any changes to the RLOC-set or the keys for any
# existing RLOC in the RLOC-set, tell the external data-plane.
#
if (lisp_ipc_dp_socket and rloc_key_change != None):
lisp_write_ipc_keys(rloc_key_change)
#endif
#
# Send RLOC-probe to highest priority RLOCs if this is a new map-cache
# entry. But if any of the RLOCs were used before in other map-cache
# entries, no need to send RLOC-probes.
#
if (new_mc):
probe = bold("RLOC-probe", False)
for rloc in mc.best_rloc_set:
addr_str = red(rloc.rloc.print_address_no_iid(), False)
lprint("Trigger {} to {}".format(probe, addr_str))
lisp_send_map_request(lisp_sockets, 0, mc.eid, mc.group, rloc)
#endfor
#endif
#endfor
return
#enddef
#
# lisp_compute_auth
#
# Create HMAC hash from packet contents stored in lisp_map_register() and
# encode in packet buffer.
#
def lisp_compute_auth(packet, map_register, password):
if (map_register.alg_id == LISP_NONE_ALG_ID): return(packet)
packet = map_register.zero_auth(packet)
hashval = lisp_hash_me(packet, map_register.alg_id, password, False)
#
# Store packed hash value in lisp_map_register().
#
map_register.auth_data = hashval
packet = map_register.encode_auth(packet)
return(packet)
#enddef
#
# lisp_hash_me
#
# Call HMAC hashing code from multiple places. Returns hash value.
#
def lisp_hash_me(packet, alg_id, password, do_hex):
if (alg_id == LISP_NONE_ALG_ID): return(True)
if (alg_id == LISP_SHA_1_96_ALG_ID):
hashalg = hashlib.sha1
#endif
if (alg_id == LISP_SHA_256_128_ALG_ID):
hashalg = hashlib.sha256
#endif
if (do_hex):
hashval = hmac.new(password, packet, hashalg).hexdigest()
else:
hashval = hmac.new(password, packet, hashalg).digest()
#endif
return(hashval)
#enddef
#
# lisp_verify_auth
#
# Compute sha1 or sha2 hash over Map-Register packet and compare with one
# transmitted in packet that is stored in class lisp_map_register.
#
def lisp_verify_auth(packet, alg_id, auth_data, password):
if (alg_id == LISP_NONE_ALG_ID): return(True)
hashval = lisp_hash_me(packet, alg_id, password, True)
matched = (hashval == auth_data)
#
# Print the hash values if they do not match.
#
if (matched == False):
lprint("Hashed value: {} does not match packet value: {}".format( \
hashval, auth_data))
#endif
return(matched)
#enddef
#
# lisp_retransmit_map_notify
#
# Retransmit the already built Map-Notify message.
#
def lisp_retransmit_map_notify(map_notify):
dest = map_notify.etr
port = map_notify.etr_port
#
# Did we reach the max number of retries? We are giving up since no
# Map-Notify-Acks have been received.
#
if (map_notify.retry_count == LISP_MAX_MAP_NOTIFY_RETRIES):
lprint("Map-Notify with nonce 0x{} retry limit reached for ETR {}". \
format(map_notify.nonce_key, red(dest.print_address(), False)))
key = map_notify.nonce_key
if (lisp_map_notify_queue.has_key(key)):
map_notify.retransmit_timer.cancel()
lprint("Dequeue Map-Notify from retransmit queue, key is: {}". \
format(key))
try:
lisp_map_notify_queue.pop(key)
except:
lprint("Key not found in Map-Notify queue")
#endtry
#endif
return
#endif
lisp_sockets = map_notify.lisp_sockets
map_notify.retry_count += 1
lprint("Retransmit {} with nonce 0x{} to xTR {}, retry {}".format( \
bold("Map-Notify", False), map_notify.nonce_key,
red(dest.print_address(), False), map_notify.retry_count))
lisp_send_map_notify(lisp_sockets, map_notify.packet, dest, port)
if (map_notify.site): map_notify.site.map_notifies_sent += 1
#
# Restart retransmit timer.
#
map_notify.retransmit_timer = threading.Timer(LISP_MAP_NOTIFY_INTERVAL,
lisp_retransmit_map_notify, [map_notify])
map_notify.retransmit_timer.start()
return
#enddef
#
# lisp_send_merged_map_notify
#
# Send Map-Notify with a merged RLOC-set to each ETR in the RLOC-set.
#
def lisp_send_merged_map_notify(lisp_sockets, parent, map_register,
eid_record):
#
# Build EID-record once.
#
eid_record.rloc_count = len(parent.registered_rlocs)
packet_record = eid_record.encode()
eid_record.print_record("Merged Map-Notify ", False)
#
# Build RLOC-records for the merged RLOC-set.
#
for xtr in parent.registered_rlocs:
rloc_record = lisp_rloc_record()
rloc_record.store_rloc_entry(xtr)
packet_record += rloc_record.encode()
rloc_record.print_record(" ")
del(rloc_record)
#endfor
#
# Build Map-Notify for each xTR that needs to receive the Map-Notify.
#
for xtr in parent.registered_rlocs:
dest = xtr.rloc
map_notify = lisp_map_notify(lisp_sockets)
map_notify.record_count = 1
key_id = map_register.key_id
map_notify.key_id = key_id
map_notify.alg_id = map_register.alg_id
map_notify.auth_len = map_register.auth_len
map_notify.nonce = map_register.nonce
map_notify.nonce_key = lisp_hex_string(map_notify.nonce)
map_notify.etr.copy_address(dest)
map_notify.etr_port = map_register.sport
map_notify.site = parent.site
packet = map_notify.encode(packet_record, parent.site.auth_key[key_id])
map_notify.print_notify()
#
# Put Map-Notify state on retransmission queue.
#
key = map_notify.nonce_key
if (lisp_map_notify_queue.has_key(key)):
remove = lisp_map_notify_queue[key]
remove.retransmit_timer.cancel()
del(remove)
#endif
lisp_map_notify_queue[key] = map_notify
#
# Send out.
#
lprint("Send merged Map-Notify to ETR {}".format( \
red(dest.print_address(), False)))
lisp_send(lisp_sockets, dest, LISP_CTRL_PORT, packet)
parent.site.map_notifies_sent += 1
#
# Set retransmit timer.
#
map_notify.retransmit_timer = threading.Timer(LISP_MAP_NOTIFY_INTERVAL,
lisp_retransmit_map_notify, [map_notify])
map_notify.retransmit_timer.start()
#endfor
return
#enddef
#
# lisp_build_map_notify
#
# Setup retransmission queue entry to send the first Map-Notify.
#
def lisp_build_map_notify(lisp_sockets, eid_records, eid_list, record_count,
source, port, nonce, key_id, alg_id, auth_len, site, map_register_ack):
key = lisp_hex_string(nonce) + source.print_address()
#
# If we are already sending Map-Notifies for the 2-tuple, no need to
# queue an entry and send one out. Let the retransmission timer trigger
# the sending.
#
lisp_remove_eid_from_map_notify_queue(eid_list)
if (lisp_map_notify_queue.has_key(key)):
map_notify = lisp_map_notify_queue[key]
s = red(source.print_address_no_iid(), False)
lprint("Map-Notify with nonce 0x{} pending for xTR {}".format( \
lisp_hex_string(map_notify.nonce), s))
return
#endif
map_notify = lisp_map_notify(lisp_sockets)
map_notify.record_count = record_count
key_id = key_id
map_notify.key_id = key_id
map_notify.alg_id = alg_id
map_notify.auth_len = auth_len
map_notify.nonce = nonce
map_notify.nonce_key = lisp_hex_string(nonce)
map_notify.etr.copy_address(source)
map_notify.etr_port = port
map_notify.site = site
map_notify.eid_list = eid_list
#
# Put Map-Notify state on retransmission queue.
#
if (map_register_ack == False):
key = map_notify.nonce_key
lisp_map_notify_queue[key] = map_notify
#endif
if (map_register_ack):
lprint("Send Map-Notify to ack Map-Register")
else:
lprint("Send Map-Notify for RLOC-set change")
#endif
#
# Build packet and copy EID records from Map-Register.
#
packet = map_notify.encode(eid_records, site.auth_key[key_id])
map_notify.print_notify()
if (map_register_ack == False):
eid_record = lisp_eid_record()
eid_record.decode(eid_records)
eid_record.print_record(" ", False)
#endif
#
# Send out.
#
lisp_send_map_notify(lisp_sockets, packet, map_notify.etr, port)
site.map_notifies_sent += 1
if (map_register_ack): return
#
# Set retransmit timer if this is an unsolicited Map-Notify. Otherwise,
# we are acknowledging a Map-Register and the registerer is not going
# to send a Map-Notify-Ack so we shouldn't expect one.
#
map_notify.retransmit_timer = threading.Timer(LISP_MAP_NOTIFY_INTERVAL,
lisp_retransmit_map_notify, [map_notify])
map_notify.retransmit_timer.start()
return
#enddef
#
# lisp_send_map_notify_ack
#
# Change the Map-Notify message to have a new type (Map-Notify-Ack) and
# re-authenticate the message.
#
def lisp_send_map_notify_ack(lisp_sockets, eid_records, map_notify, ms):
map_notify.map_notify_ack = True
#
# Build packet and copy EID records from Map-Register.
#
packet = map_notify.encode(eid_records, ms.password)
map_notify.print_notify()
#
# Send the Map-Notify-Ack.
#
dest = ms.map_server
lprint("Send Map-Notify-Ack to {}".format(
red(dest.print_address(), False)))
lisp_send(lisp_sockets, dest, LISP_CTRL_PORT, packet)
return
#enddef
#
# lisp_send_multicast_map_notify
#
# Send a Map-Notify message to an xTR for the supplied (S,G) passed into this
# function.
#
def lisp_send_multicast_map_notify(lisp_sockets, site_eid, eid_list, xtr):
map_notify = lisp_map_notify(lisp_sockets)
map_notify.record_count = 1
map_notify.nonce = lisp_get_control_nonce()
map_notify.nonce_key = lisp_hex_string(map_notify.nonce)
map_notify.etr.copy_address(xtr)
map_notify.etr_port = LISP_CTRL_PORT
map_notify.eid_list = eid_list
key = map_notify.nonce_key
#
# If we are already sending Map-Notifies for the 2-tuple, no need to
# queue an entry and send one out. Let the retransmission timer trigger
# the sending.
#
lisp_remove_eid_from_map_notify_queue(map_notify.eid_list)
if (lisp_map_notify_queue.has_key(key)):
map_notify = lisp_map_notify_queue[key]
lprint("Map-Notify with nonce 0x{} pending for ITR {}".format( \
map_notify.nonce, red(xtr.print_address_no_iid(), False)))
return
#endif
#
# Put Map-Notify state on retransmission queue.
#
lisp_map_notify_queue[key] = map_notify
#
# Determine if there are any RTRs in the RLOC-set for this (S,G).
#
rtrs_exist = site_eid.rtrs_in_rloc_set()
if (rtrs_exist):
if (site_eid.is_rtr_in_rloc_set(xtr)): rtrs_exist = False
#endif
#
# Build EID-record.
#
eid_record = lisp_eid_record()
eid_record.record_ttl = 1440
eid_record.eid.copy_address(site_eid.eid)
eid_record.group.copy_address(site_eid.group)
eid_record.rloc_count = 0
for rloc_entry in site_eid.registered_rlocs:
if (rtrs_exist ^ rloc_entry.is_rtr()): continue
eid_record.rloc_count += 1
#endfor
packet = eid_record.encode()
#
# Print contents of Map-Notify.
#
map_notify.print_notify()
eid_record.print_record(" ", False)
#
# Build locator-set with only RTR RLOCs if they exist.
#
for rloc_entry in site_eid.registered_rlocs:
if (rtrs_exist ^ rloc_entry.is_rtr()): continue
rloc_record = lisp_rloc_record()
rloc_record.store_rloc_entry(rloc_entry)
packet += rloc_record.encode()
rloc_record.print_record(" ")
#endfor
#
# Encode it.
#
packet = map_notify.encode(packet, "")
if (packet == None): return
#
# Send Map-Notify to xTR.
#
lisp_send_map_notify(lisp_sockets, packet, xtr, LISP_CTRL_PORT)
#
# Set retransmit timer.
#
map_notify.retransmit_timer = threading.Timer(LISP_MAP_NOTIFY_INTERVAL,
lisp_retransmit_map_notify, [map_notify])
map_notify.retransmit_timer.start()
return
#enddef
#
# lisp_queue_multicast_map_notify
#
# This function will look for the ITRs in the local site cache.
#
def lisp_queue_multicast_map_notify(lisp_sockets, rle_list):
null_group = lisp_address(LISP_AFI_NONE, "", 0, 0)
for sg in rle_list:
sg_site_eid = lisp_site_eid_lookup(sg[0], sg[1], True)
if (sg_site_eid == None): continue
#
# (S,G) RLOC-set could be empty when the last RLE goes away. We will have
# to search all individual registrations looking for RTRs.
#
# We store in a dictionary array so we can remove duplicates.
#
sg_rloc_set = sg_site_eid.registered_rlocs
if (len(sg_rloc_set) == 0):
temp_set = {}
for se in sg_site_eid.individual_registrations.values():
for rloc_entry in se.registered_rlocs:
if (rloc_entry.is_rtr() == False): continue
temp_set[rloc_entry.rloc.print_address()] = rloc_entry
#endfor
#endfor
sg_rloc_set = temp_set.values()
#endif
#
# If this is a (0.0.0.0/0, G) or a (0::/0, G), we send a Map-Notify
# to all members (all RLOCs in the sg_rloc_set).
#
notify = []
found_rtrs = False
if (sg_site_eid.eid.address == 0 and sg_site_eid.eid.mask_len == 0):
notify_str = []
rle_nodes = []
if (len(sg_rloc_set) != 0 and sg_rloc_set[0].rle != None):
rle_nodes = sg_rloc_set[0].rle.rle_nodes
#endif
for rle_node in rle_nodes:
notify.append(rle_node.address)
notify_str.append(rle_node.address.print_address_no_iid())
#endfor
lprint("Notify existing RLE-nodes {}".format(notify_str))
else:
#
# If the (S,G) has an RTR registered, then we will send a
# Map-Notify to the RTR instead of the ITRs of the source-site.
#
for rloc_entry in sg_rloc_set:
if (rloc_entry.is_rtr()): notify.append(rloc_entry.rloc)
#endfor
#
# If no RTRs were found, get ITRs from source-site.
#
found_rtrs = (len(notify) != 0)
if (found_rtrs == False):
site_eid = lisp_site_eid_lookup(sg[0], null_group, False)
if (site_eid == None): continue
for rloc_entry in site_eid.registered_rlocs:
if (rloc_entry.rloc.is_null()): continue
notify.append(rloc_entry.rloc)
#endfor
#endif
#
# No ITRs or RTRs found.
#
if (len(notify) == 0):
lprint("No ITRs or RTRs found for {}, Map-Notify suppressed". \
format(green(sg_site_eid.print_eid_tuple(), False)))
continue
#endif
#endif
#
# Send multicast Map-Notify to either ITR-list or RTR-list.
#
for xtr in notify:
lprint("Build Map-Notify to {}TR {} for {}".format("R" if \
found_rtrs else "x", red(xtr.print_address_no_iid(), False),
green(sg_site_eid.print_eid_tuple(), False)))
el = [sg_site_eid.print_eid_tuple()]
lisp_send_multicast_map_notify(lisp_sockets, sg_site_eid, el, xtr)
time.sleep(.001)
#endfor
#endfor
return
#enddef
#
# lisp_find_sig_in_rloc_set
#
# Look for a "signature" key in a JSON RLOC-record. Return None, if not found.
# Return RLOC record if found.
#
def lisp_find_sig_in_rloc_set(packet, rloc_count):
for i in range(rloc_count):
rloc_record = lisp_rloc_record()
packet = rloc_record.decode(packet, None)
json_sig = rloc_record.json
if (json_sig == None): continue
try:
json_sig = json.loads(json_sig.json_string)
except:
lprint("Found corrupted JSON signature")
continue
#endtry
if (json_sig.has_key("signature") == False): continue
return(rloc_record)
#endfor
return(None)
#enddef
#
# lisp_get_eid_hash
#
# From an EID, return EID hash value. Here is an example where all but the
# high-order byte is the EID hash for each hash-length:
#
# EID: fd4f:5b9f:f67c:6dbd:3799:48e1:c6a2:9430
# EID-hash: 4f:5b9f:f67c:6dbd:3799:48e1:c6a2:9430 eid_hash_len = 120
# EID-hash: 6dbd:3799:48e1:c6a2:9430 eid_hash_len = 80
#
# Note when an eid-prefix in lisp_eid_hashes[] has an instance-id of -1, it
# means the eid-prefix is used for all EIDs from any instance-id.
#
# Returns a string with hex digits between colons and the hash length in bits.
# Returns None if the IPv6 EID is not a crypto-hash address. These addresses
# are not authenticated.
#
def lisp_get_eid_hash(eid):
hash_mask_len = None
for eid_prefix in lisp_eid_hashes:
#
# For wildcarding the instance-ID.
#
iid = eid_prefix.instance_id
if (iid == -1): eid_prefix.instance_id = eid.instance_id
ms = eid.is_more_specific(eid_prefix)
eid_prefix.instance_id = iid
if (ms):
hash_mask_len = 128 - eid_prefix.mask_len
break
#endif
#endfor
if (hash_mask_len == None): return(None)
address = eid.address
eid_hash = ""
for i in range(0, hash_mask_len / 16):
addr = address & 0xffff
addr = hex(addr)[2:-1]
eid_hash = addr.zfill(4) + ":" + eid_hash
address >>= 16
#endfor
if (hash_mask_len % 16 != 0):
addr = address & 0xff
addr = hex(addr)[2:-1]
eid_hash = addr.zfill(2) + ":" + eid_hash
#endif
return(eid_hash[0:-1])
#enddef
#
# lisp_lookup_public_key
#
# Given an EID, do a mapping system lookup for a distinguished-name EID
# 'hash-<cga-hash>' to obtain the public-key from an RLOC-record.
#
# Return [hash_eid, pubkey, True/False]. The first two values may be None;
# the last boolean indicates whether the hash lookup succeeded.
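#
# As an illustration (using the example in lisp_get_eid_hash() below), the
# EID fd4f:5b9f:f67c:6dbd:3799:48e1:c6a2:9430 with a 120-bit hash is looked
# up as the distinguished-name EID
# 'hash-4f:5b9f:f67c:6dbd:3799:48e1:c6a2:9430' in the same instance-ID.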
#
def lisp_lookup_public_key(eid):
iid = eid.instance_id
#
# Parse out CGA hash to do public-key lookup with instance-ID and hash
# as a distinguished-name EID.
#
pubkey_hash = lisp_get_eid_hash(eid)
if (pubkey_hash == None): return([None, None, False])
pubkey_hash = "hash-" + pubkey_hash
hash_eid = lisp_address(LISP_AFI_NAME, pubkey_hash, len(pubkey_hash), iid)
group = lisp_address(LISP_AFI_NONE, "", 0, iid)
#
# Do lookup in local instance-ID.
#
site_eid = lisp_site_eid_lookup(hash_eid, group, True)
if (site_eid == None): return([hash_eid, None, False])
#
# Look for JSON RLOC with key "public-key".
#
pubkey = None
for rloc in site_eid.registered_rlocs:
json_pubkey = rloc.json
if (json_pubkey == None): continue
try:
json_pubkey = json.loads(json_pubkey.json_string)
except:
lprint("Registered RLOC JSON format is invalid for {}".format( \
pubkey_hash))
return([hash_eid, None, False])
#endtry
if (json_pubkey.has_key("public-key") == False): continue
pubkey = json_pubkey["public-key"]
break
#endfor
return([hash_eid, pubkey, True])
#enddef
#
# lisp_verify_cga_sig
#
# Verify signature of an IPv6 CGA-based EID if the public-key hash exists
# in the local mapping database (with same instance-ID).
#
def lisp_verify_cga_sig(eid, rloc_record):
#
# Use the signature-eid if it is in the JSON string. Otherwise, the
# crypto-EID is the signature-EID.
#
sig = json.loads(rloc_record.json.json_string)
if (lisp_get_eid_hash(eid)):
sig_eid = eid
elif (sig.has_key("signature-eid")):
sig_eid_str = sig["signature-eid"]
sig_eid = lisp_address(LISP_AFI_IPV6, sig_eid_str, 0, 0)
else:
lprint(" No signature-eid found in RLOC-record")
return(False)
#endif
#
# Lookup CGA hash in mapping database to get public-key.
#
hash_eid, pubkey, lookup_good = lisp_lookup_public_key(sig_eid)
if (hash_eid == None):
eid_str = green(sig_eid.print_address(), False)
lprint(" Could not parse hash in EID {}".format(eid_str))
return(False)
#endif
found = "found" if lookup_good else bold("not found", False)
eid_str = green(hash_eid.print_address(), False)
lprint(" Lookup for crypto-hashed EID {} {}".format(eid_str, found))
if (lookup_good == False): return(False)
if (pubkey == None):
lprint(" RLOC-record with public-key not found")
return(False)
#endif
pubkey_str = pubkey[0:8] + "..." + pubkey[-8::]
lprint(" RLOC-record with public-key '{}' found".format(pubkey_str))
#
# Get signature from RLOC-record in a form to let key.verify() do its
# thing.
#
sig_str = sig["signature"]
try:
sig = binascii.a2b_base64(sig_str)
except:
lprint(" Incorrect padding in signature string")
return(False)
#endtry
sig_len = len(sig)
if (sig_len & 1):
lprint(" Signature length is odd, length {}".format(sig_len))
return(False)
#endif
#
# The signature is over the following string: "[<iid>]<eid>".
#
sig_data = sig_eid.print_address()
#
# Verify signature of CGA and public-key.
#
pubkey = binascii.a2b_base64(pubkey)
try:
key = ecdsa.VerifyingKey.from_pem(pubkey)
except:
bad = bold("Bad public-key", False)
lprint(" {}, not in PEM format".format(bad))
return(False)
#endtry
#
# The hashfunc must be supplied to get signature interoperability between
# a Go signer and a Python verifier. The signature data must go through
# a sha256 hash first. Python signer must use:
#
# ecdsa.SigningKey.sign(sig_data, hashfunc=hashlib.sha256)
#
# Note to use sha256 you need a curve of NIST256p.
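#
# A fuller signer sketch (illustrative only, assuming the python "ecdsa"
# package and hashlib):
#
#   key = ecdsa.SigningKey.generate(curve=ecdsa.NIST256p)
#   signature = key.sign(sig_data, hashfunc=hashlib.sha256)
#   b64_signature = binascii.b2a_base64(signature)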
#
try:
good = key.verify(sig, sig_data, hashfunc=hashlib.sha256)
except:
lprint(" Signature library failed for signature data '{}'".format( \
sig_data))
lprint(" Signature used '{}'".format(sig_str))
return(False)
#endtry
return(good)
#enddef
#
# lisp_remove_eid_from_map_notify_queue
#
# Check to see if any EIDs from the input list are in the Map-Notify
# retransmission queue. If so, remove them. That is, pop the key from the
# dictionary array. The key is the concatenation of the xTR address and
# map-notify nonce.
#
def lisp_remove_eid_from_map_notify_queue(eid_list):
#
# Determine from the supplied EID-list, if any EID is in any EID-list of
# a queued Map-Notify.
#
keys_to_remove = []
for eid_tuple in eid_list:
for mn_key in lisp_map_notify_queue:
map_notify = lisp_map_notify_queue[mn_key]
if (eid_tuple not in map_notify.eid_list): continue
keys_to_remove.append(mn_key)
timer = map_notify.retransmit_timer
if (timer): timer.cancel()
lprint("Remove from Map-Notify queue nonce 0x{} for EID {}".\
format(map_notify.nonce_key, green(eid_tuple, False)))
#endfor
#endfor
#
# Now remove keys that were determined to be removed.
#
for mn_key in keys_to_remove: lisp_map_notify_queue.pop(mn_key)
return
#enddef
#
# lisp_decrypt_map_register
#
# Check if we should just return a non-encrypted packet, or decrypt and return
# a plaintext Map-Register message.
#
def lisp_decrypt_map_register(packet):
#
# Parse first 4 bytes which is not encrypted. If packet is not encrypted,
# return to caller. If it is encrypted, get 3-bit key-id next to e-bit.
#
header = socket.ntohl(struct.unpack("I", packet[0:4])[0])
e_bit = (header >> 13) & 0x1
if (e_bit == 0): return(packet)
ekey_id = (header >> 14) & 0x7
#
# Use 16-byte key which is 32 string characters.
#
try:
ekey = lisp_ms_encryption_keys[ekey_id]
ekey = ekey.zfill(32)
iv = "0" * 8
except:
lprint("Cannot decrypt Map-Register with key-id {}".format(ekey_id))
return(None)
#endtry
d = bold("Decrypt", False)
lprint("{} Map-Register with key-id {}".format(d, ekey_id))
plaintext = chacha.ChaCha(ekey, iv).decrypt(packet[4::])
return(packet[0:4] + plaintext)
#enddef
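#
# As an illustration of the header parsing above (values are hypothetical):
# if the first 32-bit word, after socket.ntohl(), is 0x30006000, then the
# e-bit ((header >> 13) & 0x1) is 1 and the key-id ((header >> 14) & 0x7)
# is 1, so the payload after the first 4 bytes is decrypted with
# lisp_ms_encryption_keys[1].
#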
#
# lisp_process_map_register
#
# Process received Map-Register message.
#
def lisp_process_map_register(lisp_sockets, packet, source, sport):
global lisp_registered_count
#
# First check if we are expecting an encrypted Map-Register. This call
# will either return an unencrypted packet, a decrypted packet, or None
# if the key-id from the Map-Register is not registered.
#
packet = lisp_decrypt_map_register(packet)
if (packet == None): return
map_register = lisp_map_register()
orig_packet, packet = map_register.decode(packet)
if (packet == None):
lprint("Could not decode Map-Register packet")
return
#endif
map_register.sport = sport
map_register.print_map_register()
#
# Verify that authentication parameters are consistent.
#
sha1_or_sha2 = True
if (map_register.auth_len == LISP_SHA1_160_AUTH_DATA_LEN):
sha1_or_sha2 = True
#endif
if (map_register.alg_id == LISP_SHA_256_128_ALG_ID):
sha1_or_sha2 = False
#endif
#
# For tracking which (S,G) RLEs have changed.
#
rle_list = []
#
# Process each EID record in Map-Register message.
#
site = None
start_eid_records = packet
eid_list = []
record_count = map_register.record_count
for i in range(record_count):
eid_record = lisp_eid_record()
rloc_record = lisp_rloc_record()
packet = eid_record.decode(packet)
if (packet == None):
lprint("Could not decode EID-record in Map-Register packet")
return
#endif
eid_record.print_record(" ", False)
#
# Lookup lisp_site entry.
#
site_eid = lisp_site_eid_lookup(eid_record.eid, eid_record.group,
False)
match_str = site_eid.print_eid_tuple() if site_eid else None
#
# Allow overlapping ams-registered prefixes. Make sure we get the
# configured parent entry and not the registered more-specific. This
# registration could be a more-specific of the registered more-specific
# entry.
#
if (site_eid and site_eid.accept_more_specifics == False):
if (site_eid.eid_record_matches(eid_record) == False):
parent = site_eid.parent_for_more_specifics
if (parent): site_eid = parent
#endif
#endif
#
# Check if this is a new more-specific EID-prefix registration that
# will match a static configured site-eid with "accept-more-specifics"
# configured.
#
ams = (site_eid and site_eid.accept_more_specifics)
if (ams):
ms_site_eid = lisp_site_eid(site_eid.site)
ms_site_eid.dynamic = True
ms_site_eid.eid.copy_address(eid_record.eid)
ms_site_eid.group.copy_address(eid_record.group)
ms_site_eid.parent_for_more_specifics = site_eid
ms_site_eid.add_cache()
ms_site_eid.inherit_from_ams_parent()
site_eid.more_specific_registrations.append(ms_site_eid)
site_eid = ms_site_eid
else:
site_eid = lisp_site_eid_lookup(eid_record.eid, eid_record.group,
True)
#endif
eid_str = eid_record.print_eid_tuple()
if (site_eid == None):
notfound = bold("Site not found", False)
lprint(" {} for EID {}{}".format(notfound, green(eid_str, False),
", matched non-ams {}".format(green(match_str, False) if \
match_str else "")))
#
# Need to hop over RLOC-set so we can get to the next EID-record.
#
packet = rloc_record.end_of_rlocs(packet, eid_record.rloc_count)
if (packet == None):
lprint(" Could not decode RLOC-record in Map-Register packet")
return
#endif
continue
#endif
site = site_eid.site
if (ams):
e = site_eid.parent_for_more_specifics.print_eid_tuple()
lprint(" Found ams {} for site '{}' for registering prefix {}". \
format(green(e, False), site.site_name, green(eid_str, False)))
else:
e = green(site_eid.print_eid_tuple(), False)
lprint(" Found {} for site '{}' for registering prefix {}". \
format(e, site.site_name, green(eid_str, False)))
#endif
#
# Check if site configured in admin-shutdown mode.
#
if (site.shutdown):
lprint((" Rejecting registration for site '{}', configured in " +
"admin-shutdown state").format(site.site_name))
packet = rloc_record.end_of_rlocs(packet, eid_record.rloc_count)
continue
#endif
#
# Verify authentication before processing locator-set. Quick hack
# while I figure out why sha1 and sha2 authentication is not working
# from cisco. An NX-OS Map-Register will have a 0 nonce. We are going
# to use this to bypass the authentication check.
#
key_id = map_register.key_id
if (site.auth_key.has_key(key_id)):
password = site.auth_key[key_id]
else:
password = ""
#endif
auth_good = lisp_verify_auth(orig_packet, map_register.alg_id,
map_register.auth_data, password)
dynamic = "dynamic " if site_eid.dynamic else ""
passfail = bold("passed" if auth_good else "failed", False)
key_id = "key-id {}".format(key_id) if key_id == map_register.key_id \
else "bad key-id {}".format(map_register.key_id)
lprint(" Authentication {} for {}EID-prefix {}, {}".format( \
passfail, dynamic, green(eid_str, False), key_id))
#
# If the IPv6 EID is a CGA, verify signature if it exists in an
# RLOC-record.
#
cga_good = True
is_crypto_eid = (lisp_get_eid_hash(eid_record.eid) != None)
if (is_crypto_eid or site_eid.require_signature):
required = "Required " if site_eid.require_signature else ""
eid_str = green(eid_str, False)
rloc = lisp_find_sig_in_rloc_set(packet, eid_record.rloc_count)
if (rloc == None):
cga_good = False
lprint((" {}EID-crypto-hash signature verification {} " + \
"for EID-prefix {}, no signature found").format(required,
bold("failed", False), eid_str))
else:
cga_good = lisp_verify_cga_sig(eid_record.eid, rloc)
passfail = bold("passed" if cga_good else "failed", False)
lprint((" {}EID-crypto-hash signature verification {} " + \
"for EID-prefix {}").format(required, passfail, eid_str))
#endif
#endif
if (auth_good == False or cga_good == False):
packet = rloc_record.end_of_rlocs(packet, eid_record.rloc_count)
if (packet == None):
lprint(" Could not decode RLOC-record in Map-Register packet")
return
#endif
continue
#endif
#
# If merge is being requested, get the individual site-eid. If not, and
# what was cached had the merge bit set, set a flag to issue an error.
#
if (map_register.merge_register_requested):
parent = site_eid
parent.inconsistent_registration = False
#
# Clear out all registrations, there is a new site-id registering.
# Or there can be multiple sites registering for a multicast (S,G).
#
if (site_eid.group.is_null()):
if (parent.site_id != map_register.site_id):
parent.site_id = map_register.site_id
parent.registered = False
parent.individual_registrations = {}
parent.registered_rlocs = []
lisp_registered_count -= 1
#endif
#endif
key = source.address + map_register.xtr_id
if (site_eid.individual_registrations.has_key(key)):
site_eid = site_eid.individual_registrations[key]
else:
site_eid = lisp_site_eid(site)
site_eid.eid.copy_address(parent.eid)
site_eid.group.copy_address(parent.group)
parent.individual_registrations[key] = site_eid
#endif
else:
site_eid.inconsistent_registration = \
site_eid.merge_register_requested
#endif
site_eid.map_registers_received += 1
#
# If TTL is 0, unregister entry if source of Map-Register is in the
# list of currently registered RLOCs.
#
bad = (site_eid.is_rloc_in_rloc_set(source) == False)
if (eid_record.record_ttl == 0 and bad):
lprint(" Ignore deregistration request from {}".format( \
red(source.print_address_no_iid(), False)))
continue
#endif
#
# Clear out previously stored RLOCs. Put new ones in if validated
# against configured ones.
#
previous_rlocs = site_eid.registered_rlocs
site_eid.registered_rlocs = []
#
# Process each RLOC record in EID record.
#
start_rloc_records = packet
for j in range(eid_record.rloc_count):
rloc_record = lisp_rloc_record()
packet = rloc_record.decode(packet, None)
if (packet == None):
lprint(" Could not decode RLOC-record in Map-Register packet")
return
#endif
rloc_record.print_record(" ")
#
# Run RLOC in Map-Register against configured RLOC policies.
#
if (len(site.allowed_rlocs) > 0):
addr_str = rloc_record.rloc.print_address()
if (site.allowed_rlocs.has_key(addr_str) == False):
lprint((" Reject registration, RLOC {} not " + \
"configured in allowed RLOC-set").format( \
red(addr_str, False)))
site_eid.registered = False
packet = rloc_record.end_of_rlocs(packet,
eid_record.rloc_count - j - 1)
break
#endif
#endif
#
# RLOC validated good. Otherwise, go to next EID record
#
rloc = lisp_rloc()
rloc.store_rloc_from_record(rloc_record, None, source)
#
# If the source of the Map-Register is in the locator-set, then
# store if it wants Map-Notify messages when a new locator-set
# is registered later.
#
if (source.is_exact_match(rloc.rloc)):
rloc.map_notify_requested = map_register.map_notify_requested
#endif
#
# Add to RLOC set for site-eid.
#
site_eid.registered_rlocs.append(rloc)
#endfor
changed_rloc_set = \
(site_eid.do_rloc_sets_match(previous_rlocs) == False)
#
# Do not replace RLOCs if the Map-Register is a refresh and the
# locator-set is different.
#
if (map_register.map_register_refresh and changed_rloc_set and
site_eid.registered):
lprint(" Reject registration, refreshes cannot change RLOC-set")
site_eid.registered_rlocs = previous_rlocs
continue
#endif
#
# Copy fields from packet into internal data structure. First set
# site EID specific state.
#
if (site_eid.registered == False):
site_eid.first_registered = lisp_get_timestamp()
lisp_registered_count += 1
#endif
site_eid.last_registered = lisp_get_timestamp()
site_eid.registered = (eid_record.record_ttl != 0)
site_eid.last_registerer = source
#
# Now set site specific state.
#
site_eid.auth_sha1_or_sha2 = sha1_or_sha2
site_eid.proxy_reply_requested = map_register.proxy_reply_requested
site_eid.lisp_sec_present = map_register.lisp_sec_present
site_eid.map_notify_requested = map_register.map_notify_requested
site_eid.mobile_node_requested = map_register.mobile_node
site_eid.merge_register_requested = \
map_register.merge_register_requested
site_eid.use_register_ttl_requested = map_register.use_ttl_for_timeout
if (site_eid.use_register_ttl_requested):
site_eid.register_ttl = eid_record.store_ttl()
else:
site_eid.register_ttl = LISP_SITE_TIMEOUT_CHECK_INTERVAL * 3
#endif
site_eid.xtr_id_present = map_register.xtr_id_present
if (site_eid.xtr_id_present):
site_eid.xtr_id = map_register.xtr_id
site_eid.site_id = map_register.site_id
#endif
#
# If merge requested, do it now for this EID-prefix.
#
if (map_register.merge_register_requested):
if (parent.merge_in_site_eid(site_eid)):
rle_list.append([eid_record.eid, eid_record.group])
#endif
if (map_register.map_notify_requested):
lisp_send_merged_map_notify(lisp_sockets, parent, map_register,
eid_record)
#endif
#endif
if (changed_rloc_set == False): continue
if (len(rle_list) != 0): continue
eid_list.append(site_eid.print_eid_tuple())
#
# Send Map-Notify if the RLOC-set changed for this site-eid. Send it
# to the previously registered RLOCs only if they requested it. Do
# not consider RLOC-sets with RLEs in them because at the end of
# the EID-record loop, we'll send a multicast Map-Notify.
#
eid_record = eid_record.encode()
eid_record += start_rloc_records
el = [site_eid.print_eid_tuple()]
lprint(" Changed RLOC-set, Map-Notifying old RLOC-set")
for rloc in previous_rlocs:
if (rloc.map_notify_requested == False): continue
if (rloc.rloc.is_exact_match(source)): continue
lisp_build_map_notify(lisp_sockets, eid_record, el, 1, rloc.rloc,
LISP_CTRL_PORT, map_register.nonce, map_register.key_id,
map_register.alg_id, map_register.auth_len, site, False)
#endfor
#
# Check subscribers.
#
lisp_notify_subscribers(lisp_sockets, eid_record, site_eid.eid, site)
#endfor
#
# Send Map-Notify to ITRs if any (S,G) RLE has changed.
#
if (len(rle_list) != 0):
lisp_queue_multicast_map_notify(lisp_sockets, rle_list)
#endif
#
# The merged Map-Notify will serve as a Map-Register ack, so we don't need
# to send another one below.
#
if (map_register.merge_register_requested): return
#
# Should we ack the Map-Register? Only if the Want-Map-Notify bit was set
# by the registerer.
#
if (map_register.map_notify_requested and site != None):
lisp_build_map_notify(lisp_sockets, start_eid_records, eid_list,
map_register.record_count, source, sport, map_register.nonce,
map_register.key_id, map_register.alg_id, map_register.auth_len,
site, True)
#endif
return
#enddef
#
# lisp_process_multicast_map_notify
#
# Have the ITR process receive a multicast Map-Notify message. We will update
# the map-cache with a new RLE for the (S,G) entry. We do not have to
# authenticate the Map-Notify or send a Map-Notify-Ack since the lisp-etr
# process has already done so.
#
def lisp_process_multicast_map_notify(packet, source):
map_notify = lisp_map_notify("")
packet = map_notify.decode(packet)
if (packet == None):
lprint("Could not decode Map-Notify packet")
return
#endif
map_notify.print_notify()
if (map_notify.record_count == 0): return
eid_records = map_notify.eid_records
for i in range(map_notify.record_count):
eid_record = lisp_eid_record()
eid_records = eid_record.decode(eid_records)
if (eid_records == None): return
eid_record.print_record(" ", False)
#
# Get or create map-cache entry for (S,G).
#
mc = lisp_map_cache_lookup(eid_record.eid, eid_record.group)
if (mc == None):
allow, x, y = lisp_allow_gleaning(eid_record.eid, eid_record.group,
None)
if (allow == False): continue
mc = lisp_mapping(eid_record.eid, eid_record.group, [])
mc.add_cache()
#endif
#
# Gleaned map-cache entries always override what is registered in
# the mapping system, since the mapping system RLE entries are RTRs
# and RTRs store gleaned mappings for group members.
#
if (mc.gleaned):
lprint("Ignore Map-Notify for gleaned {}".format( \
green(mc.print_eid_tuple(), False)))
continue
#endif
mc.mapping_source = None if source == "lisp-etr" else source
mc.map_cache_ttl = eid_record.store_ttl()
#
# If no RLOCs in the Map-Notify and we had RLOCs in the existing
# map-cache entry, remove them.
#
if (len(mc.rloc_set) != 0 and eid_record.rloc_count == 0):
mc.rloc_set = []
mc.build_best_rloc_set()
lisp_write_ipc_map_cache(True, mc)
lprint("Update {} map-cache entry with no RLOC-set".format( \
green(mc.print_eid_tuple(), False)))
continue
#endif
rtr_mc = mc.rtrs_in_rloc_set()
#
# If there are RTRs in the RLOC set for an existing map-cache entry,
# only put RTR RLOCs from the Map-Notify in the map-cache.
#
for j in range(eid_record.rloc_count):
rloc_record = lisp_rloc_record()
eid_records = rloc_record.decode(eid_records, None)
rloc_record.print_record(" ")
if (eid_record.group.is_null()): continue
if (rloc_record.rle == None): continue
#
# Get copy of stats from old stored record so the display can
# look continuous even though the physical pointer is changing.
#
stats = mc.rloc_set[0].stats if len(mc.rloc_set) != 0 else None
#
# Store in map-cache.
#
rloc = lisp_rloc()
rloc.store_rloc_from_record(rloc_record, None, mc.mapping_source)
if (stats != None): rloc.stats = copy.deepcopy(stats)
if (rtr_mc and rloc.is_rtr() == False): continue
mc.rloc_set = [rloc]
mc.build_best_rloc_set()
lisp_write_ipc_map_cache(True, mc)
lprint("Update {} map-cache entry with RLE {}".format( \
green(mc.print_eid_tuple(), False),
rloc.rle.print_rle(False, True)))
#endfor
#endfor
return
#enddef
#
# lisp_process_map_notify
#
# Process Map-Notify message. All that needs to be done is to validate it with
# the Map-Server that sent it and return a Map-Notify-Ack.
#
def lisp_process_map_notify(lisp_sockets, orig_packet, source):
map_notify = lisp_map_notify("")
packet = map_notify.decode(orig_packet)
if (packet == None):
lprint("Could not decode Map-Notify packet")
return
#endif
map_notify.print_notify()
#
# Get map-server so we can do statistics and find auth-key, if an auth-key
# was provided in a Map-Notify message.
#
s = source.print_address()
if (map_notify.alg_id != 0 or map_notify.auth_len != 0):
ms = None
for key in lisp_map_servers_list:
if (key.find(s) == -1): continue
ms = lisp_map_servers_list[key]
#endfor
if (ms == None):
lprint((" Could not find Map-Server {} to authenticate " + \
"Map-Notify").format(s))
return
#endif
ms.map_notifies_received += 1
auth_good = lisp_verify_auth(packet, map_notify.alg_id,
map_notify.auth_data, ms.password)
lprint(" Authentication {} for Map-Notify".format("succeeded" if \
auth_good else "failed"))
if (auth_good == False): return
else:
ms = lisp_ms(s, None, "", 0, "", False, False, False, False, 0, 0, 0,
None)
#endif
#
# Send out Map-Notify-Ack. Skip over packet so lisp_send_map_notify_ack()
# starts the packet with EID-records.
#
eid_records = map_notify.eid_records
if (map_notify.record_count == 0):
lisp_send_map_notify_ack(lisp_sockets, eid_records, map_notify, ms)
return
#endif
#
# If this is a Map-Notify for an (S,G) entry, send the message to the
# lisp-itr process so it can update its map-cache for an active source
# in this site. There is probably an RLE change that the ITR needs to know
# about.
#
eid_record = lisp_eid_record()
packet = eid_record.decode(eid_records)
if (packet == None): return
eid_record.print_record(" ", False)
for j in range(eid_record.rloc_count):
rloc_record = lisp_rloc_record()
packet = rloc_record.decode(packet, None)
if (packet == None):
lprint(" Could not decode RLOC-record in Map-Notify packet")
return
#endif
rloc_record.print_record(" ")
#endfor
#
# Right now, don't do anything with non-multicast EID records.
#
if (eid_record.group.is_null() == False):
#
# Forward to lisp-itr process via the lisp-core process so multicast
# Map-Notify messages are processed by the ITR process.
#
lprint("Send {} Map-Notify IPC message to ITR process".format( \
green(eid_record.print_eid_tuple(), False)))
ipc = lisp_control_packet_ipc(orig_packet, s, "lisp-itr", 0)
lisp_ipc(ipc, lisp_sockets[2], "lisp-core-pkt")
#endif
#
# Send Map-Notify-Ack after processing contents of Map-Notify.
#
lisp_send_map_notify_ack(lisp_sockets, eid_records, map_notify, ms)
return
#enddef
#
# lisp_process_map_notify_ack
#
# Process received Map-Notify-Ack. This causes the Map-Notify to be removed
# from the lisp_map_notify_queue{}.
#
def lisp_process_map_notify_ack(packet, source):
map_notify = lisp_map_notify("")
packet = map_notify.decode(packet)
if (packet == None):
lprint("Could not decode Map-Notify-Ack packet")
return
#endif
map_notify.print_notify()
#
# Get an EID-prefix out of the Map-Notify-Ack so we can find the site
# associated with it.
#
if (map_notify.record_count < 1):
lprint("No EID-prefix found, cannot authenticate Map-Notify-Ack")
return
#endif
eid_record = lisp_eid_record()
if (eid_record.decode(map_notify.eid_records) == None):
lprint("Could not decode EID-record, cannot authenticate " +
"Map-Notify-Ack")
return
#endif
eid_record.print_record(" ", False)
eid_str = eid_record.print_eid_tuple()
#
# Find site associated with EID-prefix from first record.
#
if (map_notify.alg_id != LISP_NONE_ALG_ID and map_notify.auth_len != 0):
site_eid = lisp_sites_by_eid.lookup_cache(eid_record.eid, True)
if (site_eid == None):
notfound = bold("Site not found", False)
lprint(("{} for EID {}, cannot authenticate Map-Notify-Ack"). \
format(notfound, green(eid_str, False)))
return
#endif
site = site_eid.site
#
# Count it.
#
site.map_notify_acks_received += 1
key_id = map_notify.key_id
if (site.auth_key.has_key(key_id)):
password = site.auth_key[key_id]
else:
password = ""
#endif
auth_good = lisp_verify_auth(packet, map_notify.alg_id,
map_notify.auth_data, password)
key_id = "key-id {}".format(key_id) if key_id == map_notify.key_id \
else "bad key-id {}".format(map_notify.key_id)
lprint(" Authentication {} for Map-Notify-Ack, {}".format( \
"succeeded" if auth_good else "failed", key_id))
if (auth_good == False): return
#endif
#
# Remove Map-Notify from retransmission queue.
#
if (map_notify.retransmit_timer): map_notify.retransmit_timer.cancel()
etr = source.print_address()
key = map_notify.nonce_key
if (lisp_map_notify_queue.has_key(key)):
map_notify = lisp_map_notify_queue.pop(key)
if (map_notify.retransmit_timer): map_notify.retransmit_timer.cancel()
lprint("Dequeue Map-Notify from retransmit queue, key is: {}". \
format(key))
else:
lprint("Map-Notify with nonce 0x{} queue entry not found for {}". \
format(map_notify.nonce_key, red(etr, False)))
#endif
return
#enddef
#
# lisp_map_referral_loop
#
# Check to see if the arrived Map-Referral EID-prefix is more-specific than the
# last one we received.
#
def lisp_map_referral_loop(mr, eid, group, action, s):
if (action not in (LISP_DDT_ACTION_NODE_REFERRAL,
LISP_DDT_ACTION_MS_REFERRAL)): return(False)
if (mr.last_cached_prefix[0] == None): return(False)
#
# Check group first, if any. Then EID-prefix as source if (S,G).
#
loop = False
if (group.is_null() == False):
loop = mr.last_cached_prefix[1].is_more_specific(group)
#endif
if (loop == False):
loop = mr.last_cached_prefix[0].is_more_specific(eid)
#endif
if (loop):
prefix_str = lisp_print_eid_tuple(eid, group)
cached_str = lisp_print_eid_tuple(mr.last_cached_prefix[0],
mr.last_cached_prefix[1])
lprint(("Map-Referral prefix {} from {} is not more-specific " + \
"than cached prefix {}").format(green(prefix_str, False), s,
cached_str))
#endif
return(loop)
#enddef
#
# lisp_process_map_referral
#
# This function processes a Map-Referral message by a Map-Resolver.
#
def lisp_process_map_referral(lisp_sockets, packet, source):
map_referral = lisp_map_referral()
packet = map_referral.decode(packet)
if (packet == None):
lprint("Could not decode Map-Referral packet")
return
#endif
map_referral.print_map_referral()
s = source.print_address()
nonce = map_referral.nonce
#
# Process each EID record in Map-Referral message.
#
for i in range(map_referral.record_count):
eid_record = lisp_eid_record()
packet = eid_record.decode(packet)
if (packet == None):
lprint("Could not decode EID-record in Map-Referral packet")
return
#endif
eid_record.print_record(" ", True)
#
# Check if we have an outstanding request for this Map-Referral reply.
#
key = str(nonce)
if (key not in lisp_ddt_map_requestQ):
lprint(("Map-Referral nonce 0x{} from {} not found in " + \
"Map-Request queue, EID-record ignored").format( \
lisp_hex_string(nonce), s))
continue
#endif
mr = lisp_ddt_map_requestQ[key]
if (mr == None):
lprint(("No Map-Request queue entry found for Map-Referral " +
"nonce 0x{} from {}, EID-record ignored").format( \
lisp_hex_string(nonce), s))
continue
#endif
#
# Check for Map-Referral looping. If there is no loop, cache the EID
# returned from the Map-Referral in the Map-Request queue entry.
#
if (lisp_map_referral_loop(mr, eid_record.eid, eid_record.group,
eid_record.action, s)):
mr.dequeue_map_request()
continue
#endif
mr.last_cached_prefix[0] = eid_record.eid
mr.last_cached_prefix[1] = eid_record.group
#
# Lookup referral in referral-cache.
#
add_or_replace = False
referral = lisp_referral_cache_lookup(eid_record.eid, eid_record.group,
True)
if (referral == None):
add_or_replace = True
referral = lisp_referral()
referral.eid = eid_record.eid
referral.group = eid_record.group
if (eid_record.ddt_incomplete == False): referral.add_cache()
elif (referral.referral_source.not_set()):
lprint("Do not replace static referral entry {}".format( \
green(referral.print_eid_tuple(), False)))
mr.dequeue_map_request()
continue
#endif
action = eid_record.action
referral.referral_source = source
referral.referral_type = action
ttl = eid_record.store_ttl()
referral.referral_ttl = ttl
referral.expires = lisp_set_timestamp(ttl)
#
# Mark locator up if the Map-Referral source is in the referral-set.
#
negative = referral.is_referral_negative()
if (referral.referral_set.has_key(s)):
ref_node = referral.referral_set[s]
if (ref_node.updown == False and negative == False):
ref_node.updown = True
lprint("Change up/down status for referral-node {} to up". \
format(s))
elif (ref_node.updown == True and negative == True):
ref_node.updown = False
lprint(("Change up/down status for referral-node {} " + \
"to down, received negative referral").format(s))
#endif
#endif
#
# Set dirty-bit so we can remove referral-nodes from the cached entry
# that weren't in the packet.
#
dirty_set = {}
for key in referral.referral_set: dirty_set[key] = None
#
# Process each referral RLOC-record in EID record.
#
for i in range(eid_record.rloc_count):
rloc_record = lisp_rloc_record()
packet = rloc_record.decode(packet, None)
if (packet == None):
lprint("Could not decode RLOC-record in Map-Referral packet")
return
#endif
rloc_record.print_record(" ")
#
# Copy over existing referral-node
#
addr_str = rloc_record.rloc.print_address()
if (referral.referral_set.has_key(addr_str) == False):
ref_node = lisp_referral_node()
ref_node.referral_address.copy_address(rloc_record.rloc)
referral.referral_set[addr_str] = ref_node
if (s == addr_str and negative): ref_node.updown = False
else:
ref_node = referral.referral_set[addr_str]
if (dirty_set.has_key(addr_str)): dirty_set.pop(addr_str)
#endif
ref_node.priority = rloc_record.priority
ref_node.weight = rloc_record.weight
#endfor
#
# Now remove dirty referral-node entries.
#
for key in dirty_set: referral.referral_set.pop(key)
eid_str = referral.print_eid_tuple()
if (add_or_replace):
if (eid_record.ddt_incomplete):
lprint("Suppress add {} to referral-cache".format( \
green(eid_str, False)))
else:
lprint("Add {}, referral-count {} to referral-cache".format( \
green(eid_str, False), eid_record.rloc_count))
#endif
else:
lprint("Replace {}, referral-count: {} in referral-cache".format( \
green(eid_str, False), eid_record.rloc_count))
#endif
#
# Process actions.
#
if (action == LISP_DDT_ACTION_DELEGATION_HOLE):
lisp_send_negative_map_reply(mr.lisp_sockets, referral.eid,
referral.group, mr.nonce, mr.itr, mr.sport, 15, None, False)
mr.dequeue_map_request()
#endif
if (action == LISP_DDT_ACTION_NOT_AUTH):
if (mr.tried_root):
lisp_send_negative_map_reply(mr.lisp_sockets, referral.eid,
referral.group, mr.nonce, mr.itr, mr.sport, 0, None, False)
mr.dequeue_map_request()
else:
lisp_send_ddt_map_request(mr, True)
#endif
#endif
if (action == LISP_DDT_ACTION_MS_NOT_REG):
if (referral.referral_set.has_key(s)):
ref_node = referral.referral_set[s]
ref_node.updown = False
#endif
if (len(referral.referral_set) == 0):
mr.dequeue_map_request()
else:
lisp_send_ddt_map_request(mr, False)
#endif
#endif
if (action in (LISP_DDT_ACTION_NODE_REFERRAL,
LISP_DDT_ACTION_MS_REFERRAL)):
if (mr.eid.is_exact_match(eid_record.eid)):
if (not mr.tried_root):
lisp_send_ddt_map_request(mr, True)
else:
lisp_send_negative_map_reply(mr.lisp_sockets,
referral.eid, referral.group, mr.nonce, mr.itr,
mr.sport, 15, None, False)
mr.dequeue_map_request()
#endif
else:
lisp_send_ddt_map_request(mr, False)
#endif
#endif
if (action == LISP_DDT_ACTION_MS_ACK): mr.dequeue_map_request()
#endfor
return
#enddef
#
# lisp_process_ecm
#
# Process a received Encapsulated-Control-Message. It is assumed for right now
# that all ECMs have a Map-Request embedded.
#
def lisp_process_ecm(lisp_sockets, packet, source, ecm_port):
ecm = lisp_ecm(0)
packet = ecm.decode(packet)
if (packet == None):
lprint("Could not decode ECM packet")
return
#endif
ecm.print_ecm()
header = lisp_control_header()
if (header.decode(packet) == None):
lprint("Could not decode control header")
return
#endif
packet_type = header.type
del(header)
if (packet_type != LISP_MAP_REQUEST):
lprint("Received ECM without Map-Request inside")
return
#endif
#
# Process Map-Request.
#
mr_port = ecm.udp_sport
lisp_process_map_request(lisp_sockets, packet, source, ecm_port,
ecm.source, mr_port, ecm.ddt, -1)
return
#enddef
#------------------------------------------------------------------------------
#
# lisp_send_map_register
#
# Compute authentication for the Map-Register message and send it to the
# supplied Map-Server.
#
def lisp_send_map_register(lisp_sockets, packet, map_register, ms):
#
# If we are doing LISP-Decent and have a multicast group configured as
# a Map-Server, we can't join the group by using the group so we have to
# send to the loopback address to bootstrap our membership. We join to
# one other member of the peer-group so we can get the group membership.
#
dest = ms.map_server
if (lisp_decent_push_configured and dest.is_multicast_address() and
(ms.map_registers_multicast_sent == 1 or ms.map_registers_sent == 1)):
dest = copy.deepcopy(dest)
dest.address = 0x7f000001
b = bold("Bootstrap", False)
g = ms.map_server.print_address_no_iid()
lprint("{} mapping system for peer-group {}".format(b, g))
#endif
#
# Modify authentication hash in Map-Register message if supplied when
# lisp_map_register() was called.
#
packet = lisp_compute_auth(packet, map_register, ms.password)
#
# Should we encrypt the Map-Register? Use 16-byte key which is
# 32 string characters.
#
if (ms.ekey != None):
ekey = ms.ekey.zfill(32)
iv = "0" * 8
ciphertext = chacha.ChaCha(ekey, iv).encrypt(packet[4::])
packet = packet[0:4] + ciphertext
e = bold("Encrypt", False)
lprint("{} Map-Register with key-id {}".format(e, ms.ekey_id))
#endif
decent = ""
if (lisp_decent_pull_xtr_configured()):
decent = ", decent-index {}".format(bold(ms.dns_name, False))
#endif
lprint("Send Map-Register to map-server {}{}{}".format( \
dest.print_address(), ", ms-name '{}'".format(ms.ms_name), decent))
lisp_send(lisp_sockets, dest, LISP_CTRL_PORT, packet)
return
#enddef
#
# lisp_send_ipc_to_core
#
# Send LISP control packet that is to be sourced from UDP port 4342 to the
# lisp-core process.
#
def lisp_send_ipc_to_core(lisp_socket, packet, dest, port):
source = lisp_socket.getsockname()
dest = dest.print_address_no_iid()
lprint("Send IPC {} bytes to {} {}, control-packet: {}".format( \
len(packet), dest, port, lisp_format_packet(packet)))
packet = lisp_control_packet_ipc(packet, source, dest, port)
lisp_ipc(packet, lisp_socket, "lisp-core-pkt")
return
#enddef
#
# lisp_send_map_reply
#
# Send Map-Reply message to supplied destination. Note the destination must
# be routable in RLOC space.
#
def lisp_send_map_reply(lisp_sockets, packet, dest, port):
lprint("Send Map-Reply to {}".format(dest.print_address_no_iid()))
lisp_send_ipc_to_core(lisp_sockets[2], packet, dest, port)
return
#enddef
#
# lisp_send_map_referral
#
# Send Map-Referral message to supplied destination. Note the destination must
# be routable in RLOC space.
#
def lisp_send_map_referral(lisp_sockets, packet, dest, port):
lprint("Send Map-Referral to {}".format(dest.print_address()))
lisp_send_ipc_to_core(lisp_sockets[2], packet, dest, port)
return
#enddef
#
# lisp_send_map_notify
#
# Send Map-Notify message to supplied destination. Note the destination must
# be routable in RLOC space.
#
def lisp_send_map_notify(lisp_sockets, packet, dest, port):
lprint("Send Map-Notify to xTR {}".format(dest.print_address()))
lisp_send_ipc_to_core(lisp_sockets[2], packet, dest, port)
return
#enddef
#
# lisp_send_ecm
#
# Send Encapsulated Control Message.
#
def lisp_send_ecm(lisp_sockets, packet, inner_source, inner_sport, inner_dest,
outer_dest, to_etr=False, to_ms=False, ddt=False):
if (inner_source == None or inner_source.is_null()):
inner_source = inner_dest
#endif
#
# For sending Map-Requests, if the NAT-traversal configured, use same
# socket used to send the Info-Request.
#
if (lisp_nat_traversal):
sport = lisp_get_any_translated_port()
if (sport != None): inner_sport = sport
#endif
ecm = lisp_ecm(inner_sport)
ecm.to_etr = to_etr if lisp_is_running("lisp-etr") else False
ecm.to_ms = to_ms if lisp_is_running("lisp-ms") else False
ecm.ddt = ddt
ecm_packet = ecm.encode(packet, inner_source, inner_dest)
if (ecm_packet == None):
lprint("Could not encode ECM message")
return
#endif
ecm.print_ecm()
packet = ecm_packet + packet
addr_str = outer_dest.print_address_no_iid()
lprint("Send Encapsulated-Control-Message to {}".format(addr_str))
dest = lisp_convert_4to6(addr_str)
lisp_send(lisp_sockets, dest, LISP_CTRL_PORT, packet)
return
#enddef
#------------------------------------------------------------------------------
#
# Below are constant definitions used for internal data structures.
#
LISP_AFI_GEO_COORD = -3
LISP_AFI_IID_RANGE = -2
LISP_AFI_ULTIMATE_ROOT = -1
LISP_AFI_NONE = 0
LISP_AFI_IPV4 = 1
LISP_AFI_IPV6 = 2
LISP_AFI_MAC = 6
LISP_AFI_E164 = 8
LISP_AFI_NAME = 17
LISP_AFI_LCAF = 16387
LISP_RLOC_UNKNOWN_STATE = 0
LISP_RLOC_UP_STATE = 1
LISP_RLOC_DOWN_STATE = 2
LISP_RLOC_UNREACH_STATE = 3
LISP_RLOC_NO_ECHOED_NONCE_STATE = 4
LISP_RLOC_ADMIN_DOWN_STATE = 5
LISP_AUTH_NONE = 0
LISP_AUTH_MD5 = 1
LISP_AUTH_SHA1 = 2
LISP_AUTH_SHA2 = 3
#------------------------------------------------------------------------------
#
# This is a general address format for EIDs, RLOCs, EID-prefixes in any AFI or
# LCAF format.
#
LISP_IPV4_HOST_MASK_LEN = 32
LISP_IPV6_HOST_MASK_LEN = 128
LISP_MAC_HOST_MASK_LEN = 48
LISP_E164_HOST_MASK_LEN = 60
#
# byte_swap_64
#
# Byte-swap a 64-bit number.
#
def byte_swap_64(address):
addr = \
((address & 0x00000000000000ff) << 56) | \
((address & 0x000000000000ff00) << 40) | \
((address & 0x0000000000ff0000) << 24) | \
((address & 0x00000000ff000000) << 8) | \
((address & 0x000000ff00000000) >> 8) | \
((address & 0x0000ff0000000000) >> 24) | \
((address & 0x00ff000000000000) >> 40) | \
((address & 0xff00000000000000) >> 56)
return(addr)
#enddef
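#
# For example, byte_swap_64(0x0102030405060708) returns 0x0807060504030201.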
#
# lisp_cache is a data structure to implement a multi-way tree. The first
# level array is an associative array of mask-lengths. Then each mask-length
# entry will be an associative array of the following key:
#
# <32-bit-instance-id> <16-bit-address-family> <eid-prefix>
#
# Data structure:
# self.cache{}
# self.cache_sorted[]
# self.cache{}.entries{}
# self.cache{}.entries_sorted[]
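#
# As an illustration of the key format built by build_key() below, the
# EID-prefix 10.0.0.0/8 in instance-ID 0 hashes to mask-length bucket
# 8 + 48 = 56 with lookup key "00000000" + "0001" + "0a000000"
# (instance-ID, AFI, address, all in hex).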
#
class lisp_cache_entries():
def __init__(self):
self.entries = {}
self.entries_sorted = []
#enddef
#endclass
class lisp_cache():
def __init__(self):
self.cache = {}
self.cache_sorted = []
self.cache_count = 0
#enddef
def cache_size(self):
return(self.cache_count)
#enddef
def build_key(self, prefix):
if (prefix.afi == LISP_AFI_ULTIMATE_ROOT):
ml = 0
elif (prefix.afi == LISP_AFI_IID_RANGE):
ml = prefix.mask_len
else:
ml = prefix.mask_len + 48
#endif
iid = lisp_hex_string(prefix.instance_id).zfill(8)
afi = lisp_hex_string(prefix.afi).zfill(4)
if (prefix.afi > 0):
if (prefix.is_binary()):
length = prefix.addr_length() * 2
addr = lisp_hex_string(prefix.address).zfill(length)
else:
addr = prefix.address
#endif
elif (prefix.afi == LISP_AFI_GEO_COORD):
afi = "8003"
addr = prefix.address.print_geo()
else:
afi = ""
addr = ""
#endif
key = iid + afi + addr
return([ml, key])
#enddef
def add_cache(self, prefix, entry):
if (prefix.is_binary()): prefix.zero_host_bits()
ml, key = self.build_key(prefix)
if (self.cache.has_key(ml) == False):
self.cache[ml] = lisp_cache_entries()
self.cache[ml].entries = {}
self.cache[ml].entries_sorted = []
self.cache_sorted = sorted(self.cache)
#endif
if (self.cache[ml].entries.has_key(key) == False):
self.cache_count += 1
#endif
self.cache[ml].entries[key] = entry
self.cache[ml].entries_sorted = sorted(self.cache[ml].entries)
#enddef
def lookup_cache(self, prefix, exact):
ml_key, key = self.build_key(prefix)
if (exact):
if (self.cache.has_key(ml_key) == False): return(None)
if (self.cache[ml_key].entries.has_key(key) == False): return(None)
return(self.cache[ml_key].entries[key])
#endif
found = None
for ml in self.cache_sorted:
if (ml_key < ml): return(found)
for entry_key in self.cache[ml].entries_sorted:
entries = self.cache[ml].entries
if (entry_key in entries):
entry = entries[entry_key]
if (entry == None): continue
if (prefix.is_more_specific(entry.eid)): found = entry
#endif
#endfor
#endfor
return(found)
#enddef
def delete_cache(self, prefix):
ml, key = self.build_key(prefix)
if (self.cache.has_key(ml) == False): return
if (self.cache[ml].entries.has_key(key) == False): return
self.cache[ml].entries.pop(key)
self.cache[ml].entries_sorted.remove(key)
self.cache_count -= 1
#enddef
def walk_cache(self, function, parms):
for ml in self.cache_sorted:
for key in self.cache[ml].entries_sorted:
entry = self.cache[ml].entries[key]
status, parms = function(entry, parms)
if (status == False): return(parms)
#endfor
#endfor
return(parms)
#enddef
def print_cache(self):
lprint("Printing contents of {}: ".format(self))
if (self.cache_size() == 0):
lprint(" Cache is empty")
return
#endif
for ml in self.cache_sorted:
for key in self.cache[ml].entries_sorted:
entry = self.cache[ml].entries[key]
lprint(" Mask-length: {}, key: {}, entry: {}".format(ml, key,
entry))
#endfor
#endfor
#enddef
#endclass
#
# Caches.
#
lisp_referral_cache = lisp_cache()
lisp_ddt_cache = lisp_cache()
lisp_sites_by_eid = lisp_cache()
lisp_map_cache = lisp_cache()
lisp_db_for_lookups = lisp_cache() # Elements are class lisp_mapping()
#
# lisp_map_cache_lookup
#
# Do hierarchical lookup in the lisp_map_cache lisp_cache(). This is used
# by the ITR and RTR data-planes.
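#
# A typical data-plane caller (sketch, not verbatim from this file) does:
#
#   mc = lisp_map_cache_lookup(inner_source, inner_dest)
#   if (mc == None): <build and send a Map-Request for the destination EID>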
#
def lisp_map_cache_lookup(source, dest):
multicast = dest.is_multicast_address()
#
# Look up destination in map-cache.
#
mc = lisp_map_cache.lookup_cache(dest, False)
if (mc == None):
eid_str = source.print_sg(dest) if multicast else dest.print_address()
eid_str = green(eid_str, False)
dprint("Lookup for EID {} not found in map-cache".format(eid_str))
return(None)
#endif
#
# Unicast lookup succeeded.
#
if (multicast == False):
m = green(mc.eid.print_prefix(), False)
dprint("Lookup for EID {} found map-cache entry {}".format( \
green(dest.print_address(), False), m))
return(mc)
#endif
#
# If destination is multicast, then do source lookup.
#
mc = mc.lookup_source_cache(source, False)
if (mc == None):
eid_str = source.print_sg(dest)
dprint("Lookup for EID {} not found in map-cache".format(eid_str))
return(None)
#endif
#
# Multicast lookup succeeded.
#
m = green(mc.print_eid_tuple(), False)
dprint("Lookup for EID {} found map-cache entry {}".format( \
green(source.print_sg(dest), False), m))
return(mc)
#enddef
#
# lisp_referral_cache_lookup
#
# Do hierarchical lookup in the lisp_referral_cache lisp_cache().
#
def lisp_referral_cache_lookup(eid, group, exact):
if (group and group.is_null()):
ref = lisp_referral_cache.lookup_cache(eid, exact)
return(ref)
#endif
#
# No source to do 2-stage lookup, return None.
#
if (eid == None or eid.is_null()): return(None)
#
# Do 2-stage lookup, first on group and within its structure for source.
# If we found both entries, return source entry. If we didn't find source
# entry, then return group entry if longest match requested.
#
ref = lisp_referral_cache.lookup_cache(group, exact)
if (ref == None): return(None)
sref = ref.lookup_source_cache(eid, exact)
if (sref): return(sref)
if (exact): ref = None
return(ref)
#enddef
#
# lisp_ddt_cache_lookup
#
# Do hierarchical lookup in the lisp_ddt_cache lisp_cache().
#
def lisp_ddt_cache_lookup(eid, group, exact):
if (group.is_null()):
ddt = lisp_ddt_cache.lookup_cache(eid, exact)
return(ddt)
#endif
#
# No source to do 2-stage lookup, return None.
#
if (eid.is_null()): return(None)
#
# Do 2-stage lookup, first on group and within its structure for source.
# If we found both entries, return source entry. If we didn't find source
# entry, then return group entry if longest match requested.
#
ddt = lisp_ddt_cache.lookup_cache(group, exact)
if (ddt == None): return(None)
sddt = ddt.lookup_source_cache(eid, exact)
if (sddt): return(sddt)
if (exact): ddt = None
return(ddt)
#enddef
#
# lisp_site_eid_lookup
#
# Do hierarchical lookup in the lisp_sites_by_eid lisp_cache().
#
def lisp_site_eid_lookup(eid, group, exact):
if (group.is_null()):
site_eid = lisp_sites_by_eid.lookup_cache(eid, exact)
return(site_eid)
#endif
#
# No source to do 2-stage lookup, return None.
#
if (eid.is_null()): return(None)
#
# Do 2-stage lookup, first on group and within its structure for source.
# If we found both entries, return source entry. If we didn't find source
# entry, then return group entry if longest match requested.
#
site_eid = lisp_sites_by_eid.lookup_cache(group, exact)
if (site_eid == None): return(None)
#
# There is a special case we have to deal with here. If there exists a
# (0.0.0.0/0, 224.0.0.0/4) entry that has been configured with accept-
# more-specifics, this entry will not be returned if there is a more-
# specific already cached. For instance, if a Map-Register was received
# for (1.1.1.1/32, 224.1.1.1/32), it will match the (0.0.0.0/0,
# 224.0.0.0/4) entry. But when (1.1.1.1/32, 224.1.1.1/32) is cached and
# a Map-Register is received for (2.2.2.2/32, 224.1.1.1/32), rather than
# matching the ams entry, it will match the more specific entry and return
# (*, 224.1.1.1/32). Since the source lookup will be performed below and
# not find 2.2.2.2, what is returned is 224.1.1.1/32 and not 224.0.0.0/4.
#
# So we will look at the returned entry and if a source is not found, we
# will check to see if the parent of the 224.1.1.1/32 matches the group
# we are looking up. This, of course, is only done for longest match
# lookups.
#
seid = site_eid.lookup_source_cache(eid, exact)
if (seid): return(seid)
if (exact):
site_eid = None
else:
parent = site_eid.parent_for_more_specifics
if (parent and parent.accept_more_specifics):
if (group.is_more_specific(parent.group)): site_eid = parent
#endif
#endif
return(site_eid)
#enddef
#
# LISP Address encodings. Both in AFI formats and LCAF formats.
#
# Here is an EID encoded in:
#
# Instance ID LISP Canonical Address Format:
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = 16387 | Rsvd1 | Flags |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Type = 2 | IID mask-len | 4 + n |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Instance ID |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = x | Address ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
# There is a Python peculiarity with shifting greater than 120 bits to the
# left. If the high-order bit hits bit 127, then it shifts it another 8 bits.
# This causes IPv6 addresses to lose their high-order byte. So note the check
# for shift >= 120 below.
#
class lisp_address():
def __init__(self, afi, addr_str, mask_len, iid):
self.afi = afi
self.mask_len = mask_len
self.instance_id = iid
self.iid_list = []
self.address = 0
if (addr_str != ""): self.store_address(addr_str)
#enddef
def copy_address(self, addr):
if (addr == None): return
self.afi = addr.afi
self.address = addr.address
self.mask_len = addr.mask_len
self.instance_id = addr.instance_id
self.iid_list = addr.iid_list
#enddef
def make_default_route(self, addr):
self.afi = addr.afi
self.instance_id = addr.instance_id
self.mask_len = 0
self.address = 0
#enddef
def make_default_multicast_route(self, addr):
self.afi = addr.afi
self.instance_id = addr.instance_id
if (self.afi == LISP_AFI_IPV4):
self.address = 0xe0000000
self.mask_len = 4
#endif
if (self.afi == LISP_AFI_IPV6):
self.address = 0xff << 120
self.mask_len = 8
#endif
if (self.afi == LISP_AFI_MAC):
self.address = 0xffffffffffff
self.mask_len = 48
#endif
#enddef
def not_set(self):
return(self.afi == LISP_AFI_NONE)
#enddef
def is_private_address(self):
if (self.is_ipv4() == False): return(False)
addr = self.address
if (((addr & 0xff000000) >> 24) == 10): return(True)
if (((addr & 0xff000000) >> 24) == 172):
byte2 = (addr & 0x00ff0000) >> 16
if (byte2 >= 16 and byte2 <= 31): return(True)
#endif
if (((addr & 0xffff0000) >> 16) == 0xc0a8): return(True)
return(False)
#enddef
def is_multicast_address(self):
if (self.is_ipv4()): return(self.is_ipv4_multicast())
if (self.is_ipv6()): return(self.is_ipv6_multicast())
if (self.is_mac()): return(self.is_mac_multicast())
return(False)
#enddef
def host_mask_len(self):
if (self.afi == LISP_AFI_IPV4): return(LISP_IPV4_HOST_MASK_LEN)
if (self.afi == LISP_AFI_IPV6): return(LISP_IPV6_HOST_MASK_LEN)
if (self.afi == LISP_AFI_MAC): return(LISP_MAC_HOST_MASK_LEN)
if (self.afi == LISP_AFI_E164): return(LISP_E164_HOST_MASK_LEN)
if (self.afi == LISP_AFI_NAME): return(len(self.address) * 8)
if (self.afi == LISP_AFI_GEO_COORD):
return(len(self.address.print_geo()) * 8)
#endif
return(0)
#enddef
def is_iana_eid(self):
if (self.is_ipv6() == False): return(False)
addr = self.address >> 96
return(addr == 0x20010005)
#enddef
def addr_length(self):
if (self.afi == LISP_AFI_IPV4): return(4)
if (self.afi == LISP_AFI_IPV6): return(16)
if (self.afi == LISP_AFI_MAC): return(6)
if (self.afi == LISP_AFI_E164): return(8)
if (self.afi == LISP_AFI_LCAF): return(0)
if (self.afi == LISP_AFI_NAME): return(len(self.address) + 1)
if (self.afi == LISP_AFI_IID_RANGE): return(4)
if (self.afi == LISP_AFI_GEO_COORD):
return(len(self.address.print_geo()))
#endif
return(0)
#enddef
def afi_to_version(self):
if (self.afi == LISP_AFI_IPV4): return(4)
if (self.afi == LISP_AFI_IPV6): return(6)
return(0)
#enddef
def packet_format(self):
#
# Note that "I" is used to produce 4 bytes because when "L" is used,
# it was producing 8 bytes in struct.pack().
#
if (self.afi == LISP_AFI_IPV4): return("I")
if (self.afi == LISP_AFI_IPV6): return("QQ")
if (self.afi == LISP_AFI_MAC): return("HHH")
if (self.afi == LISP_AFI_E164): return("II")
if (self.afi == LISP_AFI_LCAF): return("I")
return("")
#enddef
def pack_address(self):
packet_format = self.packet_format()
packet = ""
if (self.is_ipv4()):
packet = struct.pack(packet_format, socket.htonl(self.address))
elif (self.is_ipv6()):
addr1 = byte_swap_64(self.address >> 64)
addr2 = byte_swap_64(self.address & 0xffffffffffffffff)
packet = struct.pack(packet_format, addr1, addr2)
elif (self.is_mac()):
addr = self.address
addr1 = (addr >> 32) & 0xffff
addr2 = (addr >> 16) & 0xffff
addr3 = addr & 0xffff
packet = struct.pack(packet_format, addr1, addr2, addr3)
elif (self.is_e164()):
addr = self.address
addr1 = (addr >> 32) & 0xffffffff
addr2 = (addr & 0xffffffff)
packet = struct.pack(packet_format, addr1, addr2)
elif (self.is_dist_name()):
packet += self.address + "\0"
#endif
return(packet)
#enddef
def unpack_address(self, packet):
packet_format = self.packet_format()
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
addr = struct.unpack(packet_format, packet[:format_size])
if (self.is_ipv4()):
self.address = socket.ntohl(addr[0])
elif (self.is_ipv6()):
#
# Sigh, we have a high-order byte with zero-fill issue when
# parsing a binary IPv6 address from a packet. If we have an
# address that starts with fe::, then addr[0] is one byte in
# length and byte-swapping is not necessary (or we would make
# the high-order 16 bits 00fe). Sigh.
#
if (addr[0] <= 0xffff and (addr[0] & 0xff) == 0):
high = (addr[0] << 48) << 64
else:
high = byte_swap_64(addr[0]) << 64
#endif
low = byte_swap_64(addr[1])
self.address = high | low
elif (self.is_mac()):
short1 = addr[0]
short2 = addr[1]
short3 = addr[2]
self.address = (short1 << 32) + (short2 << 16) + short3
elif (self.is_e164()):
self.address = (addr[0] << 32) + addr[1]
elif (self.is_dist_name()):
packet, self.address = lisp_decode_dist_name(packet)
self.mask_len = len(self.address) * 8
format_size = 0
#endif
packet = packet[format_size::]
return(packet)
#enddef
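    #
    # Illustrative round-trip (comments only, not executed). pack_address()
    # emits the address in network byte order and unpack_address() consumes
    # the same encoding, for example with an IPv4 RLOC:
    #
    #   a = lisp_address(LISP_AFI_IPV4, "10.0.0.1", 32, 0)
    #   packed = a.pack_address()
    #   b = lisp_address(LISP_AFI_IPV4, "", 32, 0)
    #   b.unpack_address(packed)   # b.address == a.address afterwards
    #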
def is_ipv4(self):
return(True if (self.afi == LISP_AFI_IPV4) else False)
#enddef
def is_ipv4_link_local(self):
if (self.is_ipv4() == False): return(False)
return(((self.address >> 16) & 0xffff) == 0xa9fe)
#enddef
def is_ipv4_loopback(self):
if (self.is_ipv4() == False): return(False)
return(self.address == 0x7f000001)
#enddef
def is_ipv4_multicast(self):
if (self.is_ipv4() == False): return(False)
return(((self.address >> 24) & 0xf0) == 0xe0)
#enddef
def is_ipv4_string(self, addr_str):
return(addr_str.find(".") != -1)
#enddef
def is_ipv6(self):
return(True if (self.afi == LISP_AFI_IPV6) else False)
#enddef
def is_ipv6_link_local(self):
if (self.is_ipv6() == False): return(False)
return(((self.address >> 112) & 0xffff) == 0xfe80)
#enddef
def is_ipv6_string_link_local(self, addr_str):
return(addr_str.find("fe80::") != -1)
#enddef
def is_ipv6_loopback(self):
if (self.is_ipv6() == False): return(False)
return(self.address == 1)
#enddef
def is_ipv6_multicast(self):
if (self.is_ipv6() == False): return(False)
return(((self.address >> 120) & 0xff) == 0xff)
#enddef
def is_ipv6_string(self, addr_str):
return(addr_str.find(":") != -1)
#enddef
def is_mac(self):
return(True if (self.afi == LISP_AFI_MAC) else False)
#enddef
def is_mac_multicast(self):
if (self.is_mac() == False): return(False)
return((self.address & 0x010000000000) != 0)
#enddef
def is_mac_broadcast(self):
if (self.is_mac() == False): return(False)
return(self.address == 0xffffffffffff)
#enddef
def is_mac_string(self, addr_str):
return(len(addr_str) == 15 and addr_str.find("-") != -1)
#enddef
def is_link_local_multicast(self):
if (self.is_ipv4()):
return((0xe0ffff00 & self.address) == 0xe0000000)
#endif
if (self.is_ipv6()):
return((self.address >> 112) & 0xffff == 0xff02)
#endif
return(False)
#enddef
def is_null(self):
return(True if (self.afi == LISP_AFI_NONE) else False)
#enddef
def is_ultimate_root(self):
return(True if self.afi == LISP_AFI_ULTIMATE_ROOT else False)
#enddef
def is_iid_range(self):
return(True if self.afi == LISP_AFI_IID_RANGE else False)
#enddef
def is_e164(self):
return(True if (self.afi == LISP_AFI_E164) else False)
#enddef
def is_dist_name(self):
return(True if (self.afi == LISP_AFI_NAME) else False)
#enddef
def is_geo_prefix(self):
return(True if (self.afi == LISP_AFI_GEO_COORD) else False)
#enddef
def is_binary(self):
if (self.is_dist_name()): return(False)
if (self.is_geo_prefix()): return(False)
return(True)
#enddef
def store_address(self, addr_str):
if (self.afi == LISP_AFI_NONE): self.string_to_afi(addr_str)
#
# Parse instance-id.
#
i = addr_str.find("[")
j = addr_str.find("]")
if (i != -1 and j != -1):
self.instance_id = int(addr_str[i+1:j])
addr_str = addr_str[j+1::]
if (self.is_dist_name() == False):
addr_str = addr_str.replace(" ", "")
#endif
#endif
#
# Parse AFI based address.
#
if (self.is_ipv4()):
octet = addr_str.split(".")
value = int(octet[0]) << 24
value += int(octet[1]) << 16
value += int(octet[2]) << 8
value += int(octet[3])
self.address = value
elif (self.is_ipv6()):
#
# There will be a common IPv6 address input mistake that will
# occur. The address ff::/8 (or an address ff::1) is actually
# encoded as 0x00ff as the high-order 16-bits. The correct way to
# specify the prefix is ff00::/8 but one would wonder why the
# lower order 0x00 bits are needed if a /8 is used. So to
# summarize:
#
# Entering ff::/8 will give you the 0::/8 prefix.
# Entering ff00::/8 is not the same as ff00::/16.
#
            # Allow user to specify ff::/8 which allows for placing the
# byte in the high-order byte of the 128-bit quantity. Check
# for double-colon in the input string to detect the single byte
# and then below byte-swap the first 2-bytes.
#
odd_byte = (addr_str[2:4] == "::")
try:
addr_str = socket.inet_pton(socket.AF_INET6, addr_str)
except:
addr_str = socket.inet_pton(socket.AF_INET6, "0::0")
#endtry
addr_str = binascii.hexlify(addr_str)
if (odd_byte):
addr_str = addr_str[2:4] + addr_str[0:2] + addr_str[4::]
#endif
self.address = int(addr_str, 16)
elif (self.is_geo_prefix()):
geo = lisp_geo(None)
geo.name = "geo-prefix-{}".format(geo)
geo.parse_geo_string(addr_str)
self.address = geo
elif (self.is_mac()):
addr_str = addr_str.replace("-", "")
value = int(addr_str, 16)
self.address = value
elif (self.is_e164()):
addr_str = addr_str[1::]
value = int(addr_str, 16)
self.address = value << 4
elif (self.is_dist_name()):
self.address = addr_str.replace("'", "")
#endif
self.mask_len = self.host_mask_len()
#enddef
def store_prefix(self, prefix_str):
if (self.is_geo_string(prefix_str)):
index = prefix_str.find("]")
mask_len = len(prefix_str[index+1::]) * 8
elif (prefix_str.find("/") != -1):
prefix_str, mask_len = prefix_str.split("/")
else:
left = prefix_str.find("'")
if (left == -1): return
right = prefix_str.find("'", left+1)
if (right == -1): return
mask_len = len(prefix_str[left+1:right]) * 8
#endif
self.string_to_afi(prefix_str)
self.store_address(prefix_str)
self.mask_len = int(mask_len)
#enddef
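    #
    # Illustrative example (comments only, not executed). store_prefix()
    # strips the mask-length and stores the rest via store_address(), so:
    #
    #   p = lisp_address(LISP_AFI_NONE, "", 0, 0)
    #   p.store_prefix("[1000]10.1.0.0/16")
    #
    # leaves p.instance_id == 1000 and p.mask_len == 16 with an IPv4 AFI.
    #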
def zero_host_bits(self):
if (self.mask_len < 0): return
mask = (2 ** self.mask_len) - 1
shift = self.addr_length() * 8 - self.mask_len
mask <<= shift
self.address &= mask
#enddef
def is_geo_string(self, addr_str):
index = addr_str.find("]")
if (index != -1): addr_str = addr_str[index+1::]
geo = addr_str.split("/")
if (len(geo) == 2):
if (geo[1].isdigit() == False): return(False)
#endif
geo = geo[0]
geo = geo.split("-")
geo_len = len(geo)
if (geo_len < 8 or geo_len > 9): return(False)
for num in range(0, geo_len):
if (num == 3):
if (geo[num] in ["N", "S"]): continue
return(False)
            #endif
if (num == 7):
if (geo[num] in ["W", "E"]): continue
return(False)
#endif
if (geo[num].isdigit() == False): return(False)
#endfor
return(True)
#enddef
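    #
    # Geo strings are dash separated, in this order:
    #
    #   lat-degs - lat-mins - lat-secs - N|S - lon-degs - lon-mins -
    #   lon-secs - W|E [- altitude] [/ radius]
    #
    # For example "45-20-10-N-121-30-5-W/50" passes is_geo_string() while
    # plain IPv4/IPv6 address strings do not.
    #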
def string_to_afi(self, addr_str):
if (addr_str.count("'") == 2):
self.afi = LISP_AFI_NAME
return
#endif
if (addr_str.find(":") != -1): self.afi = LISP_AFI_IPV6
elif (addr_str.find(".") != -1): self.afi = LISP_AFI_IPV4
elif (addr_str.find("+") != -1): self.afi = LISP_AFI_E164
elif (self.is_geo_string(addr_str)): self.afi = LISP_AFI_GEO_COORD
elif (addr_str.find("-") != -1): self.afi = LISP_AFI_MAC
else: self.afi = LISP_AFI_NONE
#enddef
def print_address(self):
addr = self.print_address_no_iid()
iid = "[" + str(self.instance_id)
for i in self.iid_list: iid += "," + str(i)
iid += "]"
addr = "{}{}".format(iid, addr)
return(addr)
#enddef
def print_address_no_iid(self):
if (self.is_ipv4()):
addr = self.address
value1 = addr >> 24
value2 = (addr >> 16) & 0xff
value3 = (addr >> 8) & 0xff
value4 = addr & 0xff
return("{}.{}.{}.{}".format(value1, value2, value3, value4))
elif (self.is_ipv6()):
addr_str = lisp_hex_string(self.address).zfill(32)
addr_str = binascii.unhexlify(addr_str)
addr_str = socket.inet_ntop(socket.AF_INET6, addr_str)
return("{}".format(addr_str))
elif (self.is_geo_prefix()):
return("{}".format(self.address.print_geo()))
elif (self.is_mac()):
addr_str = lisp_hex_string(self.address).zfill(12)
addr_str = "{}-{}-{}".format(addr_str[0:4], addr_str[4:8],
addr_str[8:12])
return("{}".format(addr_str))
elif (self.is_e164()):
addr_str = lisp_hex_string(self.address).zfill(15)
return("+{}".format(addr_str))
elif (self.is_dist_name()):
return("'{}'".format(self.address))
elif (self.is_null()):
return("no-address")
#endif
return("unknown-afi:{}".format(self.afi))
#enddef
def print_prefix(self):
if (self.is_ultimate_root()): return("[*]")
if (self.is_iid_range()):
if (self.mask_len == 32): return("[{}]".format(self.instance_id))
upper = self.instance_id + (2**(32 - self.mask_len) - 1)
return("[{}-{}]".format(self.instance_id, upper))
#endif
addr = self.print_address()
if (self.is_dist_name()): return(addr)
if (self.is_geo_prefix()): return(addr)
index = addr.find("no-address")
if (index == -1):
addr = "{}/{}".format(addr, str(self.mask_len))
else:
addr = addr[0:index]
#endif
return(addr)
#enddef
def print_prefix_no_iid(self):
addr = self.print_address_no_iid()
if (self.is_dist_name()): return(addr)
if (self.is_geo_prefix()): return(addr)
return("{}/{}".format(addr, str(self.mask_len)))
#enddef
def print_prefix_url(self):
if (self.is_ultimate_root()): return("0--0")
addr = self.print_address()
index = addr.find("]")
if (index != -1): addr = addr[index+1::]
if (self.is_geo_prefix()):
addr = addr.replace("/", "-")
return("{}-{}".format(self.instance_id, addr))
#endif
return("{}-{}-{}".format(self.instance_id, addr, self.mask_len))
#enddef
def print_sg(self, g):
s = self.print_prefix()
si = s.find("]") + 1
g = g.print_prefix()
gi = g.find("]") + 1
sg_str = "[{}]({}, {})".format(self.instance_id, s[si::], g[gi::])
return(sg_str)
#enddef
def hash_address(self, addr):
addr1 = self.address
addr2 = addr.address
if (self.is_geo_prefix()): addr1 = self.address.print_geo()
if (addr.is_geo_prefix()): addr2 = addr.address.print_geo()
if (type(addr1) == str):
addr1 = int(binascii.hexlify(addr1[0:1]))
#endif
if (type(addr2) == str):
addr2 = int(binascii.hexlify(addr2[0:1]))
#endif
return(addr1 ^ addr2)
#enddef
#
# Is self more specific or equal to the prefix supplied in variable
# 'prefix'. Return True if so.
#
def is_more_specific(self, prefix):
if (prefix.afi == LISP_AFI_ULTIMATE_ROOT): return(True)
mask_len = prefix.mask_len
if (prefix.afi == LISP_AFI_IID_RANGE):
size = 2**(32 - mask_len)
lower = prefix.instance_id
upper = lower + size
return(self.instance_id in range(lower, upper))
#endif
if (self.instance_id != prefix.instance_id): return(False)
if (self.afi != prefix.afi):
if (prefix.afi != LISP_AFI_NONE): return(False)
#endif
#
# Handle string addresses like distinguished names and geo-prefixes.
#
if (self.is_binary() == False):
if (prefix.afi == LISP_AFI_NONE): return(True)
if (type(self.address) != type(prefix.address)): return(False)
addr = self.address
paddr = prefix.address
if (self.is_geo_prefix()):
addr = self.address.print_geo()
paddr = prefix.address.print_geo()
#endif
if (len(addr) < len(paddr)): return(False)
return(addr.find(paddr) == 0)
#endif
#
# Handle numeric addresses.
#
if (self.mask_len < mask_len): return(False)
shift = (prefix.addr_length() * 8) - mask_len
mask = (2**mask_len - 1) << shift
return((self.address & mask) == prefix.address)
#enddef
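    #
    # Illustrative example (comments only, not executed). With two prefixes
    # in the same instance-ID, the longer mask is the more-specific:
    #
    #   p1.store_prefix("[0]10.1.1.0/24")
    #   p2.store_prefix("[0]10.0.0.0/8")
    #   p1.is_more_specific(p2) -> True
    #   p2.is_more_specific(p1) -> False
    #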
def mask_address(self, mask_len):
shift = (self.addr_length() * 8) - mask_len
mask = (2**mask_len - 1) << shift
self.address &= mask
#enddef
def is_exact_match(self, prefix):
if (self.instance_id != prefix.instance_id): return(False)
p1 = self.print_prefix()
p2 = prefix.print_prefix() if prefix else ""
return(p1 == p2)
#enddef
def is_local(self):
if (self.is_ipv4()):
local = lisp_myrlocs[0]
if (local == None): return(False)
local = local.print_address_no_iid()
return(self.print_address_no_iid() == local)
#endif
if (self.is_ipv6()):
local = lisp_myrlocs[1]
if (local == None): return(False)
local = local.print_address_no_iid()
return(self.print_address_no_iid() == local)
#endif
return(False)
#enddef
def store_iid_range(self, iid, mask_len):
if (self.afi == LISP_AFI_NONE):
if (iid == 0 and mask_len == 0): self.afi = LISP_AFI_ULTIMATE_ROOT
else: self.afi = LISP_AFI_IID_RANGE
#endif
self.instance_id = iid
self.mask_len = mask_len
#enddef
def lcaf_length(self, lcaf_type):
length = self.addr_length() + 2
if (lcaf_type == LISP_LCAF_AFI_LIST_TYPE): length += 4
if (lcaf_type == LISP_LCAF_INSTANCE_ID_TYPE): length += 4
if (lcaf_type == LISP_LCAF_ASN_TYPE): length += 4
if (lcaf_type == LISP_LCAF_APP_DATA_TYPE): length += 8
if (lcaf_type == LISP_LCAF_GEO_COORD_TYPE): length += 12
if (lcaf_type == LISP_LCAF_OPAQUE_TYPE): length += 0
if (lcaf_type == LISP_LCAF_NAT_TYPE): length += 4
if (lcaf_type == LISP_LCAF_NONCE_LOC_TYPE): length += 4
if (lcaf_type == LISP_LCAF_MCAST_INFO_TYPE): length = length * 2 + 8
if (lcaf_type == LISP_LCAF_ELP_TYPE): length += 0
if (lcaf_type == LISP_LCAF_SECURITY_TYPE): length += 6
if (lcaf_type == LISP_LCAF_SOURCE_DEST_TYPE): length += 4
if (lcaf_type == LISP_LCAF_RLE_TYPE): length += 4
return(length)
#enddef
#
# Instance ID LISP Canonical Address Format:
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = 16387 | Rsvd1 | Flags |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Type = 2 | IID mask-len | 4 + n |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Instance ID |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = x | Address ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
def lcaf_encode_iid(self):
lcaf_type = LISP_LCAF_INSTANCE_ID_TYPE
addr_length = socket.htons(self.lcaf_length(lcaf_type))
iid = self.instance_id
afi = self.afi
ml = 0
if (afi < 0):
if (self.afi == LISP_AFI_GEO_COORD):
afi = LISP_AFI_LCAF
ml = 0
else:
afi = 0
ml = self.mask_len
#endif
#endif
lcaf = struct.pack("BBBBH", 0, 0, lcaf_type, ml, addr_length)
lcaf += struct.pack("IH", socket.htonl(iid), socket.htons(afi))
if (afi == 0): return(lcaf)
if (self.afi == LISP_AFI_GEO_COORD):
lcaf = lcaf[0:-2]
lcaf += self.address.encode_geo()
return(lcaf)
#endif
lcaf += self.pack_address()
return(lcaf)
#enddef
def lcaf_decode_iid(self, packet):
packet_format = "BBBBH"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
x, y, lcaf_type, iid_ml, length = struct.unpack(packet_format,
packet[:format_size])
packet = packet[format_size::]
if (lcaf_type != LISP_LCAF_INSTANCE_ID_TYPE): return(None)
packet_format = "IH"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
iid, afi = struct.unpack(packet_format, packet[:format_size])
packet = packet[format_size::]
length = socket.ntohs(length)
self.instance_id = socket.ntohl(iid)
afi = socket.ntohs(afi)
self.afi = afi
if (iid_ml != 0 and afi == 0): self.mask_len = iid_ml
if (afi == 0):
self.afi = LISP_AFI_IID_RANGE if iid_ml else LISP_AFI_ULTIMATE_ROOT
#endif
#
# No address encoded.
#
if (afi == 0): return(packet)
#
# Look for distinguished-name.
#
if (self.is_dist_name()):
packet, self.address = lisp_decode_dist_name(packet)
self.mask_len = len(self.address) * 8
return(packet)
#endif
#
# Only process geo-prefixes inside of an LCAF encoded Instance-ID type.
#
if (afi == LISP_AFI_LCAF):
packet_format = "BBBBH"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
rsvd1, flags, lcaf_type, rsvd2, lcaf_len = \
struct.unpack(packet_format, packet[:format_size])
if (lcaf_type != LISP_LCAF_GEO_COORD_TYPE): return(None)
lcaf_len = socket.ntohs(lcaf_len)
packet = packet[format_size::]
if (lcaf_len > len(packet)): return(None)
geo = lisp_geo("")
self.afi = LISP_AFI_GEO_COORD
self.address = geo
packet = geo.decode_geo(packet, lcaf_len, rsvd2)
self.mask_len = self.host_mask_len()
return(packet)
#endif
addr_length = self.addr_length()
if (len(packet) < addr_length): return(None)
packet = self.unpack_address(packet)
return(packet)
#enddef
#
# Multicast Info Canonical Address Format:
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = 16387 | Rsvd1 | Flags |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Type = 9 | Rsvd2 |R|L|J| 8 + n |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Instance-ID |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Reserved | Source MaskLen| Group MaskLen |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = x | Source/Subnet Address ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = x | Group Address ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
def lcaf_encode_sg(self, group):
lcaf_type = LISP_LCAF_MCAST_INFO_TYPE
iid = socket.htonl(self.instance_id)
addr_length = socket.htons(self.lcaf_length(lcaf_type))
lcaf = struct.pack("BBBBHIHBB", 0, 0, lcaf_type, 0, addr_length, iid,
0, self.mask_len, group.mask_len)
lcaf += struct.pack("H", socket.htons(self.afi))
lcaf += self.pack_address()
lcaf += struct.pack("H", socket.htons(group.afi))
lcaf += group.pack_address()
return(lcaf)
#enddef
def lcaf_decode_sg(self, packet):
packet_format = "BBBBHIHBB"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return([None, None])
x, y, lcaf_type, rsvd, length, iid, z, sml, gml = \
struct.unpack(packet_format, packet[:format_size])
packet = packet[format_size::]
if (lcaf_type != LISP_LCAF_MCAST_INFO_TYPE): return([None, None])
self.instance_id = socket.ntohl(iid)
length = socket.ntohs(length) - 8
#
# Get AFI and source address. Validate if enough length and there
# are bytes in the packet.
#
packet_format = "H"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return([None, None])
if (length < format_size): return([None, None])
afi = struct.unpack(packet_format, packet[:format_size])[0]
packet = packet[format_size::]
length -= format_size
self.afi = socket.ntohs(afi)
self.mask_len = sml
addr_length = self.addr_length()
if (length < addr_length): return([None, None])
packet = self.unpack_address(packet)
if (packet == None): return([None, None])
length -= addr_length
#
# Get AFI and source address. Validate if enough length and there
# are bytes in the packet.
#
packet_format = "H"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return([None, None])
if (length < format_size): return([None, None])
afi = struct.unpack(packet_format, packet[:format_size])[0]
packet = packet[format_size::]
length -= format_size
group = lisp_address(LISP_AFI_NONE, "", 0, 0)
group.afi = socket.ntohs(afi)
group.mask_len = gml
group.instance_id = self.instance_id
addr_length = self.addr_length()
if (length < addr_length): return([None, None])
packet = group.unpack_address(packet)
if (packet == None): return([None, None])
return([packet, group])
#enddef
def lcaf_decode_eid(self, packet):
packet_format = "BBB"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return([None, None])
#
# Do not advance packet pointer. The specific LCAF decoders will do
# it themselves.
#
rsvd, flags, lcaf_type = struct.unpack(packet_format,
packet[:format_size])
if (lcaf_type == LISP_LCAF_INSTANCE_ID_TYPE):
return([self.lcaf_decode_iid(packet), None])
elif (lcaf_type == LISP_LCAF_MCAST_INFO_TYPE):
packet, group = self.lcaf_decode_sg(packet)
return([packet, group])
elif (lcaf_type == LISP_LCAF_GEO_COORD_TYPE):
packet_format = "BBBBH"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
rsvd1, flags, lcaf_type, rsvd2, lcaf_len = \
struct.unpack(packet_format, packet[:format_size])
if (lcaf_type != LISP_LCAF_GEO_COORD_TYPE): return(None)
lcaf_len = socket.ntohs(lcaf_len)
packet = packet[format_size::]
if (lcaf_len > len(packet)): return(None)
geo = lisp_geo("")
self.instance_id = 0
self.afi = LISP_AFI_GEO_COORD
self.address = geo
packet = geo.decode_geo(packet, lcaf_len, rsvd2)
self.mask_len = self.host_mask_len()
#endif
return([packet, None])
#enddef
#endclass
#
# Data structure for storing learned or configured ELPs.
#
class lisp_elp_node():
def __init__(self):
self.address = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.probe = False
self.strict = False
self.eid = False
self.we_are_last = False
#enddef
def copy_elp_node(self):
elp_node = lisp_elp_node()
elp_node.copy_address(self.address)
elp_node.probe = self.probe
elp_node.strict = self.strict
elp_node.eid = self.eid
elp_node.we_are_last = self.we_are_last
return(elp_node)
#enddef
#endclass
class lisp_elp():
def __init__(self, name):
self.elp_name = name
self.elp_nodes = []
self.use_elp_node = None
self.we_are_last = False
#enddef
def copy_elp(self):
elp = lisp_elp(self.elp_name)
elp.use_elp_node = self.use_elp_node
elp.we_are_last = self.we_are_last
for elp_node in self.elp_nodes:
elp.elp_nodes.append(elp_node.copy_elp_node())
#endfor
return(elp)
#enddef
def print_elp(self, want_marker):
elp_str = ""
for elp_node in self.elp_nodes:
use_or_last = ""
if (want_marker):
if (elp_node == self.use_elp_node):
use_or_last = "*"
elif (elp_node.we_are_last):
use_or_last = "x"
#endif
#endif
elp_str += "{}{}({}{}{}), ".format(use_or_last,
elp_node.address.print_address_no_iid(),
"r" if elp_node.eid else "R", "P" if elp_node.probe else "p",
"S" if elp_node.strict else "s")
#endfor
return(elp_str[0:-2] if elp_str != "" else "")
#enddef
def select_elp_node(self):
v4, v6, device = lisp_myrlocs
index = None
for elp_node in self.elp_nodes:
if (v4 and elp_node.address.is_exact_match(v4)):
index = self.elp_nodes.index(elp_node)
break
#endif
if (v6 and elp_node.address.is_exact_match(v6)):
index = self.elp_nodes.index(elp_node)
break
#endif
#endfor
#
# If we did not find a match, this is possibly an ITR. We need to give
        # it the first ELP node.
#
if (index == None):
self.use_elp_node = self.elp_nodes[0]
elp_node.we_are_last = False
return
#endif
#
# If we matched the last item in the ELP nodes, we are the end of the
# path. Flag it for display purposes and return None.
#
if (self.elp_nodes[-1] == self.elp_nodes[index]):
self.use_elp_node = None
elp_node.we_are_last = True
return
#endif
#
# Return the next node after the one that matches this system.
#
self.use_elp_node = self.elp_nodes[index+1]
return
#enddef
#endclass
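#
# Geo-coordinates in degrees/minutes/seconds form, used by the LCAF
# Geo-Coordinate type. Holds latitude, longitude, optional altitude, and an
# optional radius so a geo-prefix can describe an area and not just a point.
#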
class lisp_geo():
def __init__(self, name):
self.geo_name = name
self.latitude = 0xffffffff # Negative when North, otherwise South
self.lat_mins = 0
self.lat_secs = 0
self.longitude = 0xffffffff # Negative when East, otherwise West
self.long_mins = 0
self.long_secs = 0
self.altitude = -1
self.radius = 0
#enddef
def copy_geo(self):
geo = lisp_geo(self.geo_name)
geo.latitude = self.latitude
geo.lat_mins = self.lat_mins
geo.lat_secs = self.lat_secs
geo.longitude = self.longitude
geo.long_mins = self.long_mins
geo.long_secs = self.long_secs
geo.altitude = self.altitude
geo.radius = self.radius
return(geo)
#enddef
def no_geo_altitude(self):
return(self.altitude == -1)
#enddef
def parse_geo_string(self, geo_str):
index = geo_str.find("]")
if (index != -1): geo_str = geo_str[index+1::]
#
# Check if radius is specified. That is a geo-prefix and not just a
# geo-point.
#
if (geo_str.find("/") != -1):
geo_str, radius = geo_str.split("/")
self.radius = int(radius)
#endif
geo_str = geo_str.split("-")
if (len(geo_str) < 8): return(False)
latitude = geo_str[0:4]
longitude = geo_str[4:8]
#
# Get optional altitude.
#
if (len(geo_str) > 8): self.altitude = int(geo_str[8])
#
# Get latitude values.
#
self.latitude = int(latitude[0])
self.lat_mins = int(latitude[1])
self.lat_secs = int(latitude[2])
if (latitude[3] == "N"): self.latitude = -self.latitude
#
# Get longitude values.
#
self.longitude = int(longitude[0])
self.long_mins = int(longitude[1])
self.long_secs = int(longitude[2])
if (longitude[3] == "E"): self.longitude = -self.longitude
return(True)
#enddef
def print_geo(self):
n_or_s = "N" if self.latitude < 0 else "S"
e_or_w = "E" if self.longitude < 0 else "W"
geo_str = "{}-{}-{}-{}-{}-{}-{}-{}".format(abs(self.latitude),
self.lat_mins, self.lat_secs, n_or_s, abs(self.longitude),
self.long_mins, self.long_secs, e_or_w)
if (self.no_geo_altitude() == False):
geo_str += "-" + str(self.altitude)
#endif
#
# Print "/<radius>" if not 0.
#
if (self.radius != 0): geo_str += "/{}".format(self.radius)
return(geo_str)
#enddef
def geo_url(self):
zoom = os.getenv("LISP_GEO_ZOOM_LEVEL")
zoom = "10" if (zoom == "" or zoom.isdigit() == False) else zoom
lat, lon = self.dms_to_decimal()
url = ("http://maps.googleapis.com/maps/api/staticmap?center={},{}" + \
"&markers=color:blue%7Clabel:lisp%7C{},{}" + \
"&zoom={}&size=1024x1024&sensor=false").format(lat, lon, lat, lon,
zoom)
return(url)
#enddef
def print_geo_url(self):
geo = self.print_geo()
if (self.radius == 0):
url = self.geo_url()
string = "<a href='{}'>{}</a>".format(url, geo)
else:
url = geo.replace("/", "-")
string = "<a href='/lisp/geo-map/{}'>{}</a>".format(url, geo)
#endif
return(string)
#enddef
def dms_to_decimal(self):
degs, mins, secs = self.latitude, self.lat_mins, self.lat_secs
dd = float(abs(degs))
dd += float(mins * 60 + secs) / 3600
if (degs > 0): dd = -dd
dd_lat = dd
degs, mins, secs = self.longitude, self.long_mins, self.long_secs
dd = float(abs(degs))
dd += float(mins * 60 + secs) / 3600
if (degs > 0): dd = -dd
dd_long = dd
return((dd_lat, dd_long))
#enddef
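    #
    # Illustrative conversion (comments only, not executed). North latitudes
    # are stored as negative degrees in this class, so 40-30-0-N is kept as
    # latitude -40 and converts to decimal degrees as 40 + (30*60 + 0)/3600,
    # that is +40.5; southern latitudes come out negative.
    #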
def get_distance(self, geo_point):
dd_prefix = self.dms_to_decimal()
dd_point = geo_point.dms_to_decimal()
distance = vincenty(dd_prefix, dd_point)
return(distance.km)
#enddef
def point_in_circle(self, geo_point):
km = self.get_distance(geo_point)
return(km <= self.radius)
#enddef
def encode_geo(self):
lcaf_afi = socket.htons(LISP_AFI_LCAF)
geo_len = socket.htons(20 + 2)
flags = 0
lat = abs(self.latitude)
lat_ms = ((self.lat_mins * 60) + self.lat_secs) * 1000
if (self.latitude < 0): flags |= 0x40
lon = abs(self.longitude)
lon_ms = ((self.long_mins * 60) + self.long_secs) * 1000
if (self.longitude < 0): flags |= 0x20
alt = 0
if (self.no_geo_altitude() == False):
alt = socket.htonl(self.altitude)
flags |= 0x10
#endif
radius = socket.htons(self.radius)
if (radius != 0): flags |= 0x06
pkt = struct.pack("HBBBBH", lcaf_afi, 0, 0, LISP_LCAF_GEO_COORD_TYPE,
0, geo_len)
pkt += struct.pack("BBHBBHBBHIHHH", flags, 0, 0, lat, lat_ms >> 16,
socket.htons(lat_ms & 0x0ffff), lon, lon_ms >> 16,
socket.htons(lon_ms & 0xffff), alt, radius, 0, 0)
return(pkt)
#enddef
def decode_geo(self, packet, lcaf_len, radius_hi):
packet_format = "BBHBBHBBHIHHH"
format_size = struct.calcsize(packet_format)
if (lcaf_len < format_size): return(None)
flags, r1, uncertainty, lat, lat_hi, lat_ms, lon, lon_hi, lon_ms, \
alt, radius, r2, afi = struct.unpack(packet_format,
packet[:format_size])
#
# No nested LCAFs in Geo-Coord type.
#
afi = socket.ntohs(afi)
if (afi == LISP_AFI_LCAF): return(None)
if (flags & 0x40): lat = -lat
self.latitude = lat
lat_secs = ((lat_hi << 16) | socket.ntohs(lat_ms)) / 1000
self.lat_mins = lat_secs / 60
self.lat_secs = lat_secs % 60
if (flags & 0x20): lon = -lon
self.longitude = lon
lon_secs = ((lon_hi << 16) | socket.ntohs(lon_ms)) / 1000
self.long_mins = lon_secs / 60
self.long_secs = lon_secs % 60
self.altitude = socket.ntohl(alt) if (flags & 0x10) else -1
radius = socket.ntohs(radius)
self.radius = radius if (flags & 0x02) else radius * 1000
self.geo_name = None
packet = packet[format_size::]
if (afi != 0):
self.rloc.afi = afi
packet = self.rloc.unpack_address(packet)
self.rloc.mask_len = self.rloc.host_mask_len()
#endif
return(packet)
#enddef
#endclass
#
# Structure for Replication List Entries.
#
class lisp_rle_node():
def __init__(self):
self.address = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.level = 0
self.translated_port = 0
self.rloc_name = None
#enddef
def copy_rle_node(self):
rle_node = lisp_rle_node()
rle_node.address.copy_address(self.address)
rle_node.level = self.level
rle_node.translated_port = self.translated_port
rle_node.rloc_name = self.rloc_name
return(rle_node)
#enddef
def store_translated_rloc(self, rloc, port):
self.address.copy_address(rloc)
self.translated_port = port
#enddef
def get_encap_keys(self):
port = "4341" if self.translated_port == 0 else \
str(self.translated_port)
addr_str = self.address.print_address_no_iid() + ":" + port
try:
keys = lisp_crypto_keys_by_rloc_encap[addr_str]
if (keys[1]): return(keys[1].encrypt_key, keys[1].icv_key)
return(None, None)
except:
return(None, None)
#endtry
#enddef
#endclass
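#
# A Replication List Entry is an ordered list of lisp_rle_node()s. The
# forwarding list derived from it holds the nodes at the replication level
# this system should forward to, excluding local RLOCs when this system is
# not an RTR.
#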
class lisp_rle():
def __init__(self, name):
self.rle_name = name
self.rle_nodes = []
self.rle_forwarding_list = []
#enddef
def copy_rle(self):
rle = lisp_rle(self.rle_name)
for rle_node in self.rle_nodes:
rle.rle_nodes.append(rle_node.copy_rle_node())
#endfor
rle.build_forwarding_list()
return(rle)
#enddef
def print_rle(self, html, do_formatting):
rle_str = ""
for rle_node in self.rle_nodes:
port = rle_node.translated_port
rle_name_str = ""
if (rle_node.rloc_name != None):
rle_name_str = rle_node.rloc_name
if (do_formatting): rle_name_str = blue(rle_name_str, html)
#endif
addr_str = rle_node.address.print_address_no_iid()
if (rle_node.address.is_local()): addr_str = red(addr_str, html)
rle_str += "{}{}(L{}){}, ".format(addr_str, "" if port == 0 \
else ":" + str(port), rle_node.level,
"" if rle_node.rloc_name == None else rle_name_str)
#endfor
return(rle_str[0:-2] if rle_str != "" else "")
#enddef
def build_forwarding_list(self):
level = -1
for rle_node in self.rle_nodes:
if (level == -1):
if (rle_node.address.is_local()): level = rle_node.level
else:
if (rle_node.level > level): break
#endif
#endfor
level = 0 if level == -1 else rle_node.level
self.rle_forwarding_list = []
for rle_node in self.rle_nodes:
if (rle_node.level == level or (level == 0 and
rle_node.level == 128)):
if (lisp_i_am_rtr == False and rle_node.address.is_local()):
addr_str = rle_node.address.print_address_no_iid()
lprint("Exclude local RLE RLOC {}".format(addr_str))
continue
#endif
self.rle_forwarding_list.append(rle_node)
#endif
#endfor
#enddef
#endclass
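#
# Wrapper for a named JSON string carried in RLOC-records. add()/delete()
# maintain the global lisp_json_list and valid_json() syntax-checks the
# stored string.
#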
class lisp_json():
def __init__(self, name, string):
self.json_name = name
self.json_string = string
#enddef
def add(self):
self.delete()
lisp_json_list[self.json_name] = self
#enddef
def delete(self):
if (lisp_json_list.has_key(self.json_name)):
del(lisp_json_list[self.json_name])
lisp_json_list[self.json_name] = None
#endif
#enddef
def print_json(self, html):
good_string = self.json_string
bad = "***"
if (html): bad = red(bad, html)
bad_string = bad + self.json_string + bad
if (self.valid_json()): return(good_string)
return(bad_string)
#enddef
def valid_json(self):
try:
json.loads(self.json_string)
except:
return(False)
#endtry
return(True)
#enddef
#endclass
#
# LISP forwarding stats info.
#
class lisp_stats():
def __init__(self):
self.packet_count = 0
self.byte_count = 0
self.last_rate_check = 0
self.last_packet_count = 0
self.last_byte_count = 0
self.last_increment = None
#enddef
def increment(self, octets):
self.packet_count += 1
self.byte_count += octets
self.last_increment = lisp_get_timestamp()
#enddef
def recent_packet_sec(self):
if (self.last_increment == None): return(False)
elapsed = time.time() - self.last_increment
return(elapsed <= 1)
#enddef
def recent_packet_min(self):
if (self.last_increment == None): return(False)
elapsed = time.time() - self.last_increment
return(elapsed <= 60)
#enddef
def stat_colors(self, c1, c2, html):
if (self.recent_packet_sec()):
return(green_last_sec(c1), green_last_sec(c2))
#endif
if (self.recent_packet_min()):
return(green_last_min(c1), green_last_min(c2))
#endif
return(c1, c2)
#enddef
def normalize(self, count):
count = str(count)
digits = len(count)
if (digits > 12):
count = count[0:-10] + "." + count[-10:-7] + "T"
return(count)
#endif
if (digits > 9):
count = count[0:-9] + "." + count[-9:-7] + "B"
return(count)
#endif
if (digits > 6):
count = count[0:-6] + "." + count[-6] + "M"
return(count)
#endif
return(count)
#enddef
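    #
    # Illustrative example (comments only, not executed): normalize()
    # shortens large counters for display, 1234567 becomes "1.2M" and
    # 9876543210 becomes "9.87B"; values of 6 digits or less are returned
    # unchanged.
    #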
def get_stats(self, summary, html):
last_rate = self.last_rate_check
last_packets = self.last_packet_count
last_bytes = self.last_byte_count
self.last_rate_check = lisp_get_timestamp()
self.last_packet_count = self.packet_count
self.last_byte_count = self.byte_count
rate_diff = self.last_rate_check - last_rate
if (rate_diff == 0):
packet_rate = 0
bit_rate = 0
else:
packet_rate = int((self.packet_count - last_packets) / rate_diff)
bit_rate = (self.byte_count - last_bytes) / rate_diff
bit_rate = (bit_rate * 8) / 1000000
bit_rate = round(bit_rate, 2)
#endif
#
# Normalize and put in string form.
#
packets = self.normalize(self.packet_count)
bc = self.normalize(self.byte_count)
#
# The summary version gives you the string above in a pull-down html
# menu and the title string is the string below.
#
if (summary):
h = "<br>" if html else ""
packets, bc = self.stat_colors(packets, bc, html)
title = "packet-count: {}{}byte-count: {}".format(packets, h, bc)
stats = "packet-rate: {} pps\nbit-rate: {} Mbps".format( \
packet_rate, bit_rate)
if (html != ""): stats = lisp_span(title, stats)
else:
prate = str(packet_rate)
brate = str(bit_rate)
if (html):
packets = lisp_print_cour(packets)
prate = lisp_print_cour(prate)
bc = lisp_print_cour(bc)
brate = lisp_print_cour(brate)
#endif
h = "<br>" if html else ", "
stats = ("packet-count: {}{}packet-rate: {} pps{}byte-count: " + \
"{}{}bit-rate: {} mbps").format(packets, h, prate, h, bc, h,
brate)
#endif
return(stats)
#enddef
#endclass
#
# ETR/RTR decapsulation total packet and errors stats. Anytime a lisp_packet().
# packet_error value is added, this dictionary array needs to add the key
# string.
#
lisp_decap_stats = {
"good-packets" : lisp_stats(), "ICV-error" : lisp_stats(),
"checksum-error" : lisp_stats(), "lisp-header-error" : lisp_stats(),
"no-decrypt-key" : lisp_stats(), "bad-inner-version" : lisp_stats(),
"outer-header-error" : lisp_stats()
}
#
# This a locator record definition as defined in RFCs.
#
class lisp_rloc():
def __init__(self, recurse=True):
self.rloc = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.rloc_name = None
self.interface = None
self.translated_rloc = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.translated_port = 0
self.priority = 255
self.weight = 0
self.mpriority = 255
self.mweight = 0
self.uptime = 0
self.state = LISP_RLOC_UP_STATE
self.last_state_change = None
self.rle_name = None
self.elp_name = None
self.geo_name = None
self.json_name = None
self.geo = None
self.elp = None
self.rle = None
self.json = None
self.stats = lisp_stats()
self.last_rloc_probe = None
self.last_rloc_probe_reply = None
self.rloc_probe_rtt = -1
self.recent_rloc_probe_rtts = [-1, -1, -1]
self.rloc_probe_hops = "?/?"
self.recent_rloc_probe_hops = ["?/?", "?/?", "?/?"]
self.last_rloc_probe_nonce = 0
self.echo_nonce_capable = False
self.map_notify_requested = False
self.rloc_next_hop = None
self.next_rloc = None
if (recurse == False): return
#
# This is for a box with multiple egress interfaces. We create an
# rloc chain, one for each <device, nh> tuple. So we can RLOC-probe
# individually.
#
next_hops = lisp_get_default_route_next_hops()
if (next_hops == [] or len(next_hops) == 1): return
self.rloc_next_hop = next_hops[0]
last = self
for nh in next_hops[1::]:
hop = lisp_rloc(False)
hop = copy.deepcopy(self)
hop.rloc_next_hop = nh
last.next_rloc = hop
last = hop
#endfor
#enddef
def up_state(self):
return(self.state == LISP_RLOC_UP_STATE)
#enddef
def unreach_state(self):
return(self.state == LISP_RLOC_UNREACH_STATE)
#enddef
def no_echoed_nonce_state(self):
return(self.state == LISP_RLOC_NO_ECHOED_NONCE_STATE)
#enddef
def down_state(self):
return(self.state in \
[LISP_RLOC_DOWN_STATE, LISP_RLOC_ADMIN_DOWN_STATE])
#enddef
def print_state(self):
if (self.state is LISP_RLOC_UNKNOWN_STATE):
return("unknown-state")
if (self.state is LISP_RLOC_UP_STATE):
return("up-state")
if (self.state is LISP_RLOC_DOWN_STATE):
return("down-state")
if (self.state is LISP_RLOC_ADMIN_DOWN_STATE):
return("admin-down-state")
if (self.state is LISP_RLOC_UNREACH_STATE):
return("unreach-state")
if (self.state is LISP_RLOC_NO_ECHOED_NONCE_STATE):
return("no-echoed-nonce-state")
return("invalid-state")
#enddef
def print_rloc(self, indent):
ts = lisp_print_elapsed(self.uptime)
lprint("{}rloc {}, uptime {}, {}, parms {}/{}/{}/{}".format(indent,
red(self.rloc.print_address(), False), ts, self.print_state(),
self.priority, self.weight, self.mpriority, self.mweight))
#enddef
def print_rloc_name(self, cour=False):
if (self.rloc_name == None): return("")
rloc_name = self.rloc_name
if (cour): rloc_name = lisp_print_cour(rloc_name)
return('rloc-name: {}'.format(blue(rloc_name, cour)))
#enddef
def store_rloc_from_record(self, rloc_record, nonce, source):
port = LISP_DATA_PORT
self.rloc.copy_address(rloc_record.rloc)
self.rloc_name = rloc_record.rloc_name
#
# Store translated port if RLOC was translated by a NAT.
#
rloc = self.rloc
if (rloc.is_null() == False):
nat_info = lisp_get_nat_info(rloc, self.rloc_name)
if (nat_info):
port = nat_info.port
head = lisp_nat_state_info[self.rloc_name][0]
addr_str = rloc.print_address_no_iid()
rloc_str = red(addr_str, False)
rloc_nstr = "" if self.rloc_name == None else \
blue(self.rloc_name, False)
#
# Don't use timed-out state. And check if the RLOC from the
# RLOC-record is different than the youngest NAT state.
#
if (nat_info.timed_out()):
lprint((" Matched stored NAT state timed out for " + \
"RLOC {}:{}, {}").format(rloc_str, port, rloc_nstr))
nat_info = None if (nat_info == head) else head
if (nat_info and nat_info.timed_out()):
port = nat_info.port
rloc_str = red(nat_info.address, False)
lprint((" Youngest stored NAT state timed out " + \
" for RLOC {}:{}, {}").format(rloc_str, port,
rloc_nstr))
nat_info = None
#endif
#endif
#
# Check to see if RLOC for map-cache is same RLOC for NAT
# state info.
#
if (nat_info):
if (nat_info.address != addr_str):
lprint("RLOC conflict, RLOC-record {}, NAT state {}". \
format(rloc_str, red(nat_info.address, False)))
self.rloc.store_address(nat_info.address)
#endif
rloc_str = red(nat_info.address, False)
port = nat_info.port
lprint(" Use NAT translated RLOC {}:{} for {}". \
format(rloc_str, port, rloc_nstr))
self.store_translated_rloc(rloc, port)
#endif
#endif
#endif
self.geo = rloc_record.geo
self.elp = rloc_record.elp
self.json = rloc_record.json
#
# RLE nodes may be behind NATs too.
#
self.rle = rloc_record.rle
if (self.rle):
for rle_node in self.rle.rle_nodes:
rloc_name = rle_node.rloc_name
nat_info = lisp_get_nat_info(rle_node.address, rloc_name)
if (nat_info == None): continue
port = nat_info.port
rloc_name_str = rloc_name
if (rloc_name_str): rloc_name_str = blue(rloc_name, False)
lprint((" Store translated encap-port {} for RLE-" + \
"node {}, rloc-name '{}'").format(port,
rle_node.address.print_address_no_iid(), rloc_name_str))
rle_node.translated_port = port
#endfor
#endif
self.priority = rloc_record.priority
self.mpriority = rloc_record.mpriority
self.weight = rloc_record.weight
self.mweight = rloc_record.mweight
if (rloc_record.reach_bit and rloc_record.local_bit and
rloc_record.probe_bit == False): self.state = LISP_RLOC_UP_STATE
#
# Store keys in RLOC lisp-crypto data structure.
#
rloc_is_source = source.is_exact_match(rloc_record.rloc) if \
source != None else None
if (rloc_record.keys != None and rloc_is_source):
key = rloc_record.keys[1]
if (key != None):
addr_str = rloc_record.rloc.print_address_no_iid() + ":" + \
str(port)
key.add_key_by_rloc(addr_str, True)
lprint(" Store encap-keys for nonce 0x{}, RLOC {}".format( \
lisp_hex_string(nonce), red(addr_str, False)))
#endif
#endif
return(port)
#enddef
def store_translated_rloc(self, rloc, port):
self.rloc.copy_address(rloc)
self.translated_rloc.copy_address(rloc)
self.translated_port = port
#enddef
def is_rloc_translated(self):
return(self.translated_rloc.is_null() == False)
#enddef
def rloc_exists(self):
if (self.rloc.is_null() == False): return(True)
if (self.rle_name or self.geo_name or self.elp_name or self.json_name):
return(False)
#endif
return(True)
#enddef
def is_rtr(self):
return((self.priority == 254 and self.mpriority == 255 and \
self.weight == 0 and self.mweight == 0))
#enddef
def print_state_change(self, new_state):
current_state = self.print_state()
string = "{} -> {}".format(current_state, new_state)
if (new_state == "up" and self.unreach_state()):
string = bold(string, False)
#endif
return(string)
#enddef
def print_rloc_probe_rtt(self):
if (self.rloc_probe_rtt == -1): return("none")
return(self.rloc_probe_rtt)
#enddef
def print_recent_rloc_probe_rtts(self):
rtts = str(self.recent_rloc_probe_rtts)
rtts = rtts.replace("-1", "?")
return(rtts)
#enddef
def compute_rloc_probe_rtt(self):
last = self.rloc_probe_rtt
self.rloc_probe_rtt = -1
if (self.last_rloc_probe_reply == None): return
if (self.last_rloc_probe == None): return
self.rloc_probe_rtt = self.last_rloc_probe_reply - self.last_rloc_probe
self.rloc_probe_rtt = round(self.rloc_probe_rtt, 3)
last_list = self.recent_rloc_probe_rtts
self.recent_rloc_probe_rtts = [last] + last_list[0:-1]
#enddef
def print_rloc_probe_hops(self):
return(self.rloc_probe_hops)
#enddef
def print_recent_rloc_probe_hops(self):
hops = str(self.recent_rloc_probe_hops)
return(hops)
#enddef
def store_rloc_probe_hops(self, to_hops, from_ttl):
if (to_hops == 0):
to_hops = "?"
elif (to_hops < LISP_RLOC_PROBE_TTL/2):
to_hops = "!"
else:
to_hops = str(LISP_RLOC_PROBE_TTL - to_hops)
#endif
if (from_ttl < LISP_RLOC_PROBE_TTL/2):
from_hops = "!"
else:
from_hops = str(LISP_RLOC_PROBE_TTL - from_ttl)
#endif
last = self.rloc_probe_hops
self.rloc_probe_hops = to_hops + "/" + from_hops
last_list = self.recent_rloc_probe_hops
self.recent_rloc_probe_hops = [last] + last_list[0:-1]
#enddef
def process_rloc_probe_reply(self, nonce, eid, group, hop_count, ttl):
rloc = self
while (True):
if (rloc.last_rloc_probe_nonce == nonce): break
rloc = rloc.next_rloc
if (rloc == None):
lprint(" No matching nonce state found for nonce 0x{}". \
format(lisp_hex_string(nonce)))
return
#endif
#endwhile
rloc.last_rloc_probe_reply = lisp_get_timestamp()
rloc.compute_rloc_probe_rtt()
state_string = rloc.print_state_change("up")
if (rloc.state != LISP_RLOC_UP_STATE):
lisp_update_rtr_updown(rloc.rloc, True)
rloc.state = LISP_RLOC_UP_STATE
rloc.last_state_change = lisp_get_timestamp()
mc = lisp_map_cache.lookup_cache(eid, True)
if (mc): lisp_write_ipc_map_cache(True, mc)
#endif
rloc.store_rloc_probe_hops(hop_count, ttl)
probe = bold("RLOC-probe reply", False)
addr_str = rloc.rloc.print_address_no_iid()
rtt = bold(str(rloc.print_rloc_probe_rtt()), False)
p = ":{}".format(self.translated_port) if self.translated_port != 0 \
else ""
nh = ""
if (rloc.rloc_next_hop != None):
d, n = rloc.rloc_next_hop
nh = ", nh {}({})".format(n, d)
#endif
e = green(lisp_print_eid_tuple(eid, group), False)
lprint((" Received {} from {}{} for {}, {}, rtt {}{}, " + \
"to-ttl/from-ttl {}").format(probe, red(addr_str, False), p, e,
state_string, rtt, nh, str(hop_count) + "/" + str(ttl)))
if (rloc.rloc_next_hop == None): return
#
# Now select better RTT next-hop.
#
rloc = None
install = None
while (True):
rloc = self if rloc == None else rloc.next_rloc
if (rloc == None): break
if (rloc.up_state() == False): continue
if (rloc.rloc_probe_rtt == -1): continue
if (install == None): install = rloc
if (rloc.rloc_probe_rtt < install.rloc_probe_rtt): install = rloc
#endwhile
if (install != None):
d, n = install.rloc_next_hop
nh = bold("nh {}({})".format(n, d), False)
lprint(" Install host-route via best {}".format(nh))
lisp_install_host_route(addr_str, None, False)
lisp_install_host_route(addr_str, n, True)
#endif
#enddef
def add_to_rloc_probe_list(self, eid, group):
addr_str = self.rloc.print_address_no_iid()
port = self.translated_port
if (port != 0): addr_str += ":" + str(port)
if (lisp_rloc_probe_list.has_key(addr_str) == False):
lisp_rloc_probe_list[addr_str] = []
#endif
if (group.is_null()): group.instance_id = 0
for r, e, g in lisp_rloc_probe_list[addr_str]:
if (e.is_exact_match(eid) and g.is_exact_match(group)):
if (r == self):
if (lisp_rloc_probe_list[addr_str] == []):
lisp_rloc_probe_list.pop(addr_str)
#endif
return
#endif
lisp_rloc_probe_list[addr_str].remove([r, e, g])
break
#endif
#endfor
lisp_rloc_probe_list[addr_str].append([self, eid, group])
#
# Copy reach/unreach state from first RLOC that the active RLOC-probing
# is run on.
#
rloc = lisp_rloc_probe_list[addr_str][0][0]
if (rloc.state == LISP_RLOC_UNREACH_STATE):
self.state = LISP_RLOC_UNREACH_STATE
self.last_state_change = lisp_get_timestamp()
#endif
#enddef
def delete_from_rloc_probe_list(self, eid, group):
addr_str = self.rloc.print_address_no_iid()
port = self.translated_port
if (port != 0): addr_str += ":" + str(port)
if (lisp_rloc_probe_list.has_key(addr_str) == False): return
array = []
for entry in lisp_rloc_probe_list[addr_str]:
if (entry[0] != self): continue
if (entry[1].is_exact_match(eid) == False): continue
if (entry[2].is_exact_match(group) == False): continue
array = entry
break
#endfor
if (array == []): return
try:
lisp_rloc_probe_list[addr_str].remove(array)
if (lisp_rloc_probe_list[addr_str] == []):
lisp_rloc_probe_list.pop(addr_str)
#endif
except:
return
#endtry
#enddef
def print_rloc_probe_state(self, trailing_linefeed):
output = ""
rloc = self
while (True):
sent = rloc.last_rloc_probe
if (sent == None): sent = 0
resp = rloc.last_rloc_probe_reply
if (resp == None): resp = 0
rtt = rloc.print_rloc_probe_rtt()
s = space(4)
if (rloc.rloc_next_hop == None):
output += "RLOC-Probing:\n"
else:
d, n = rloc.rloc_next_hop
output += "RLOC-Probing for nh {}({}):\n".format(n, d)
#endif
output += ("{}RLOC-probe request sent: {}\n{}RLOC-probe reply " + \
"received: {}, rtt {}").format(s, lisp_print_elapsed(sent),
s, lisp_print_elapsed(resp), rtt)
if (trailing_linefeed): output += "\n"
rloc = rloc.next_rloc
if (rloc == None): break
output += "\n"
#endwhile
return(output)
#enddef
def get_encap_keys(self):
port = "4341" if self.translated_port == 0 else \
str(self.translated_port)
addr_str = self.rloc.print_address_no_iid() + ":" + port
try:
keys = lisp_crypto_keys_by_rloc_encap[addr_str]
if (keys[1]): return(keys[1].encrypt_key, keys[1].icv_key)
return(None, None)
except:
return(None, None)
#endtry
#enddef
def rloc_recent_rekey(self):
port = "4341" if self.translated_port == 0 else \
str(self.translated_port)
addr_str = self.rloc.print_address_no_iid() + ":" + port
try:
key = lisp_crypto_keys_by_rloc_encap[addr_str][1]
if (key == None): return(False)
if (key.last_rekey == None): return(True)
return(time.time() - key.last_rekey < 1)
except:
return(False)
#endtry
#enddef
#endclass
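#
# An EID-prefix (or (S,G) entry) together with its RLOC-set. Instances are
# used both for map-cache entries and for local database-mappings, so the
# class carries TTL/refresh state as well as forwarding statistics.
#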
class lisp_mapping():
def __init__(self, eid, group, rloc_set):
self.eid = eid
if (eid == ""): self.eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.group = group
if (group == ""): self.group = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.rloc_set = rloc_set
self.best_rloc_set = []
self.build_best_rloc_set()
self.uptime = lisp_get_timestamp()
self.action = LISP_NO_ACTION
self.expires = None
self.map_cache_ttl = None
self.last_refresh_time = self.uptime
self.source_cache = None
self.map_replies_sent = 0
self.mapping_source = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.use_mr_name = "all"
self.use_ms_name = "all"
self.stats = lisp_stats()
self.dynamic_eids = None
self.checkpoint_entry = False
self.secondary_iid = None
self.signature_eid = False
self.gleaned = False
self.recent_sources = {}
self.last_multicast_map_request = 0
#enddef
def print_mapping(self, eid_indent, rloc_indent):
ts = lisp_print_elapsed(self.uptime)
group = "" if self.group.is_null() else \
", group {}".format(self.group.print_prefix())
lprint("{}eid {}{}, uptime {}, {} rlocs:".format(eid_indent,
green(self.eid.print_prefix(), False), group, ts,
len(self.rloc_set)))
for rloc in self.rloc_set: rloc.print_rloc(rloc_indent)
#enddef
def print_eid_tuple(self):
return(lisp_print_eid_tuple(self.eid, self.group))
#enddef
def print_ttl(self):
ttl = self.map_cache_ttl
if (ttl == None): return("forever")
if (ttl >= 3600):
if ((ttl % 3600) == 0):
ttl = str(ttl/3600) + " hours"
else:
                ttl = str(ttl / 60) + " mins"
#endif
elif (ttl >= 60):
if ((ttl % 60) == 0):
ttl = str(ttl/60) + " mins"
else:
ttl = str(ttl) + " secs"
#endif
else:
ttl = str(ttl) + " secs"
#endif
return(ttl)
#enddef
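    #
    # Illustrative example (comments only, not executed): the TTL is stored
    # in seconds and prints as 7200 -> "2 hours", 300 -> "5 mins",
    # 90 -> "90 secs", and None -> "forever".
    #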
def refresh(self):
if (self.group.is_null()): return(self.refresh_unicast())
return(self.refresh_multicast())
#enddef
def refresh_unicast(self):
return(self.is_active() and self.has_ttl_elapsed() and
self.gleaned == False)
#enddef
def refresh_multicast(self):
#
# Take uptime modulo TTL and if the value is greater than 10% of
# TTL, refresh entry. So that is around every 13 or 14 seconds.
#
elapsed = int((time.time() - self.uptime) % self.map_cache_ttl)
refresh = (elapsed in [0, 1, 2])
if (refresh == False): return(False)
#
# Don't send a refreshing Map-Request if we just sent one.
#
rate_limit = ((time.time() - self.last_multicast_map_request) <= 2)
if (rate_limit): return(False)
self.last_multicast_map_request = lisp_get_timestamp()
return(True)
#enddef
def has_ttl_elapsed(self):
if (self.map_cache_ttl == None): return(False)
elapsed = time.time() - self.last_refresh_time
if (elapsed >= self.map_cache_ttl): return(True)
#
# TTL is about to elapse. We need to refresh entry if we are 90%
# close to expiring.
#
almost_ttl = self.map_cache_ttl - (self.map_cache_ttl / 10)
if (elapsed >= almost_ttl): return(True)
return(False)
#enddef
def is_active(self):
if (self.stats.last_increment == None): return(False)
elapsed = time.time() - self.stats.last_increment
return(elapsed <= 60)
#enddef
def match_eid_tuple(self, db):
if (self.eid.is_exact_match(db.eid) == False): return(False)
if (self.group.is_exact_match(db.group) == False): return(False)
return(True)
#enddef
def sort_rloc_set(self):
self.rloc_set.sort(key=operator.attrgetter('rloc.address'))
#enddef
def delete_rlocs_from_rloc_probe_list(self):
for rloc in self.best_rloc_set:
rloc.delete_from_rloc_probe_list(self.eid, self.group)
#endfor
#enddef
def build_best_rloc_set(self):
old_best = self.best_rloc_set
self.best_rloc_set = []
if (self.rloc_set == None): return
#
# Get best priority for first up RLOC.
#
pr = 256
for rloc in self.rloc_set:
if (rloc.up_state()): pr = min(rloc.priority, pr)
        #endfor
#
# For each up RLOC with best priority, put in best-rloc for data-plane.
# For each unreachable RLOC that has better priority than the best
# computed above, we want to RLOC-probe. So put in the RLOC probe list
# and best list. We need to set the timestamp last_rloc_probe or
# lisp_process_rloc_probe_timer() will think the unreach RLOC went
# down and is waiting for an RLOC-probe reply (it will never get).
#
for rloc in self.rloc_set:
if (rloc.priority <= pr):
if (rloc.unreach_state() and rloc.last_rloc_probe == None):
rloc.last_rloc_probe = lisp_get_timestamp()
#endif
self.best_rloc_set.append(rloc)
#endif
#endfor
#
# Put RLOC in lisp.lisp_rloc_probe_list if doesn't exist. And if
# we removed the RLOC out of the best list, we need to remove
# references.
#
for rloc in old_best:
if (rloc.priority < pr): continue
rloc.delete_from_rloc_probe_list(self.eid, self.group)
#endfor
for rloc in self.best_rloc_set:
if (rloc.rloc.is_null()): continue
rloc.add_to_rloc_probe_list(self.eid, self.group)
#endfor
#enddef
def select_rloc(self, lisp_packet, ipc_socket):
packet = lisp_packet.packet
inner_version = lisp_packet.inner_version
length = len(self.best_rloc_set)
if (length == 0):
self.stats.increment(len(packet))
return([None, None, None, self.action, None, None])
#endif
ls = 4 if lisp_load_split_pings else 0
hashval = lisp_packet.hash_ports()
if (inner_version == 4):
for i in range(8+ls):
hashval = hashval ^ struct.unpack("B", packet[i+12])[0]
#endfor
elif (inner_version == 6):
for i in range(0, 32+ls, 4):
hashval = hashval ^ struct.unpack("I", packet[i+8:i+12])[0]
#endfor
hashval = (hashval >> 16) + (hashval & 0xffff)
hashval = (hashval >> 8) + (hashval & 0xff)
else:
for i in range(0, 12+ls, 4):
hashval = hashval ^ struct.unpack("I", packet[i:i+4])[0]
#endfor
#endif
if (lisp_data_plane_logging):
best = []
for r in self.best_rloc_set:
if (r.rloc.is_null()): continue
best.append([r.rloc.print_address_no_iid(), r.print_state()])
#endfor
dprint("Packet hash {}, index {}, best-rloc-list: {}".format( \
hex(hashval), hashval % length, red(str(best), False)))
#endif
#
# Get hashed value RLOC.
#
rloc = self.best_rloc_set[hashval % length]
#
# IF this RLOC is not in up state but was taken out of up state by
# not receiving echoed-nonces, try requesting again after some time.
#
echo_nonce = lisp_get_echo_nonce(rloc.rloc, None)
if (echo_nonce):
echo_nonce.change_state(rloc)
if (rloc.no_echoed_nonce_state()):
echo_nonce.request_nonce_sent = None
#endif
#endif
#
# Find a reachabile RLOC.
#
if (rloc.up_state() == False):
stop = hashval % length
index = (stop + 1) % length
while (index != stop):
rloc = self.best_rloc_set[index]
if (rloc.up_state()): break
index = (index + 1) % length
#endwhile
if (index == stop):
self.build_best_rloc_set()
return([None, None, None, None, None, None])
#endif
#endif
#
# We are going to use this RLOC. Increment statistics.
#
rloc.stats.increment(len(packet))
#
# Give RLE preference.
#
if (rloc.rle_name and rloc.rle == None):
if (lisp_rle_list.has_key(rloc.rle_name)):
rloc.rle = lisp_rle_list[rloc.rle_name]
#endif
#endif
if (rloc.rle): return([None, None, None, None, rloc.rle, None])
#
# Next check if ELP is cached for this RLOC entry.
#
if (rloc.elp and rloc.elp.use_elp_node):
return([rloc.elp.use_elp_node.address, None, None, None, None,
None])
#endif
#
# Return RLOC address.
#
rloc_addr = None if (rloc.rloc.is_null()) else rloc.rloc
port = rloc.translated_port
action = self.action if (rloc_addr == None) else None
#
# Check to see if we are requesting an nonce to be echoed, or we are
# echoing a nonce.
#
nonce = None
if (echo_nonce and echo_nonce.request_nonce_timeout() == False):
nonce = echo_nonce.get_request_or_echo_nonce(ipc_socket, rloc_addr)
#endif
#
# If no RLOC address, check for native-forward.
#
return([rloc_addr, port, nonce, action, None, rloc])
#enddef
def do_rloc_sets_match(self, rloc_address_set):
if (len(self.rloc_set) != len(rloc_address_set)): return(False)
#
# Compare an array of lisp_address()es with the lisp_mapping()
# rloc-set which is an array of lisp_rloc()s.
#
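# Note: the inner loop below sets 'rloc' to None when a match is found;
# if it falls off the end and 'rloc' still equals the last element of
# rloc_address_set, that rloc_entry had no match.
#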
for rloc_entry in self.rloc_set:
for rloc in rloc_address_set:
if (rloc.is_exact_match(rloc_entry.rloc) == False): continue
rloc = None
break
#endfor
if (rloc == rloc_address_set[-1]): return(False)
#endfor
return(True)
#enddef
def get_rloc(self, rloc):
for rloc_entry in self.rloc_set:
r = rloc_entry.rloc
if (rloc.is_exact_match(r)): return(rloc_entry)
#endfor
return(None)
#enddef
def get_rloc_by_interface(self, interface):
for rloc_entry in self.rloc_set:
if (rloc_entry.interface == interface): return(rloc_entry)
#endfor
return(None)
#enddef
def add_db(self):
if (self.group.is_null()):
lisp_db_for_lookups.add_cache(self.eid, self)
else:
db = lisp_db_for_lookups.lookup_cache(self.group, True)
if (db == None):
db = lisp_mapping(self.group, self.group, [])
lisp_db_for_lookups.add_cache(self.group, db)
#endif
db.add_source_entry(self)
#endif
#enddef
def add_cache(self, do_ipc=True):
if (self.group.is_null()):
lisp_map_cache.add_cache(self.eid, self)
if (lisp_program_hardware): lisp_program_vxlan_hardware(self)
else:
mc = lisp_map_cache.lookup_cache(self.group, True)
if (mc == None):
mc = lisp_mapping(self.group, self.group, [])
mc.eid.copy_address(self.group)
mc.group.copy_address(self.group)
lisp_map_cache.add_cache(self.group, mc)
#endif
if (self.eid.is_null()): self.eid.make_default_route(mc.group)
mc.add_source_entry(self)
#endif
if (do_ipc): lisp_write_ipc_map_cache(True, self)
#enddef
def delete_cache(self):
self.delete_rlocs_from_rloc_probe_list()
lisp_write_ipc_map_cache(False, self)
if (self.group.is_null()):
lisp_map_cache.delete_cache(self.eid)
if (lisp_program_hardware):
prefix = self.eid.print_prefix_no_iid()
os.system("ip route delete {}".format(prefix))
#endif
else:
mc = lisp_map_cache.lookup_cache(self.group, True)
if (mc == None): return
smc = mc.lookup_source_cache(self.eid, True)
if (smc == None): return
mc.source_cache.delete_cache(self.eid)
if (mc.source_cache.cache_size() == 0):
lisp_map_cache.delete_cache(self.group)
#endif
#endif
#enddef
def add_source_entry(self, source_mc):
if (self.source_cache == None): self.source_cache = lisp_cache()
self.source_cache.add_cache(source_mc.eid, source_mc)
#enddef
def lookup_source_cache(self, source, exact):
if (self.source_cache == None): return(None)
return(self.source_cache.lookup_cache(source, exact))
#enddef
def dynamic_eid_configured(self):
return(self.dynamic_eids != None)
#enddef
def star_secondary_iid(self, prefix):
if (self.secondary_iid == None): return(prefix)
iid = "," + str(self.secondary_iid)
return(prefix.replace(iid, iid + "*"))
#enddef
def increment_decap_stats(self, packet):
port = packet.udp_dport
if (port == LISP_DATA_PORT):
rloc = self.get_rloc(packet.outer_dest)
else:
#
# Only works with one translated RLOC.
#
for rloc in self.rloc_set:
if (rloc.translated_port != 0): break
#endfor
#endif
if (rloc != None): rloc.stats.increment(len(packet.packet))
self.stats.increment(len(packet.packet))
#enddef
def rtrs_in_rloc_set(self):
for rloc in self.rloc_set:
if (rloc.is_rtr()): return(True)
#endfor
return(False)
#enddef
def add_recent_source(self, source):
self.recent_sources[source.print_address()] = lisp_get_timestamp()
#enddef
#endclass
class lisp_dynamic_eid():
def __init__(self):
self.dynamic_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.uptime = lisp_get_timestamp()
self.interface = None
self.last_packet = None
self.timeout = LISP_DEFAULT_DYN_EID_TIMEOUT
#enddef
def get_timeout(self, interface):
try:
lisp_interface = lisp_myinterfaces[interface]
self.timeout = lisp_interface.dynamic_eid_timeout
except:
self.timeout = LISP_DEFAULT_DYN_EID_TIMEOUT
#endtry
#enddef
#endclass
class lisp_group_mapping():
def __init__(self, group_name, ms_name, group_prefix, sources, rle_addr):
self.group_name = group_name
self.group_prefix = group_prefix
self.use_ms_name = ms_name
self.sources = sources
self.rle_address = rle_addr
#enddef
def add_group(self):
lisp_group_mapping_list[self.group_name] = self
#enddef
#endclass
#
# lisp_is_group_more_specific
#
# Take group address in string format and see if it is more specific than
# the group-prefix in class lisp_group_mapping(). If more specific, return
# mask-length, otherwise return -1.
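# For example (illustrative values), group "224.1.1.1" checked against a
# group-prefix of 224.0.0.0/4 returns 4.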
#
def lisp_is_group_more_specific(group_str, group_mapping):
iid = group_mapping.group_prefix.instance_id
mask_len = group_mapping.group_prefix.mask_len
group = lisp_address(LISP_AFI_IPV4, group_str, 32, iid)
if (group.is_more_specific(group_mapping.group_prefix)): return(mask_len)
return(-1)
#enddef
#
# lisp_lookup_group
#
# Lookup group address in lisp_group_mapping_list{}.
#
def lisp_lookup_group(group):
best = None
for gm in lisp_group_mapping_list.values():
mask_len = lisp_is_group_more_specific(group, gm)
if (mask_len == -1): continue
if (best == None or mask_len > best.group_prefix.mask_len): best = gm
#endfor
return(best)
#enddef
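#
# lisp_site_flags
#
# Display strings for site registration flags. The {} in each string is
# filled in with "" or "not " by lisp_site_eid.print_flags() below.
#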
lisp_site_flags = {
"P": "ETR is {}Requesting Map-Server to Proxy Map-Reply",
"S": "ETR is {}LISP-SEC capable",
"I": "xTR-ID and site-ID are {}included in Map-Register",
"T": "Use Map-Register TTL field to timeout registration is {}set",
"R": "Merging registrations are {}requested",
"M": "ETR is {}a LISP Mobile-Node",
"N": "ETR is {}requesting Map-Notify messages from Map-Server"
}
class lisp_site():
def __init__(self):
self.site_name = ""
self.description = ""
self.shutdown = False
self.auth_sha1_or_sha2 = False
self.auth_key = {}
self.encryption_key = None
self.allowed_prefixes = {}
self.allowed_prefixes_sorted = []
self.allowed_rlocs = {}
self.map_notifies_sent = 0
self.map_notify_acks_received = 0
#enddef
#endclass
class lisp_site_eid():
def __init__(self, site):
self.site = site
self.eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.group = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.first_registered = 0
self.last_registered = 0
self.last_registerer = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.register_ttl = LISP_SITE_TIMEOUT_CHECK_INTERVAL * 3
self.registered = False
self.registered_rlocs = []
self.auth_sha1_or_sha2 = False
self.individual_registrations = {}
self.map_registers_received = 0
self.proxy_reply_requested = False
self.force_proxy_reply = False
self.force_nat_proxy_reply = False
self.force_ttl = None
self.pitr_proxy_reply_drop = False
self.proxy_reply_action = ""
self.lisp_sec_present = False
self.map_notify_requested = False
self.mobile_node_requested = False
self.echo_nonce_capable = False
self.use_register_ttl_requested = False
self.merge_register_requested = False
self.xtr_id_present = False
self.xtr_id = 0
self.site_id = 0
self.accept_more_specifics = False
self.parent_for_more_specifics = None
self.dynamic = False
self.more_specific_registrations = []
self.source_cache = None
self.inconsistent_registration = False
self.policy = None
self.require_signature = False
#enddef
def print_eid_tuple(self):
return(lisp_print_eid_tuple(self.eid, self.group))
#enddef
def print_flags(self, html):
if (html == False):
output = "{}-{}-{}-{}-{}-{}-{}".format( \
"P" if self.proxy_reply_requested else "p",
"S" if self.lisp_sec_present else "s",
"I" if self.xtr_id_present else "i",
"T" if self.use_register_ttl_requested else "t",
"R" if self.merge_register_requested else "r",
"M" if self.mobile_node_requested else "m",
"N" if self.map_notify_requested else "n")
else:
bits = self.print_flags(False)
bits = bits.split("-")
output = ""
for bit in bits:
bit_str = lisp_site_flags[bit.upper()]
bit_str = bit_str.format("" if bit.isupper() else "not ")
output += lisp_span(bit, bit_str)
if (bit.lower() != "n"): output += "-"
#endfor
#endif
return(output)
#enddef
def copy_state_to_parent(self, child):
self.xtr_id = child.xtr_id
self.site_id = child.site_id
self.first_registered = child.first_registered
self.last_registered = child.last_registered
self.last_registerer = child.last_registerer
self.register_ttl = child.register_ttl
if (self.registered == False):
self.first_registered = lisp_get_timestamp()
#endif
self.auth_sha1_or_sha2 = child.auth_sha1_or_sha2
self.registered = child.registered
self.proxy_reply_requested = child.proxy_reply_requested
self.lisp_sec_present = child.lisp_sec_present
self.xtr_id_present = child.xtr_id_present
self.use_register_ttl_requested = child.use_register_ttl_requested
self.merge_register_requested = child.merge_register_requested
self.mobile_node_requested = child.mobile_node_requested
self.map_notify_requested = child.map_notify_requested
#enddef
def build_sort_key(self):
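#
# Build a sortable key string from the EID (and group, if present) by
# embedding each prefix's mask-length into the lisp_cache() lookup key.
#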
sort_cache = lisp_cache()
ml, key = sort_cache.build_key(self.eid)
gkey = ""
if (self.group.is_null() == False):
gml, gkey = sort_cache.build_key(self.group)
gkey = "-" + gkey[0:12] + "-" + str(gml) + "-" + gkey[12::]
#endif
key = key[0:12] + "-" + str(ml) + "-" + key[12::] + gkey
del(sort_cache)
return(key)
#enddef
def merge_in_site_eid(self, child):
rle_changed = False
if (self.group.is_null()):
self.merge_rlocs_in_site_eid()
else:
rle_changed = self.merge_rles_in_site_eid()
#endif
#
# If a child registration was passed, copy some fields to the parent
# copy.
#
if (child != None):
self.copy_state_to_parent(child)
self.map_registers_received += 1
#endif
return(rle_changed)
#enddef
def copy_rloc_records(self):
new_list = []
for rloc_entry in self.registered_rlocs:
new_list.append(copy.deepcopy(rloc_entry))
#endfor
return(new_list)
#enddef
def merge_rlocs_in_site_eid(self):
self.registered_rlocs = []
for site_eid in self.individual_registrations.values():
if (self.site_id != site_eid.site_id): continue
if (site_eid.registered == False): continue
self.registered_rlocs += site_eid.copy_rloc_records()
#endfor
#
# Remove duplicate RLOC addresses if multiple ETRs registered with
# the same RTR-set.
#
new_list = []
for rloc_entry in self.registered_rlocs:
if (rloc_entry.rloc.is_null() or len(new_list) == 0):
new_list.append(rloc_entry)
continue
#endif
for re in new_list:
if (re.rloc.is_null()): continue
if (rloc_entry.rloc.is_exact_match(re.rloc)): break
#endfor
if (re == new_list[-1]): new_list.append(rloc_entry)
#endfor
self.registered_rlocs = new_list
#
# Removal case.
#
if (len(self.registered_rlocs) == 0): self.registered = False
return
#enddef
def merge_rles_in_site_eid(self):
#
# Build temporary old list of RLE nodes in dictionary array.
#
old_rle = {}
for rloc_entry in self.registered_rlocs:
if (rloc_entry.rle == None): continue
for rle_node in rloc_entry.rle.rle_nodes:
addr = rle_node.address.print_address_no_iid()
old_rle[addr] = rle_node.address
#endfor
break
#endfor
#
# Merge in all RLOC entries of an RLOC-set.
#
self.merge_rlocs_in_site_eid()
#
# Remove RLEs that were added as RLOC-records in merge_rlocs_in_
# site_eid(). We only care about the first RLE that is the merged
# set of all the individual registered RLEs. We assume this appears
# first and that all subsequent RLOC-records are the RTR list for
# each registering ETR.
#
new_rloc_list = []
for rloc_entry in self.registered_rlocs:
if (self.registered_rlocs.index(rloc_entry) == 0):
new_rloc_list.append(rloc_entry)
continue
#endif
if (rloc_entry.rle == None): new_rloc_list.append(rloc_entry)
#endfor
self.registered_rlocs = new_rloc_list
#
# Merge RLEs from individuals into master copy and make a temporary
# new_rle list to compare with old_rle. If there is an RLOC-name for
# the RLE, clear it from the merged registration. We want names to
# be per RLE entry and not on the RLOC-record entry it resides in.
#
rle = lisp_rle("")
new_rle = {}
rloc_name = None
for site_eid in self.individual_registrations.values():
if (site_eid.registered == False): continue
irle = site_eid.registered_rlocs[0].rle
if (irle == None): continue
rloc_name = site_eid.registered_rlocs[0].rloc_name
for irle_node in irle.rle_nodes:
addr = irle_node.address.print_address_no_iid()
if (new_rle.has_key(addr)): break
rle_node = lisp_rle_node()
rle_node.address.copy_address(irle_node.address)
rle_node.level = irle_node.level
rle_node.rloc_name = rloc_name
rle.rle_nodes.append(rle_node)
new_rle[addr] = irle_node.address
#endfor
#endfor
#
# Store new copy.
#
if (len(rle.rle_nodes) == 0): rle = None
if (len(self.registered_rlocs) != 0):
self.registered_rlocs[0].rle = rle
if (rloc_name): self.registered_rlocs[0].rloc_name = None
#endif
#
# Check for changes.
#
if (old_rle.keys() == new_rle.keys()): return(False)
lprint("{} {} from {} to {}".format( \
green(self.print_eid_tuple(), False), bold("RLE change", False),
old_rle.keys(), new_rle.keys()))
return(True)
#enddef
def add_cache(self):
if (self.group.is_null()):
lisp_sites_by_eid.add_cache(self.eid, self)
else:
se = lisp_sites_by_eid.lookup_cache(self.group, True)
if (se == None):
se = lisp_site_eid(self.site)
se.eid.copy_address(self.group)
se.group.copy_address(self.group)
lisp_sites_by_eid.add_cache(self.group, se)
#
# See lisp_site_eid_lookup() for special-case details on
# longest-match lookups for (S,G) entries.
#
se.parent_for_more_specifics = self.parent_for_more_specifics
#endif
if (self.eid.is_null()): self.eid.make_default_route(se.group)
se.add_source_entry(self)
#endif
#enddef
def delete_cache(self):
if (self.group.is_null()):
lisp_sites_by_eid.delete_cache(self.eid)
else:
se = lisp_sites_by_eid.lookup_cache(self.group, True)
if (se == None): return
site_eid = se.lookup_source_cache(self.eid, True)
if (site_eid == None): return
if (se.source_cache == None): return
se.source_cache.delete_cache(self.eid)
if (se.source_cache.cache_size() == 0):
lisp_sites_by_eid.delete_cache(self.group)
#endif
#endif
#enddef
def add_source_entry(self, source_se):
if (self.source_cache == None): self.source_cache = lisp_cache()
self.source_cache.add_cache(source_se.eid, source_se)
#enddef
def lookup_source_cache(self, source, exact):
if (self.source_cache == None): return(None)
return(self.source_cache.lookup_cache(source, exact))
#enddef
def is_star_g(self):
if (self.group.is_null()): return(False)
return(self.eid.is_exact_match(self.group))
#enddef
def eid_record_matches(self, eid_record):
if (self.eid.is_exact_match(eid_record.eid) == False): return(False)
if (eid_record.group.is_null()): return(True)
return(eid_record.group.is_exact_match(self.group))
#enddef
def inherit_from_ams_parent(self):
parent = self.parent_for_more_specifics
if (parent == None): return
self.force_proxy_reply = parent.force_proxy_reply
self.force_nat_proxy_reply = parent.force_nat_proxy_reply
self.force_ttl = parent.force_ttl
self.pitr_proxy_reply_drop = parent.pitr_proxy_reply_drop
self.proxy_reply_action = parent.proxy_reply_action
self.echo_nonce_capable = parent.echo_nonce_capable
self.policy = parent.policy
self.require_signature = parent.require_signature
#enddef
def rtrs_in_rloc_set(self):
for rloc_entry in self.registered_rlocs:
if (rloc_entry.is_rtr()): return(True)
#endfor
return(False)
#enddef
def is_rtr_in_rloc_set(self, rtr_rloc):
for rloc_entry in self.registered_rlocs:
if (rloc_entry.rloc.is_exact_match(rtr_rloc) == False): continue
if (rloc_entry.is_rtr()): return(True)
#endfor
return(False)
#enddef
def is_rloc_in_rloc_set(self, rloc):
for rloc_entry in self.registered_rlocs:
if (rloc_entry.rle):
for rle in rloc_entry.rle.rle_nodes:
if (rle.address.is_exact_match(rloc)): return(True)
#endif
#endif
if (rloc_entry.rloc.is_exact_match(rloc)): return(True)
#endfor
return(False)
#enddef
def do_rloc_sets_match(self, prev_rloc_set):
if (len(self.registered_rlocs) != len(prev_rloc_set)): return(False)
for rloc_entry in prev_rloc_set:
old_rloc = rloc_entry.rloc
if (self.is_rloc_in_rloc_set(old_rloc) == False): return(False)
#endfor
return(True)
#enddef
#endclass
class lisp_mr():
def __init__(self, addr_str, dns_name, mr_name):
self.mr_name = mr_name if (mr_name != None) else "all"
self.dns_name = dns_name
self.map_resolver = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.last_dns_resolve = None
self.a_record_index = 0
if (addr_str):
self.map_resolver.store_address(addr_str)
self.insert_mr()
else:
self.resolve_dns_name()
#endif
self.last_used = 0
self.last_reply = 0
self.last_nonce = 0
self.map_requests_sent = 0
self.neg_map_replies_received = 0
self.total_rtt = 0
#enddef
def resolve_dns_name(self):
if (self.dns_name == None): return
if (self.last_dns_resolve and
time.time() - self.last_dns_resolve < 30): return
try:
addresses = socket.gethostbyname_ex(self.dns_name)
self.last_dns_resolve = lisp_get_timestamp()
a_records = addresses[2]
except:
return
#endtry
#
# Check if the number of A-records has changed and this one is no longer
# valid.
#
if (len(a_records) <= self.a_record_index):
self.delete_mr()
return
#endif
addr = a_records[self.a_record_index]
if (addr != self.map_resolver.print_address_no_iid()):
self.delete_mr()
self.map_resolver.store_address(addr)
self.insert_mr()
#endif
#
# If a pull-based LISP-Decent DNS suffix is configured, create another
# lisp_mr() for each additional A-record. Only the master entry
# (A-record index 0) does this.
#
if (lisp_is_decent_dns_suffix(self.dns_name) == False): return
if (self.a_record_index != 0): return
for addr in a_records[1::]:
a = lisp_address(LISP_AFI_NONE, addr, 0, 0)
mr = lisp_get_map_resolver(a, None)
if (mr != None and mr.a_record_index == a_records.index(addr)):
continue
#endif
mr = lisp_mr(addr, None, None)
mr.a_record_index = a_records.index(addr)
mr.dns_name = self.dns_name
mr.last_dns_resolve = lisp_get_timestamp()
#endfor
#
# Check for deletes.
#
delete_list = []
for mr in lisp_map_resolvers_list.values():
if (self.dns_name != mr.dns_name): continue
a = mr.map_resolver.print_address_no_iid()
if (a in a_records): continue
delete_list.append(mr)
#endfor
for mr in delete_list: mr.delete_mr()
#enddef
def insert_mr(self):
key = self.mr_name + self.map_resolver.print_address()
lisp_map_resolvers_list[key] = self
#enddef
def delete_mr(self):
key = self.mr_name + self.map_resolver.print_address()
if (lisp_map_resolvers_list.has_key(key) == False): return
lisp_map_resolvers_list.pop(key)
#enddef
#endclass
class lisp_ddt_root():
def __init__(self):
self.root_address = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.public_key = ""
self.priority = 0
self.weight = 0
#enddef
#endclass
class lisp_referral():
def __init__(self):
self.eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.group = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.referral_set = {}
self.referral_type = LISP_DDT_ACTION_NULL
self.referral_source = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.referral_ttl = 0
self.uptime = lisp_get_timestamp()
self.expires = 0
self.source_cache = None
#enddef
def print_referral(self, eid_indent, referral_indent):
uts = lisp_print_elapsed(self.uptime)
ets = lisp_print_future(self.expires)
lprint("{}Referral EID {}, uptime/expires {}/{}, {} referrals:". \
format(eid_indent, green(self.eid.print_prefix(), False), uts,
ets, len(self.referral_set)))
for ref_node in self.referral_set.values():
ref_node.print_ref_node(referral_indent)
#endfor
#enddef
def print_referral_type(self):
if (self.eid.afi == LISP_AFI_ULTIMATE_ROOT): return("root")
if (self.referral_type == LISP_DDT_ACTION_NULL):
return("null-referral")
#endif
if (self.referral_type == LISP_DDT_ACTION_SITE_NOT_FOUND):
return("no-site-action")
#endif
if (self.referral_type > LISP_DDT_ACTION_MAX):
return("invalid-action")
#endif
return(lisp_map_referral_action_string[self.referral_type])
#enddef
def print_eid_tuple(self):
return(lisp_print_eid_tuple(self.eid, self.group))
#enddef
def print_ttl(self):
ttl = self.referral_ttl
if (ttl < 60): return(str(ttl) + " secs")
if ((ttl % 60) == 0):
ttl = str(ttl/60) + " mins"
else:
ttl = str(ttl) + " secs"
#endif
return(ttl)
#enddef
def is_referral_negative(self):
return (self.referral_type in \
(LISP_DDT_ACTION_MS_NOT_REG, LISP_DDT_ACTION_DELEGATION_HOLE,
LISP_DDT_ACTION_NOT_AUTH))
#enddef
def add_cache(self):
if (self.group.is_null()):
lisp_referral_cache.add_cache(self.eid, self)
else:
ref = lisp_referral_cache.lookup_cache(self.group, True)
if (ref == None):
ref = lisp_referral()
ref.eid.copy_address(self.group)
ref.group.copy_address(self.group)
lisp_referral_cache.add_cache(self.group, ref)
#endif
if (self.eid.is_null()): self.eid.make_default_route(ref.group)
ref.add_source_entry(self)
#endif
#enddef
def delete_cache(self):
if (self.group.is_null()):
lisp_referral_cache.delete_cache(self.eid)
else:
ref = lisp_referral_cache.lookup_cache(self.group, True)
if (ref == None): return
sref = ref.lookup_source_cache(self.eid, True)
if (sref == None): return
ref.source_cache.delete_cache(self.eid)
if (ref.source_cache.cache_size() == 0):
lisp_referral_cache.delete_cache(self.group)
#endif
#endif
#enddef
def add_source_entry(self, source_ref):
if (self.source_cache == None): self.source_cache = lisp_cache()
self.source_cache.add_cache(source_ref.eid, source_ref)
#enddef
def lookup_source_cache(self, source, exact):
if (self.source_cache == None): return(None)
return(self.source_cache.lookup_cache(source, exact))
#enddef
#endclass
class lisp_referral_node():
def __init__(self):
self.referral_address = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.priority = 0
self.weight = 0
self.updown = True
self.map_requests_sent = 0
self.no_responses = 0
self.uptime = lisp_get_timestamp()
#enddef
def print_ref_node(self, indent):
ts = lisp_print_elapsed(self.uptime)
lprint("{}referral {}, uptime {}, {}, priority/weight: {}/{}".format( \
indent, red(self.referral_address.print_address(), False), ts,
"up" if self.updown else "down", self.priority, self.weight))
#enddef
#endclass
class lisp_ms():
def __init__(self, addr_str, dns_name, ms_name, alg_id, key_id, pw, pr,
mr, rr, wmn, site_id, ekey_id, ekey):
self.ms_name = ms_name if (ms_name != None) else "all"
self.dns_name = dns_name
self.map_server = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.last_dns_resolve = None
self.a_record_index = 0
if (lisp_map_servers_list == {}):
self.xtr_id = lisp_get_control_nonce()
else:
self.xtr_id = lisp_map_servers_list.values()[0].xtr_id
#endif
self.alg_id = alg_id
self.key_id = key_id
self.password = pw
self.proxy_reply = pr
self.merge_registrations = mr
self.refresh_registrations = rr
self.want_map_notify = wmn
self.site_id = site_id
self.map_registers_sent = 0
self.map_registers_multicast_sent = 0
self.map_notifies_received = 0
self.map_notify_acks_sent = 0
self.ekey_id = ekey_id
self.ekey = ekey
if (addr_str):
self.map_server.store_address(addr_str)
self.insert_ms()
else:
self.resolve_dns_name()
#endif
#enddef
def resolve_dns_name(self):
if (self.dns_name == None): return
if (self.last_dns_resolve and
time.time() - self.last_dns_resolve < 30): return
try:
addresses = socket.gethostbyname_ex(self.dns_name)
self.last_dns_resolve = lisp_get_timestamp()
a_records = addresses[2]
except:
return
#endtry
#
# Check if the number of A-records has changed and this one is no longer
# valid.
#
if (len(a_records) <= self.a_record_index):
self.delete_ms()
return
#endif
addr = a_records[self.a_record_index]
if (addr != self.map_server.print_address_no_iid()):
self.delete_ms()
self.map_server.store_address(addr)
self.insert_ms()
#endif
#
# If a pull-based LISP-Decent DNS suffix is configured, create another
# lisp_ms() for each additional A-record. Only the master entry
# (A-record index 0) does this.
#
if (lisp_is_decent_dns_suffix(self.dns_name) == False): return
if (self.a_record_index != 0): return
for addr in a_records[1::]:
a = lisp_address(LISP_AFI_NONE, addr, 0, 0)
ms = lisp_get_map_server(a)
if (ms != None and ms.a_record_index == a_records.index(addr)):
continue
#endif
ms = copy.deepcopy(self)
ms.map_server.store_address(addr)
ms.a_record_index = a_records.index(addr)
ms.last_dns_resolve = lisp_get_timestamp()
ms.insert_ms()
#endfor
#
# Check for deletes.
#
delete_list = []
for ms in lisp_map_servers_list.values():
if (self.dns_name != ms.dns_name): continue
a = ms.map_server.print_address_no_iid()
if (a in a_records): continue
delete_list.append(ms)
#endfor
for ms in delete_list: ms.delete_ms()
#enddef
def insert_ms(self):
key = self.ms_name + self.map_server.print_address()
lisp_map_servers_list[key] = self
#enddef
def delete_ms(self):
key = self.ms_name + self.map_server.print_address()
if (lisp_map_servers_list.has_key(key) == False): return
lisp_map_servers_list.pop(key)
#enddef
#endclass
class lisp_interface():
def __init__(self, device):
self.interface_name = ""
self.device = device
self.instance_id = None
self.bridge_socket = None
self.raw_socket = None
self.dynamic_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.dynamic_eid_device = None
self.dynamic_eid_timeout = LISP_DEFAULT_DYN_EID_TIMEOUT
self.multi_tenant_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
#enddef
def add_interface(self):
lisp_myinterfaces[self.device] = self
#enddef
def get_instance_id(self):
return(self.instance_id)
#enddef
def get_socket(self):
return(self.raw_socket)
#enddef
def get_bridge_socket(self):
return(self.bridge_socket)
#enddef
def does_dynamic_eid_match(self, eid):
if (self.dynamic_eid.is_null()): return(False)
return(eid.is_more_specific(self.dynamic_eid))
#enddef
def set_socket(self, device):
s = socket.socket(socket.AF_INET, socket.SOCK_RAW, socket.IPPROTO_RAW)
s.setsockopt(socket.SOL_IP, socket.IP_HDRINCL, 1)
try:
s.setsockopt(socket.SOL_SOCKET, socket.SO_BINDTODEVICE, device)
except:
s.close()
s = None
#endtry
self.raw_socket = s
#enddef
def set_bridge_socket(self, device):
s = socket.socket(socket.PF_PACKET, socket.SOCK_RAW)
try:
s.bind((device, 0))
self.bridge_socket = s
except:
return
#endtry
#enddef
#endclass
class lisp_datetime():
def __init__(self, datetime_str):
self.datetime_name = datetime_str
self.datetime = None
self.parse_datetime()
#enddef
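#
# Datetime strings are of the form "YYYY-MM-DD-HH:MM:SS" (see now()
# below). parse_datetime() strips the punctuation so values can be
# compared as integers.
#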
def valid_datetime(self):
ds = self.datetime_name
if (ds.find(":") == -1): return(False)
if (ds.find("-") == -1): return(False)
year, month, day, time = ds[0:4], ds[5:7], ds[8:10], ds[11::]
if ((year + month + day).isdigit() == False): return(False)
if (month < "01" and month > "12"): return(False)
if (day < "01" and day > "31"): return(False)
hour, mi, sec = time.split(":")
if ((hour + mi + sec).isdigit() == False): return(False)
if (hour < "00" and hour > "23"): return(False)
if (mi < "00" and mi > "59"): return(False)
if (sec < "00" and sec > "59"): return(False)
return(True)
#enddef
def parse_datetime(self):
dt = self.datetime_name
dt = dt.replace("-", "")
dt = dt.replace(":", "")
self.datetime = int(dt)
#enddef
def now(self):
ts = datetime.datetime.now().strftime("%Y-%m-%d-%H:%M:%S")
ts = lisp_datetime(ts)
return(ts)
#enddef
def print_datetime(self):
return(self.datetime_name)
#enddef
def future(self):
return(self.datetime > self.now().datetime)
#enddef
def past(self):
return(self.future() == False)
#enddef
def now_in_range(self, upper):
return(self.past() and upper.future())
#enddef
def this_year(self):
now = str(self.now().datetime)[0:4]
ts = str(self.datetime)[0:4]
return(ts == now)
#enddef
def this_month(self):
now = str(self.now().datetime)[0:6]
ts = str(self.datetime)[0:6]
return(ts == now)
#enddef
def today(self):
now = str(self.now().datetime)[0:8]
ts = str(self.datetime)[0:8]
return(ts == now)
#enddef
#endclass
#
# Policy data structures.
#
class lisp_policy_match():
def __init__(self):
self.source_eid = None
self.dest_eid = None
self.source_rloc = None
self.dest_rloc = None
self.rloc_record_name = None
self.geo_name = None
self.elp_name = None
self.rle_name = None
self.json_name = None
self.datetime_lower = None
self.datetime_upper = None
#enddef
#endclass
class lisp_policy():
def __init__(self, policy_name):
self.policy_name = policy_name
self.match_clauses = []
self.set_action = None
self.set_record_ttl = None
self.set_source_eid = None
self.set_dest_eid = None
self.set_rloc_address = None
self.set_rloc_record_name = None
self.set_geo_name = None
self.set_elp_name = None
self.set_rle_name = None
self.set_json_name = None
#enddef
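#
# A Map-Request matches this policy if at least one match clause is
# satisfied; within a clause, every populated field (source-EID,
# dest-EID, source-RLOC, datetime range) must match.
#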
def match_policy_map_request(self, mr, srloc):
for m in self.match_clauses:
p = m.source_eid
t = mr.source_eid
if (p and t and t.is_more_specific(p) == False): continue
p = m.dest_eid
t = mr.target_eid
if (p and t and t.is_more_specific(p) == False): continue
p = m.source_rloc
t = srloc
if (p and t and t.is_more_specific(p) == False): continue
l = m.datetime_lower
u = m.datetime_upper
if (l and u and l.now_in_range(u) == False): continue
return(True)
#endfor
return(False)
#enddef
def set_policy_map_reply(self):
all_none = (self.set_rloc_address == None and
self.set_rloc_record_name == None and self.set_geo_name == None and
self.set_elp_name == None and self.set_rle_name == None)
if (all_none): return(None)
rloc = lisp_rloc()
if (self.set_rloc_address):
rloc.rloc.copy_address(self.set_rloc_address)
addr = rloc.rloc.print_address_no_iid()
lprint("Policy set-rloc-address to {}".format(addr))
#endif
if (self.set_rloc_record_name):
rloc.rloc_name = self.set_rloc_record_name
name = blue(rloc.rloc_name, False)
lprint("Policy set-rloc-record-name to {}".format(name))
#endif
if (self.set_geo_name):
rloc.geo_name = self.set_geo_name
name = rloc.geo_name
not_found = "" if lisp_geo_list.has_key(name) else \
"(not configured)"
lprint("Policy set-geo-name '{}' {}".format(name, not_found))
#endif
if (self.set_elp_name):
rloc.elp_name = self.set_elp_name
name = rloc.elp_name
not_found = "" if lisp_elp_list.has_key(name) else \
"(not configured)"
lprint("Policy set-elp-name '{}' {}".format(name, not_found))
#endif
if (self.set_rle_name):
rloc.rle_name = self.set_rle_name
name = rloc.rle_name
not_found = "" if lisp_rle_list.has_key(name) else \
"(not configured)"
lprint("Policy set-rle-name '{}' {}".format(name, not_found))
#endif
if (self.set_json_name):
rloc.json_name = self.set_json_name
name = rloc.json_name
not_found = "" if lisp_json_list.has_key(name) else \
"(not configured)"
lprint("Policy set-json-name '{}' {}".format(name, not_found))
#endif
return(rloc)
#enddef
def save_policy(self):
lisp_policies[self.policy_name] = self
#enddef
#endclass
class lisp_pubsub():
def __init__(self, itr, port, nonce, ttl, xtr_id):
self.itr = itr
self.port = port
self.nonce = nonce
self.uptime = lisp_get_timestamp()
self.ttl = ttl
self.xtr_id = xtr_id
self.map_notify_count = 0
#enddef
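#
# Pubsub state is stored in lisp_pubsub_cache, keyed first by EID-prefix
# string and then by xtr-id. add() replaces any existing entry for the
# same xtr-id.
#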
def add(self, eid_prefix):
ttl = self.ttl
eid = eid_prefix.print_prefix()
if (lisp_pubsub_cache.has_key(eid) == False):
lisp_pubsub_cache[eid] = {}
#endif
pubsub = lisp_pubsub_cache[eid]
ar = "Add"
if (pubsub.has_key(self.xtr_id)):
ar = "Replace"
del(pubsub[self.xtr_id])
#endif
pubsub[self.xtr_id] = self
eid = green(eid, False)
itr = red(self.itr.print_address_no_iid(), False)
xtr_id = "0x" + lisp_hex_string(self.xtr_id)
lprint("{} pubsub state {} for {}, xtr-id: {}, ttl {}".format(ar, eid,
itr, xtr_id, ttl))
#enddef
def delete(self, eid_prefix):
eid = eid_prefix.print_prefix()
itr = red(self.itr.print_address_no_iid(), False)
xtr_id = "0x" + lisp_hex_string(self.xtr_id)
if (lisp_pubsub_cache.has_key(eid)):
pubsub = lisp_pubsub_cache[eid]
if (pubsub.has_key(self.xtr_id)):
pubsub.pop(self.xtr_id)
lprint("Remove pubsub state {} for {}, xtr-id: {}".format(eid,
itr, xtr_id))
#endif
#endif
#enddef
#endclass
#
# lisp_trace
#
# The LISP-Trace message format is:
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# |Type=9 | 0 | Local Private Port |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Local Private IPv4 RLOC |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Nonce . . . |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | . . . Nonce |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
class lisp_trace():
def __init__(self):
self.nonce = lisp_get_control_nonce()
self.packet_json = []
self.local_rloc = None
self.local_port = None
self.lisp_socket = None
#enddef
def print_trace(self):
jd = self.packet_json
lprint("LISP-Trace JSON: '{}'".format(jd))
#enddef
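#
# encode() builds the header shown above with Type 9 in the high-order
# nibble. The local private RLOC and port fields are left as zero here;
# decode() extracts them from a received message.
#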
def encode(self):
first_long = socket.htonl(0x90000000)
packet = struct.pack("II", first_long, 0)
packet += struct.pack("Q", self.nonce)
packet += json.dumps(self.packet_json)
return(packet)
#enddef
def decode(self, packet):
packet_format = "I"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(False)
first_long = struct.unpack(packet_format, packet[:format_size])[0]
packet = packet[format_size::]
first_long = socket.ntohl(first_long)
if ((first_long & 0xff000000) != 0x90000000): return(False)
if (len(packet) < format_size): return(False)
addr = struct.unpack(packet_format, packet[:format_size])[0]
packet = packet[format_size::]
addr = socket.ntohl(addr)
v1 = addr >> 24
v2 = (addr >> 16) & 0xff
v3 = (addr >> 8) & 0xff
v4 = addr & 0xff
self.local_rloc = "{}.{}.{}.{}".format(v1, v2, v3, v4)
self.local_port = str(first_long & 0xffff)
packet_format = "Q"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(False)
self.nonce = struct.unpack(packet_format, packet[:format_size])[0]
packet = packet[format_size::]
if (len(packet) == 0): return(True)
try:
self.packet_json = json.loads(packet)
except:
return(False)
#endtry
return(True)
#enddef
def myeid(self, eid):
return(lisp_is_myeid(eid))
#enddef
def return_to_sender(self, lisp_socket, rts_rloc, packet):
rloc, port = self.rtr_cache_nat_trace_find(rts_rloc)
if (rloc == None):
rloc, port = rts_rloc.split(":")
port = int(port)
lprint("Send LISP-Trace to address {}:{}".format(rloc, port))
else:
lprint("Send LISP-Trace to translated address {}:{}".format(rloc,
port))
#endif
if (lisp_socket == None):
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.bind(("0.0.0.0", LISP_TRACE_PORT))
s.sendto(packet, (rloc, port))
s.close()
else:
lisp_socket.sendto(packet, (rloc, port))
#endif
#enddef
def packet_length(self):
udp = 8; trace = 4 + 4 + 8
return(udp + trace + len(json.dumps(self.packet_json)))
#enddef
def rtr_cache_nat_trace(self, translated_rloc, translated_port):
key = self.local_rloc + ":" + self.local_port
value = (translated_rloc, translated_port)
lisp_rtr_nat_trace_cache[key] = value
lprint("Cache NAT Trace addresses {} -> {}".format(key, value))
#enddef
def rtr_cache_nat_trace_find(self, local_rloc_and_port):
key = local_rloc_and_port
try: value = lisp_rtr_nat_trace_cache[key]
except: value = (None, None)
return(value)
#enddef
#endclass
#------------------------------------------------------------------------------
#
# lisp_get_map_server
#
# Return a lisp_ms() class instance. Variable 'address' is a lisp_address()
# class instance.
#
def lisp_get_map_server(address):
for ms in lisp_map_servers_list.values():
if (ms.map_server.is_exact_match(address)): return(ms)
#endfor
return(None)
#enddef
#
# lisp_get_any_map_server
#
# Return the first lisp_ms() class instance.
#
def lisp_get_any_map_server():
for ms in lisp_map_servers_list.values(): return(ms)
return(None)
#enddef
#
# lisp_get_map_resolver
#
# Get least recently used Map-Resolver if address is not supplied. Variable
# 'eid' takes on 3 values, an EID value in the form of lisp_address(), None,
# or "". Value "" means to use any MR, like the first one. Value None means
# to use a map-resolver-name that has not been configured (i.e. "all").
#
def lisp_get_map_resolver(address, eid):
if (address != None):
addr = address.print_address()
mr = None
for key in lisp_map_resolvers_list:
if (key.find(addr) == -1): continue
mr = lisp_map_resolvers_list[key]
#endfor
return(mr)
#endif
#
# Get database-mapping entry to find out which map-resolver name set we
# should use, or pick one from a non-configured mr-name list. Or, get the
# first one for info-requests.
#
if (eid == ""):
mr_name = ""
elif (eid == None):
mr_name = "all"
else:
db = lisp_db_for_lookups.lookup_cache(eid, False)
mr_name = "all" if db == None else db.use_mr_name
#endif
older = None
for mr in lisp_map_resolvers_list.values():
if (mr_name == ""): return(mr)
if (mr.mr_name != mr_name): continue
if (older == None or mr.last_used < older.last_used): older = mr
#endfor
return(older)
#enddef
#
# lisp_get_decent_map_resolver
#
# Get the Map-Resolver based on the LISP-Decent pull mapping system lookup
# algorithm
#
def lisp_get_decent_map_resolver(eid):
index = lisp_get_decent_index(eid)
dns_name = str(index) + "." + lisp_decent_dns_suffix
lprint("Use LISP-Decent map-resolver {} for EID {}".format( \
bold(dns_name, False), eid.print_prefix()))
older = None
for mr in lisp_map_resolvers_list.values():
if (dns_name != mr.dns_name): continue
if (older == None or mr.last_used < older.last_used): older = mr
#endfor
return(older)
#enddef
#
# lisp_ipv4_input
#
# Process IPv4 data packet for input checking.
#
def lisp_ipv4_input(packet):
#
# Check for an IGMP packet first. Don't verify the IP checksum or test the TTL for it.
#
if (ord(packet[9]) == 2): return([True, packet])
#
# Now calculate checksum for verification.
#
checksum = struct.unpack("H", packet[10:12])[0]
if (checksum == 0):
dprint("Packet arrived with checksum of 0!")
else:
packet = lisp_ip_checksum(packet)
checksum = struct.unpack("H", packet[10:12])[0]
if (checksum != 0):
dprint("IPv4 header checksum failed for inner header")
packet = lisp_format_packet(packet[0:20])
dprint("Packet header: {}".format(packet))
return([False, None])
#endif
#endif
#
# Now check TTL and if not 0, recalculate checksum and return to
# encapsulate.
#
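# Packets arriving with a TTL of 0 or 1 are discarded; otherwise the TTL
# is decremented and the header checksum recomputed before the packet is
# returned for encapsulation.
#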
ttl = struct.unpack("B", packet[8:9])[0]
if (ttl == 0):
dprint("IPv4 packet arrived with ttl 0, packet discarded")
return([False, None])
elif (ttl == 1):
dprint("IPv4 packet {}, packet discarded".format( \
bold("ttl expiry", False)))
return([False, None])
#endif
ttl -= 1
packet = packet[0:8] + struct.pack("B", ttl) + packet[9::]
packet = packet[0:10] + struct.pack("H", 0) + packet[12::]
packet = lisp_ip_checksum(packet)
return([False, packet])
#enddef
#
# lisp_ipv6_input
#
# Process IPv6 data packet for input checking.
#
def lisp_ipv6_input(packet):
dest = packet.inner_dest
packet = packet.packet
#
# Now check TTL and if not 0, recalculate checksum and return to
# encapsulate.
#
ttl = struct.unpack("B", packet[7:8])[0]
if (ttl == 0):
dprint("IPv6 packet arrived with hop-limit 0, packet discarded")
return(None)
elif (ttl == 1):
dprint("IPv6 packet {}, packet discarded".format( \
bold("ttl expiry", False)))
return(None)
#endif
#
# Check for IPv6 link-local addresses. They should not go on overlay.
#
if (dest.is_ipv6_link_local()):
dprint("Do not encapsulate IPv6 link-local packets")
return(None)
#endif
ttl -= 1
packet = packet[0:7] + struct.pack("B", ttl) + packet[8::]
return(packet)
#enddef
#
# lisp_mac_input
#
# Process MAC data frame for input checking. All we need to do is get the
# destination MAC address.
#
def lisp_mac_input(packet):
return(packet)
#enddef
#
# lisp_rate_limit_map_request
#
# Check to see if we have sent a data-triggered Map-Request in the last
# LISP_MAP_REQUEST_RATE_LIMIT seconds. Return True if we should not send
# a Map-Request (rate-limit it).
#
def lisp_rate_limit_map_request(source, dest):
if (lisp_last_map_request_sent == None): return(False)
now = lisp_get_timestamp()
elapsed = now - lisp_last_map_request_sent
rate_limit = (elapsed < LISP_MAP_REQUEST_RATE_LIMIT)
if (rate_limit):
if (source != None): source = source.print_address()
dest = dest.print_address()
dprint("Rate-limiting Map-Request for {} -> {}".format(source, dest))
#endif
return(rate_limit)
#enddef
#
# lisp_send_map_request
#
# From this process, build and send a Map-Request for supplied EID.
#
def lisp_send_map_request(lisp_sockets, lisp_ephem_port, seid, deid, rloc):
global lisp_last_map_request_sent
#
# Set RLOC-probe parameters if caller wants Map-Request to be an
# RLOC-probe. We use probe_port 4341 so the ITR and RTR keying data
# structures can be the same.
#
probe_dest = probe_port = None
if (rloc):
probe_dest = rloc.rloc
probe_port = rloc.translated_port if lisp_i_am_rtr else LISP_DATA_PORT
#endif
#
# If there are no RLOCs found, do not build and send the Map-Request.
#
itr_rloc4, itr_rloc6, device = lisp_myrlocs
if (itr_rloc4 == None):
lprint("Suppress sending Map-Request, IPv4 RLOC not found")
return
#endif
if (itr_rloc6 == None and probe_dest != None and probe_dest.is_ipv6()):
lprint("Suppress sending Map-Request, IPv6 RLOC not found")
return
#endif
map_request = lisp_map_request()
map_request.record_count = 1
map_request.nonce = lisp_get_control_nonce()
map_request.rloc_probe = (probe_dest != None)
#
# Hold request nonce so we can match replies from xTRs that have multiple
# RLOCs. The reason is that the reply's source address may not be the probed
# destination. And in our ETR implementation, we can get the probe-request
# destination in the lisp-core/lisp-etr/lisp-rtr processes.
#
if (rloc): rloc.last_rloc_probe_nonce = map_request.nonce
sg = deid.is_multicast_address()
if (sg):
map_request.target_eid = seid
map_request.target_group = deid
else:
map_request.target_eid = deid
#endif
#
# If lookup is for an IPv6 EID or there is a signature key configured and
# there is a private key file in current directory, tell lisp_map_request()
# to sign Map-Request. For an RTR, we want to verify its map-request
# signature, so it needs to include its own IPv6 EID that matches the
# private-key file.
#
if (map_request.rloc_probe == False):
db = lisp_get_signature_eid()
if (db):
map_request.signature_eid.copy_address(db.eid)
map_request.privkey_filename = "./lisp-sig.pem"
#endif
#endif
#
# Fill in source-eid field.
#
if (seid == None or sg):
map_request.source_eid.afi = LISP_AFI_NONE
else:
map_request.source_eid = seid
#endif
#
# If ITR-RLOC is a private IPv4 address, we need it to be a global address
# for RLOC-probes.
#
# However, if we are an RTR and have a private address, the RTR is behind
# a NAT. The RLOC-probe is encapsulated with source-port 4341 to get
# through NAT. The ETR receiving the RLOC-probe request must return the
# RLOC-probe reply with same translated address/port pair (the same values
# when it encapsulates data packets).
#
if (probe_dest != None and lisp_nat_traversal and lisp_i_am_rtr == False):
if (probe_dest.is_private_address() == False):
itr_rloc4 = lisp_get_any_translated_rloc()
#endif
if (itr_rloc4 == None):
lprint("Suppress sending Map-Request, translated RLOC not found")
return
#endif
#endif
#
# Fill in ITR-RLOCs field. If we don't find an IPv6 address there is
# nothing to store in the ITR-RLOCs list. And we have to use an inner
# source address of 0::0.
#
if (probe_dest == None or probe_dest.is_ipv4()):
if (lisp_nat_traversal and probe_dest == None):
ir = lisp_get_any_translated_rloc()
if (ir != None): itr_rloc4 = ir
#endif
map_request.itr_rlocs.append(itr_rloc4)
#endif
if (probe_dest == None or probe_dest.is_ipv6()):
if (itr_rloc6 == None or itr_rloc6.is_ipv6_link_local()):
itr_rloc6 = None
else:
map_request.itr_rloc_count = 1 if (probe_dest == None) else 0
map_request.itr_rlocs.append(itr_rloc6)
#endif
#endif
#
# Decide what inner source address needs to be for the ECM. We have to
# look at the address-family of the destination EID. If the destination-EID
# is a MAC address, we will use IPv4 in the inner header with a destination
# address of 0.0.0.0.
#
if (probe_dest != None and map_request.itr_rlocs != []):
itr_rloc = map_request.itr_rlocs[0]
else:
if (deid.is_ipv4()):
itr_rloc = itr_rloc4
elif (deid.is_ipv6()):
itr_rloc = itr_rloc6
else:
itr_rloc = itr_rloc4
#endif
#endif
#
# And finally add one EID record. The EID we are looking up.
#
packet = map_request.encode(probe_dest, probe_port)
map_request.print_map_request()
#
# If this is an RLOC-probe, send directly to RLOC and not to mapping
# system. If the RLOC is behind a NAT, we need to data encapsulate it
# from port 4341 to translated destination address and port.
#
if (probe_dest != None):
if (rloc.is_rloc_translated()):
nat_info = lisp_get_nat_info(probe_dest, rloc.rloc_name)
#
# Handle gleaned RLOC case.
#
if (nat_info == None):
r = rloc.rloc.print_address_no_iid()
g = "gleaned-{}".format(r)
p = rloc.translated_port
nat_info = lisp_nat_info(r, g, p)
#endif
lisp_encapsulate_rloc_probe(lisp_sockets, probe_dest, nat_info,
packet)
return
#endif
addr_str = probe_dest.print_address_no_iid()
dest = lisp_convert_4to6(addr_str)
lisp_send(lisp_sockets, dest, LISP_CTRL_PORT, packet)
return
#endif
#
# Get least recently used Map-Resolver. In the RTR make sure there is a
# Map-Resolver in lisp.config with no mr-name or mr-name=all.
#
local_eid = None if lisp_i_am_rtr else seid
if (lisp_decent_pull_xtr_configured()):
mr = lisp_get_decent_map_resolver(deid)
else:
mr = lisp_get_map_resolver(None, local_eid)
#endif
if (mr == None):
lprint("Cannot find Map-Resolver for source-EID {}".format( \
green(seid.print_address(), False)))
return
#endif
mr.last_used = lisp_get_timestamp()
mr.map_requests_sent += 1
if (mr.last_nonce == 0): mr.last_nonce = map_request.nonce
#
# Send ECM based Map-Request to Map-Resolver.
#
if (seid == None): seid = itr_rloc
lisp_send_ecm(lisp_sockets, packet, seid, lisp_ephem_port, deid,
mr.map_resolver)
#
# Set global timestamp for rate-limiting.
#
lisp_last_map_request_sent = lisp_get_timestamp()
#
# Do DNS lookup for Map-Resolver if "dns-name" configured.
#
mr.resolve_dns_name()
return
#enddef
#
# lisp_send_info_request
#
# Send info-request to any map-server configured or to an address supplied
# by the caller.
#
def lisp_send_info_request(lisp_sockets, dest, port, device_name):
#
# Build Info-Request message.
#
info = lisp_info()
info.nonce = lisp_get_control_nonce()
if (device_name): info.hostname += "-" + device_name
addr_str = dest.print_address_no_iid()
#
# Find next-hop for interface 'device_name' if supplied. The "ip route"
# command will produce this:
#
# pi@lisp-pi ~/lisp $ ip route | egrep "default via"
# default via 192.168.1.1 dev eth1
# default via 192.168.1.1 dev wlan0
#
# We then turn the line we want into a "ip route add" command. Then at
# the end of this function we remove the route.
#
# We do this on the ETR only so the lisp-itr and lisp-etr processes don't
# both add and delete host routes (for Info-Request sending purposes) at
# the same time.
#
added_route = False
if (device_name):
save_nh = lisp_get_host_route_next_hop(addr_str)
#
# If we found a host route for the map-server, then both the lisp-itr
# and lisp-etr processes are in this routine at the same time.
# Wait for the host route to go away before proceeding. We will use
# the map-server host route as an IPC lock. For the data port, only
# the lisp-etr process will add a host route to the RTR for Info-
# Requests.
#
if (port == LISP_CTRL_PORT and save_nh != None):
while (True):
time.sleep(.01)
save_nh = lisp_get_host_route_next_hop(addr_str)
if (save_nh == None): break
#endwhile
#endif
default_routes = lisp_get_default_route_next_hops()
for device, nh in default_routes:
if (device != device_name): continue
#
# If there is a data route pointing to same next-hop, don't
# change the routing table. Otherwise, remove saved next-hop,
# add the one we want and later undo this.
#
if (save_nh != nh):
if (save_nh != None):
lisp_install_host_route(addr_str, save_nh, False)
#endif
lisp_install_host_route(addr_str, nh, True)
added_route = True
#endif
break
#endfor
#endif
#
# Encode the Info-Request message and print it.
#
packet = info.encode()
info.print_info()
#
# Send it.
#
cd = "(for control)" if port == LISP_CTRL_PORT else "(for data)"
cd = bold(cd, False)
p = bold("{}".format(port), False)
a = red(addr_str, False)
rtr = "RTR " if port == LISP_DATA_PORT else "MS "
lprint("Send Info-Request to {}{}, port {} {}".format(rtr, a, p, cd))
#
# Send packet to control port via control-sockets interface. For a 4341
# do the same via the lisp-core process but prepend a LISP data header
# to the message.
#
if (port == LISP_CTRL_PORT):
lisp_send(lisp_sockets, dest, LISP_CTRL_PORT, packet)
else:
header = lisp_data_header()
header.instance_id(0xffffff)
header = header.encode()
if (header):
packet = header + packet
#
# The NAT-traversal spec says to use port 4342 as the source port
# but that would mean return data packets will go to the lisp-core
# process. We are going to use an ephemeral port here so packets
# come to this lisp-etr process. The commented out call is to
# allow Info-Requests to use source port 4342 but will break the
# data-plane in this lispers.net implementation.
#
lisp_send(lisp_sockets, dest, LISP_DATA_PORT, packet)
# lisp_send_ipc_to_core(lisp_sockets[2], packet, dest, port)
#endif
#endif
#
# Remove static route to RTR if had added one and restore data route.
#
if (added_route):
lisp_install_host_route(addr_str, None, False)
if (save_nh != None): lisp_install_host_route(addr_str, save_nh, True)
#endif
return
#enddef
#
# lisp_process_info_request
#
# Process received Info-Request message. Return a Info-Reply to sender.
#
def lisp_process_info_request(lisp_sockets, packet, addr_str, sport, rtr_list):
#
# Parse Info-Request so we can return the nonce in the Info-Reply.
#
info = lisp_info()
packet = info.decode(packet)
if (packet == None): return
info.print_info()
#
# Start building the Info-Reply. Copy translated source and translated
# source port from Info-Request.
#
info.info_reply = True
info.global_etr_rloc.store_address(addr_str)
info.etr_port = sport
#
# Put Info-Request hostname (if it was encoded) in private-rloc in
# Info-Reply. Encode it as an AFI=17 distinguished-name.
#
if (info.hostname != None):
info.private_etr_rloc.afi = LISP_AFI_NAME
info.private_etr_rloc.store_address(info.hostname)
#endif
if (rtr_list != None): info.rtr_list = rtr_list
packet = info.encode()
info.print_info()
#
# Send the Info-Reply via the lisp-core process. We are sending from
# a udp46 socket, so we need to prepend ::ffff.
#
lprint("Send Info-Reply to {}".format(red(addr_str, False)))
dest = lisp_convert_4to6(addr_str)
lisp_send(lisp_sockets, dest, sport, packet)
#
# Cache info sources so we can process Map-Requests from them specially
# and proxy Map-Requests when the sources are behind NATs.
#
info_source = lisp_info_source(info.hostname, addr_str, sport)
info_source.cache_address_for_info_source()
return
#enddef
#
# lisp_get_signature_eid
#
# Go through the lisp_db_list (database-mappings) and return the first entry
# whose signature-eid is True.
#
def lisp_get_signature_eid():
for db in lisp_db_list:
if (db.signature_eid): return(db)
#endfor
return(None)
#enddef
#
# lisp_get_any_translated_port
#
# Find a translated port so we can set it to the inner UDP port number for
# ECM Map-Requests.
#
def lisp_get_any_translated_port():
for db in lisp_db_list:
for rloc_entry in db.rloc_set:
if (rloc_entry.translated_rloc.is_null()): continue
return(rloc_entry.translated_port)
#endfor
#endfor
return(None)
#enddef
#
# lisp_get_any_translated_rloc
#
# Find a translated RLOC in any lisp_mapping() from the lisp_db_list. We need
# this to store in an RLE for (S,G) Map-Registers when the ETR is behind NAT
# devices.
#
def lisp_get_any_translated_rloc():
for db in lisp_db_list:
for rloc_entry in db.rloc_set:
if (rloc_entry.translated_rloc.is_null()): continue
return(rloc_entry.translated_rloc)
#endfor
#endfor
return(None)
#enddef
#
# lisp_get_all_translated_rlocs
#
# Return an array of each translated RLOC address in string format.
#
def lisp_get_all_translated_rlocs():
rloc_list = []
for db in lisp_db_list:
for rloc_entry in db.rloc_set:
if (rloc_entry.is_rloc_translated() == False): continue
addr = rloc_entry.translated_rloc.print_address_no_iid()
rloc_list.append(addr)
#endfor
#endfor
return(rloc_list)
#enddef
#
# lisp_update_default_routes
#
# We are an ITR and we received a new RTR-list from the Map-Server. Update
# the RLOCs of the default map-cache entries if they are different.
#
def lisp_update_default_routes(map_resolver, iid, rtr_list):
ignore_private = (os.getenv("LISP_RTR_BEHIND_NAT") != None)
new_rtr_list = {}
for rloc in rtr_list:
if (rloc == None): continue
addr = rtr_list[rloc]
if (ignore_private and addr.is_private_address()): continue
new_rtr_list[rloc] = addr
#endfor
rtr_list = new_rtr_list
prefix_list = []
for afi in [LISP_AFI_IPV4, LISP_AFI_IPV6, LISP_AFI_MAC]:
if (afi == LISP_AFI_MAC and lisp_l2_overlay == False): break
#
# Do unicast routes. We assume unicast and multicast routes are sync'ed
# with the same RLOC-set.
#
prefix = lisp_address(afi, "", 0, iid)
prefix.make_default_route(prefix)
mc = lisp_map_cache.lookup_cache(prefix, True)
if (mc):
if (mc.checkpoint_entry):
lprint("Updating checkpoint entry for {}".format( \
green(mc.print_eid_tuple(), False)))
elif (mc.do_rloc_sets_match(rtr_list.values())):
continue
#endif
mc.delete_cache()
#endif
prefix_list.append([prefix, ""])
#
# Do multicast routes.
#
group = lisp_address(afi, "", 0, iid)
group.make_default_multicast_route(group)
gmc = lisp_map_cache.lookup_cache(group, True)
if (gmc): gmc = gmc.source_cache.lookup_cache(prefix, True)
if (gmc): gmc.delete_cache()
prefix_list.append([prefix, group])
#endfor
if (len(prefix_list) == 0): return
#
# Build RLOC-set.
#
rloc_set = []
for rtr in rtr_list:
rtr_addr = rtr_list[rtr]
rloc_entry = lisp_rloc()
rloc_entry.rloc.copy_address(rtr_addr)
rloc_entry.priority = 254
rloc_entry.mpriority = 255
rloc_entry.rloc_name = "RTR"
rloc_set.append(rloc_entry)
#endfor
for prefix in prefix_list:
mc = lisp_mapping(prefix[0], prefix[1], rloc_set)
mc.mapping_source = map_resolver
mc.map_cache_ttl = LISP_MR_TTL * 60
mc.add_cache()
lprint("Add {} to map-cache with RTR RLOC-set: {}".format( \
green(mc.print_eid_tuple(), False), rtr_list.keys()))
rloc_set = copy.deepcopy(rloc_set)
#endfor
return
#enddef
#
# lisp_process_info_reply
#
# Process received Info-Reply message. Store global RLOC and translated port
# in database-mapping entries if requested.
#
# Returns [global-rloc-address, translated-port-number, new_rtr_set].
#
def lisp_process_info_reply(source, packet, store):
#
# Parse Info-Reply.
#
info = lisp_info()
packet = info.decode(packet)
if (packet == None): return([None, None, False])
info.print_info()
#
# Store RTR list.
#
new_rtr_set = False
for rtr in info.rtr_list:
addr_str = rtr.print_address_no_iid()
if (lisp_rtr_list.has_key(addr_str)):
if (lisp_register_all_rtrs == False): continue
if (lisp_rtr_list[addr_str] != None): continue
#endif
new_rtr_set = True
lisp_rtr_list[addr_str] = rtr
#endfor
#
# If an ITR, install default map-cache entries.
#
if (lisp_i_am_itr and new_rtr_set):
if (lisp_iid_to_interface == {}):
lisp_update_default_routes(source, lisp_default_iid, lisp_rtr_list)
else:
for iid in lisp_iid_to_interface.keys():
lisp_update_default_routes(source, int(iid), lisp_rtr_list)
#endfor
#endif
#endif
#
# Either store in database-mapping entries or return to caller.
#
if (store == False):
return([info.global_etr_rloc, info.etr_port, new_rtr_set])
#endif
#
# If no private-etr-rloc was supplied in the Info-Reply, use the global
# RLOC for all private RLOCs in the database-mapping entries.
#
for db in lisp_db_list:
for rloc_entry in db.rloc_set:
rloc = rloc_entry.rloc
interface = rloc_entry.interface
if (interface == None):
if (rloc.is_null()): continue
if (rloc.is_local() == False): continue
if (info.private_etr_rloc.is_null() == False and
rloc.is_exact_match(info.private_etr_rloc) == False):
continue
#endif
elif (info.private_etr_rloc.is_dist_name()):
rloc_name = info.private_etr_rloc.address
if (rloc_name != rloc_entry.rloc_name): continue
#endif
eid_str = green(db.eid.print_prefix(), False)
rloc_str = red(rloc.print_address_no_iid(), False)
rlocs_match = info.global_etr_rloc.is_exact_match(rloc)
if (rloc_entry.translated_port == 0 and rlocs_match):
lprint("No NAT for {} ({}), EID-prefix {}".format(rloc_str,
interface, eid_str))
continue
#endif
#
# Nothing changed?
#
translated = info.global_etr_rloc
stored = rloc_entry.translated_rloc
if (stored.is_exact_match(translated) and
info.etr_port == rloc_entry.translated_port): continue
lprint("Store translation {}:{} for {} ({}), EID-prefix {}". \
format(red(info.global_etr_rloc.print_address_no_iid(), False),
info.etr_port, rloc_str, interface, eid_str))
rloc_entry.store_translated_rloc(info.global_etr_rloc,
info.etr_port)
#endfor
#endfor
return([info.global_etr_rloc, info.etr_port, new_rtr_set])
#enddef
#
# lisp_test_mr
#
# Send Map-Requests for arbitrary EIDs to (1) prime the map-cache and to (2)
# test the RTT of the Map-Resolvers.
#
def lisp_test_mr(lisp_sockets, port):
return
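#
# Note: the early return above disables Map-Resolver testing; the code
# below is currently unreachable.
#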
lprint("Test Map-Resolvers")
eid = lisp_address(LISP_AFI_IPV4, "", 0, 0)
eid6 = lisp_address(LISP_AFI_IPV6, "", 0, 0)
#
# Send 10.0.0.1 and 192.168.0.1
#
eid.store_address("10.0.0.1")
lisp_send_map_request(lisp_sockets, port, None, eid, None)
eid.store_address("192.168.0.1")
lisp_send_map_request(lisp_sockets, port, None, eid, None)
#
# Send 0100::1 and 8000::1.
#
eid6.store_address("0100::1")
lisp_send_map_request(lisp_sockets, port, None, eid6, None)
eid6.store_address("8000::1")
lisp_send_map_request(lisp_sockets, port, None, eid6, None)
#
# Restart periodic timer.
#
lisp_test_mr_timer = threading.Timer(LISP_TEST_MR_INTERVAL, lisp_test_mr,
[lisp_sockets, port])
lisp_test_mr_timer.start()
return
#enddef
#
# lisp_update_local_rloc
#
# Check if local RLOC has changed and update the lisp_rloc() entry in
# lisp_db(). That is, check whether the private address changed, since this
# ETR could have moved to another NAT or the same NAT device reassigned a
# new private address.
#
# This function is also used when the interface address is not private. It
# allows us to change the RLOC when the address changes.
#
def lisp_update_local_rloc(rloc):
if (rloc.interface == None): return
addr = lisp_get_interface_address(rloc.interface)
if (addr == None): return
old = rloc.rloc.print_address_no_iid()
new = addr.print_address_no_iid()
if (old == new): return
lprint("Local interface address changed on {} from {} to {}".format( \
rloc.interface, old, new))
rloc.rloc.copy_address(addr)
lisp_myrlocs[0] = addr
return
#enddef
#
# lisp_update_encap_port
#
# Check to see if the encapsulation port changed for an RLOC for the supplied
# map-cache entry.
#
def lisp_update_encap_port(mc):
for rloc in mc.rloc_set:
nat_info = lisp_get_nat_info(rloc.rloc, rloc.rloc_name)
if (nat_info == None): continue
if (rloc.translated_port == nat_info.port): continue
lprint(("Encap-port changed from {} to {} for RLOC {}, " + \
"EID-prefix {}").format(rloc.translated_port, nat_info.port,
red(rloc.rloc.print_address_no_iid(), False),
green(mc.print_eid_tuple(), False)))
rloc.store_translated_rloc(rloc.rloc, nat_info.port)
#endfor
return
#enddef
#
# lisp_timeout_map_cache_entry
#
# Check if a specific map-cache entry needs to be removed due to timer expiry.
# If entry does not time out, go through RLOC-set to see if the encapsulation
# port needs updating.
#
# If "program-hardware = yes" is configured, then check a platform specific
# flag (an Arista platform specific command).
#
def lisp_timeout_map_cache_entry(mc, delete_list):
if (mc.map_cache_ttl == None):
lisp_update_encap_port(mc)
return([True, delete_list])
#endif
now = lisp_get_timestamp()
#
# Check refresh timers. Native-Forward entries just return if active,
# else check for encap-port changes for NAT entries. Then return if
# entry still active.
#
if (mc.last_refresh_time + mc.map_cache_ttl > now):
if (mc.action == LISP_NO_ACTION): lisp_update_encap_port(mc)
return([True, delete_list])
#endif
#
# Do not time out NAT-traversal default entries (0.0.0.0/0 and 0::/0).
#
if (lisp_nat_traversal and mc.eid.address == 0 and mc.eid.mask_len == 0):
return([True, delete_list])
#endif
#
# Timed out.
#
elapsed = lisp_print_elapsed(mc.last_refresh_time)
prefix_str = mc.print_eid_tuple()
lprint("Map-cache entry for EID-prefix {} has {}, had uptime of {}". \
format(green(prefix_str, False), bold("timed out", False), elapsed))
#
# Add to delete-list to remove after this loop.
#
delete_list.append(mc)
return([True, delete_list])
#enddef
#
# lisp_timeout_map_cache_walk
#
# Walk the entries in the lisp_map_cache(). And then subsequently walk the
# entries in lisp_mapping.source_cache().
#
def lisp_timeout_map_cache_walk(mc, parms):
delete_list = parms[0]
checkpoint_list = parms[1]
#
# There is only destination state in this map-cache entry.
#
if (mc.group.is_null()):
status, delete_list = lisp_timeout_map_cache_entry(mc, delete_list)
if (delete_list == [] or mc != delete_list[-1]):
checkpoint_list = lisp_write_checkpoint_entry(checkpoint_list, mc)
#endif
return([status, parms])
#endif
if (mc.source_cache == None): return([True, parms])
#
# There is (source, group) state so walk all sources for this group
# entry.
#
parms = mc.source_cache.walk_cache(lisp_timeout_map_cache_entry, parms)
return([True, parms])
#enddef
#
# lisp_timeout_map_cache
#
# Look at TTL expiration for each map-cache entry.
#
def lisp_timeout_map_cache(lisp_map_cache):
parms = [[], []]
parms = lisp_map_cache.walk_cache(lisp_timeout_map_cache_walk, parms)
#
# Now remove from the map-cache all the timed out entries on the
# delete_list[].
#
delete_list = parms[0]
for mc in delete_list: mc.delete_cache()
#
# Write contents of checkpoint_list array to checkpoint file.
#
checkpoint_list = parms[1]
lisp_checkpoint(checkpoint_list)
return
#enddef
#
# lisp_store_nat_info
#
# Store source RLOC and port number of an Info-Request packet sent to port
# 4341 where the packet was translated by a NAT device.
#
# The lisp_nat_state_info{} is a dictionary array whose values are arrays of
# lisp_nat_info() entries. We keep all the current and previous NAT state
# associated with the Info-Request hostname. This is so we can track how much
# movement is occurring.
#
# Return True if the address and port number changed so the caller can fix up
# RLOCs in map-cache entries.
#
def lisp_store_nat_info(hostname, rloc, port):
addr_str = rloc.print_address_no_iid()
msg = "{} NAT state for {}, RLOC {}, port {}".format("{}",
blue(hostname, False), red(addr_str, False), port)
new_nat_info = lisp_nat_info(addr_str, hostname, port)
if (lisp_nat_state_info.has_key(hostname) == False):
lisp_nat_state_info[hostname] = [new_nat_info]
lprint(msg.format("Store initial"))
return(True)
#endif
#
# The youngest entry is always the first element. So check to see if this
# is a refresh of the youngest (current) entry.
#
nat_info = lisp_nat_state_info[hostname][0]
if (nat_info.address == addr_str and nat_info.port == port):
nat_info.uptime = lisp_get_timestamp()
lprint(msg.format("Refresh existing"))
return(False)
#endif
#
# The new state does not match the youngest (current) entry. See if it exists
# as an older entry. If not, we prepend the new state; otherwise, we prepend
# the new state and remove the old state from the array.
#
old_entry = None
for nat_info in lisp_nat_state_info[hostname]:
if (nat_info.address == addr_str and nat_info.port == port):
old_entry = nat_info
break
#endif
#endfor
if (old_entry == None):
lprint(msg.format("Store new"))
else:
lisp_nat_state_info[hostname].remove(old_entry)
lprint(msg.format("Use previous"))
#endif
existing = lisp_nat_state_info[hostname]
lisp_nat_state_info[hostname] = [new_nat_info] + existing
return(True)
#enddef
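#
# As an illustrative example (hostname, address, and ports are made up), after
# two Info-Requests from hostname "xtr-1" whose NAT translation changed, the
# stored state would look like:
#
#   lisp_nat_state_info["xtr-1"] = [lisp_nat_info("192.0.2.1", "xtr-1", 20000),
#                                   lisp_nat_info("192.0.2.1", "xtr-1", 15000)]
#
# where the first (youngest) element is the current translation.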
#
# lisp_get_nat_info
#
# Do lookup to get port number to store in map-cache entry as the encapsulation
# port.
#
def lisp_get_nat_info(rloc, hostname):
if (lisp_nat_state_info.has_key(hostname) == False): return(None)
addr_str = rloc.print_address_no_iid()
for nat_info in lisp_nat_state_info[hostname]:
if (nat_info.address == addr_str): return(nat_info)
#endfor
return(None)
#enddef
#
# lisp_build_info_requests
#
# Check database-mappings to see if there are any private local RLOCs. If
# so, get the translated global RLOC by sending an Info-Request to a
# Map-Server.
#
# To support multi-homing, that is more than one "interface = <device>"
# rloc sub-command clause, you need the following default routes in the
# kernel so Info-Requests can be load-split across interfaces:
#
# sudo ip route add default via <next-hop> dev eth0
# sudo ip route append default via <another-or-same-next-hop> dev eth1
#
# By having these default routes, we can get the next-hop address for the NAT
# interface we are sending the 4341 Info-Request on, and install an ephemeral
# static route to force the Info-Request to go out a specific interface.
#
def lisp_build_info_requests(lisp_sockets, dest, port):
if (lisp_nat_traversal == False): return
#
# Send Info-Request to each configured Map-Resolver and exit loop.
# If we don't find one, try finding a Map-Server. We may send Info-
# Request to an RTR to open up NAT state.
#
dest_list = []
mr_list = []
if (dest == None):
for mr in lisp_map_resolvers_list.values():
mr_list.append(mr.map_resolver)
#endfor
dest_list = mr_list
if (dest_list == []):
for ms in lisp_map_servers_list.values():
dest_list.append(ms.map_server)
#endfor
#endif
if (dest_list == []): return
else:
dest_list.append(dest)
#endif
#
# Find the NAT-traversed interfaces.
#
rloc_list = {}
for db in lisp_db_list:
for rloc_entry in db.rloc_set:
lisp_update_local_rloc(rloc_entry)
if (rloc_entry.rloc.is_null()): continue
if (rloc_entry.interface == None): continue
addr = rloc_entry.rloc.print_address_no_iid()
if (addr in rloc_list): continue
rloc_list[addr] = rloc_entry.interface
#endfor
#endfor
if (rloc_list == {}):
lprint('Suppress Info-Request, no "interface = <device>" RLOC ' + \
"found in any database-mappings")
return
#endif
#
# Send out Info-Requests out the NAT-traversed interfaces that have
# addresses assigned on them.
#
for addr in rloc_list:
interface = rloc_list[addr]
a = red(addr, False)
lprint("Build Info-Request for private address {} ({})".format(a,
interface))
device = interface if len(rloc_list) > 1 else None
for dest in dest_list:
lisp_send_info_request(lisp_sockets, dest, port, device)
#endfor
#endfor
#
# Do DNS lookup for Map-Resolver if "dns-name" configured.
#
if (mr_list != []):
for mr in lisp_map_resolvers_list.values():
mr.resolve_dns_name()
#endfor
#endif
return
#enddef
#
# lisp_valid_address_format
#
# Check to see if the string is a valid address. We are validating IPv4, IPv6,
# MAC, distinguished-name, geo-prefix, and E.164 address formats.
#
def lisp_valid_address_format(kw, value):
if (kw != "address"): return(True)
#
# Check if address is a Distinguished-Name. Must have single quotes.
# Check this first because names could have ".", ":", or "-" in them.
#
if (value[0] == "'" and value[-1] == "'"): return(True)
#
# Do IPv4 test for dotted decimal x.x.x.x.
#
if (value.find(".") != -1):
addr = value.split(".")
if (len(addr) != 4): return(False)
for byte in addr:
if (byte.isdigit() == False): return(False)
if (int(byte) > 255): return(False)
#endfor
return(True)
#endif
#
# Test for a geo-prefix. They have N, S, W, E characters in them.
#
if (value.find("-") != -1):
addr = value.split("-")
for i in ["N", "S", "W", "E"]:
if (i in addr):
if (len(addr) < 8): return(False)
return(True)
#endif
#endfor
#endif
#
# Do MAC test in format xxxx-xxxx-xxxx.
#
if (value.find("-") != -1):
addr = value.split("-")
if (len(addr) != 3): return(False)
for hexgroup in addr:
try: int(hexgroup, 16)
except: return(False)
#endfor
return(True)
#endif
#
# Do IPv6 test in format aaaa:bbbb::cccc:dddd
#
if (value.find(":") != -1):
addr = value.split(":")
if (len(addr) < 2): return(False)
found_null = False
count = 0
for hexgroup in addr:
count += 1
if (hexgroup == ""):
if (found_null):
if (len(addr) == count): break
if (count > 2): return(False)
#endif
found_null = True
continue
#endif
try: int(hexgroup, 16)
except: return(False)
#endfor
return(True)
#endif
#
# Do E.164 format test. The address is a "+" followed by <= 15 BCD digits.
#
if (value[0] == "+"):
addr = value[1::]
for digit in addr:
if (digit.isdigit() == False): return(False)
#endfor
return(True)
#endif
return(False)
#enddef
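#
# Illustrative examples of "address" values this function accepts:
#
#   "'printer'"       - distinguished-name (must be in single quotes)
#   "10.1.2.3"        - IPv4 dotted decimal
#   "fe80::1"         - IPv6
#   "0000-1111-2222"  - MAC in xxxx-xxxx-xxxx format
#   "+14085551212"    - E.164 number
#
# A value like "10.1.2" or "not-an-address" returns False.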
#
# lisp_process_api
#
# Used by all lisp processes (not the lisp-core process) to read data
# structures and return them to the lisp-core process.
#
# Variable data_structure has following format:
#
# "<data-structure-name>%{<dictionary-array-of-parameters>}"
#
def lisp_process_api(process, lisp_socket, data_structure):
api_name, parms = data_structure.split("%")
lprint("Process API request '{}', parameters: '{}'".format(api_name,
parms))
data = []
if (api_name == "map-cache"):
if (parms == ""):
data = lisp_map_cache.walk_cache(lisp_process_api_map_cache, data)
else:
data = lisp_process_api_map_cache_entry(json.loads(parms))
#endif
#endif
if (api_name == "site-cache"):
if (parms == ""):
data = lisp_sites_by_eid.walk_cache(lisp_process_api_site_cache,
data)
else:
data = lisp_process_api_site_cache_entry(json.loads(parms))
#endif
#endif
if (api_name == "map-server"):
parms = {} if (parms == "") else json.loads(parms)
data = lisp_process_api_ms_or_mr(True, parms)
#endif
if (api_name == "map-resolver"):
parms = {} if (parms == "") else json.loads(parms)
data = lisp_process_api_ms_or_mr(False, parms)
#endif
if (api_name == "database-mapping"):
data = lisp_process_api_database_mapping()
#endif
#
# Send IPC back to lisp-core process.
#
data = json.dumps(data)
ipc = lisp_api_ipc(process, data)
lisp_ipc(ipc, lisp_socket, "lisp-core")
return
#enddef
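#
# For example, the lisp-core process could send the string (illustrative):
#
#   'map-cache%{"instance-id": "0", "eid-prefix": "10.0.0.0/8"}'
#
# to request a single map-cache entry, or just "map-cache%" to have the
# entire map-cache walked and returned.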
#
# lisp_process_api_map_cache
#
# Return map-cache to API caller.
#
def lisp_process_api_map_cache(mc, data):
#
# There is only destination state in this map-cache entry.
#
if (mc.group.is_null()): return(lisp_gather_map_cache_data(mc, data))
if (mc.source_cache == None): return([True, data])
#
# There is (source, group) state so walk all sources for this group
# entry.
#
data = mc.source_cache.walk_cache(lisp_gather_map_cache_data, data)
return([True, data])
#enddef
#
# lisp_gather_map_cache_data
#
# Return map-cache to API caller.
#
def lisp_gather_map_cache_data(mc, data):
entry = {}
entry["instance-id"] = str(mc.eid.instance_id)
entry["eid-prefix"] = mc.eid.print_prefix_no_iid()
if (mc.group.is_null() == False):
entry["group-prefix"] = mc.group.print_prefix_no_iid()
#endif
entry["uptime"] = lisp_print_elapsed(mc.uptime)
entry["expires"] = lisp_print_elapsed(mc.uptime)
entry["action"] = lisp_map_reply_action_string[mc.action]
entry["ttl"] = "--" if mc.map_cache_ttl == None else \
str(mc.map_cache_ttl / 60)
#
# Encode in RLOC-set which is an array of entries.
#
rloc_set = []
for rloc in mc.rloc_set:
r = {}
if (rloc.rloc_exists()):
r["address"] = rloc.rloc.print_address_no_iid()
#endif
if (rloc.translated_port != 0):
r["encap-port"] = str(rloc.translated_port)
#endif
r["state"] = rloc.print_state()
if (rloc.geo): r["geo"] = rloc.geo.print_geo()
if (rloc.elp): r["elp"] = rloc.elp.print_elp(False)
if (rloc.rle): r["rle"] = rloc.rle.print_rle(False, False)
if (rloc.json): r["json"] = rloc.json.print_json(False)
if (rloc.rloc_name): r["rloc-name"] = rloc.rloc_name
stats = rloc.stats.get_stats(False, False)
if (stats): r["stats"] = stats
r["uptime"] = lisp_print_elapsed(rloc.uptime)
r["upriority"] = str(rloc.priority)
r["uweight"] = str(rloc.weight)
r["mpriority"] = str(rloc.mpriority)
r["mweight"] = str(rloc.mweight)
reply = rloc.last_rloc_probe_reply
if (reply):
r["last-rloc-probe-reply"] = lisp_print_elapsed(reply)
r["rloc-probe-rtt"] = str(rloc.rloc_probe_rtt)
#endif
r["rloc-hop-count"] = rloc.rloc_probe_hops
r["recent-rloc-hop-counts"] = rloc.recent_rloc_probe_hops
recent_rtts = []
for rtt in rloc.recent_rloc_probe_rtts: recent_rtts.append(str(rtt))
r["recent-rloc-probe-rtts"] = recent_rtts
rloc_set.append(r)
#endfor
entry["rloc-set"] = rloc_set
data.append(entry)
return([True, data])
#enddef
#
# lisp_process_api_map_cache_entry
#
# Parse API parameters in dictionary array, do longest match lookup.
#
def lisp_process_api_map_cache_entry(parms):
iid = parms["instance-id"]
iid = 0 if (iid == "") else int(iid)
#
# Get EID or source of (S,G).
#
eid = lisp_address(LISP_AFI_NONE, "", 0, iid)
eid.store_prefix(parms["eid-prefix"])
dest = eid
source = eid
#
# See if we are doing a group lookup. Make that destination and the EID
# the source.
#
group = lisp_address(LISP_AFI_NONE, "", 0, iid)
if (parms.has_key("group-prefix")):
group.store_prefix(parms["group-prefix"])
dest = group
#endif
data = []
mc = lisp_map_cache_lookup(source, dest)
if (mc): status, data = lisp_process_api_map_cache(mc, data)
return(data)
#enddef
#
# lisp_process_api_site_cache
#
# Return site-cache to API caller.
#
def lisp_process_api_site_cache(se, data):
#
# There is only destination state in this map-cache entry.
#
if (se.group.is_null()): return(lisp_gather_site_cache_data(se, data))
if (se.source_cache == None): return([True, data])
#
# There is (source, group) state so walk all sources for this group
# entry.
#
data = se.source_cache.walk_cache(lisp_gather_site_cache_data, data)
return([True, data])
#enddef
#
# lisp_process_api_ms_or_mr
#
# Return map-server or map-resolver information to API caller.
#
def lisp_process_api_ms_or_mr(ms_or_mr, data):
address = lisp_address(LISP_AFI_NONE, "", 0, 0)
dns_name = data["dns-name"] if data.has_key("dns-name") else None
if (data.has_key("address")):
address.store_address(data["address"])
#endif
value = {}
if (ms_or_mr):
for ms in lisp_map_servers_list.values():
if (dns_name):
if (dns_name != ms.dns_name): continue
else:
if (address.is_exact_match(ms.map_server) == False): continue
#endif
value["dns-name"] = ms.dns_name
value["address"] = ms.map_server.print_address_no_iid()
value["ms-name"] = "" if ms.ms_name == None else ms.ms_name
return([value])
#endfor
else:
for mr in lisp_map_resolvers_list.values():
if (dns_name):
if (dns_name != mr.dns_name): continue
else:
if (address.is_exact_match(mr.map_resolver) == False): continue
#endif
value["dns-name"] = mr.dns_name
value["address"] = mr.map_resolver.print_address_no_iid()
value["mr-name"] = "" if mr.mr_name == None else mr.mr_name
return([value])
#endfor
#endif
return([])
#enddef
#
# lisp_process_api_database_mapping
#
# Return array of database-mappings configured, including dynamic data such
# as the translated_rloc.
#
def lisp_process_api_database_mapping():
data = []
for db in lisp_db_list:
entry = {}
entry["eid-prefix"] = db.eid.print_prefix()
if (db.group.is_null() == False):
entry["group-prefix"] = db.group.print_prefix()
#endif
rlocs = []
for r in db.rloc_set:
rloc = {}
if (r.rloc.is_null() == False):
rloc["rloc"] = r.rloc.print_address_no_iid()
#endif
if (r.rloc_name != None): rloc["rloc-name"] = r.rloc_name
if (r.interface != None): rloc["interface"] = r.interface
tr = r.translated_rloc
if (tr.is_null() == False):
rloc["translated-rloc"] = tr.print_address_no_iid()
#endif
if (rloc != {}): rlocs.append(rloc)
#endfor
#
# Add RLOCs array to EID entry.
#
entry["rlocs"] = rlocs
#
# Add EID entry to return array.
#
data.append(entry)
#endfor
return(data)
#enddef
#
# lisp_gather_site_cache_data
#
# Return site-cache to API caller.
#
def lisp_gather_site_cache_data(se, data):
entry = {}
entry["site-name"] = se.site.site_name
entry["instance-id"] = str(se.eid.instance_id)
entry["eid-prefix"] = se.eid.print_prefix_no_iid()
if (se.group.is_null() == False):
entry["group-prefix"] = se.group.print_prefix_no_iid()
#endif
entry["registered"] = "yes" if se.registered else "no"
entry["first-registered"] = lisp_print_elapsed(se.first_registered)
entry["last-registered"] = lisp_print_elapsed(se.last_registered)
addr = se.last_registerer
addr = "none" if addr.is_null() else addr.print_address()
entry["last-registerer"] = addr
entry["ams"] = "yes" if (se.accept_more_specifics) else "no"
entry["dynamic"] = "yes" if (se.dynamic) else "no"
entry["site-id"] = str(se.site_id)
if (se.xtr_id_present):
entry["xtr-id"] = "0x"+ lisp_hex_string(se.xtr_id)
#endif
#
# Encode in RLOC-set which is an array of entries.
#
rloc_set = []
for rloc in se.registered_rlocs:
r = {}
r["address"] = rloc.rloc.print_address_no_iid() if rloc.rloc_exists() \
else "none"
if (rloc.geo): r["geo"] = rloc.geo.print_geo()
if (rloc.elp): r["elp"] = rloc.elp.print_elp(False)
if (rloc.rle): r["rle"] = rloc.rle.print_rle(False, True)
if (rloc.json): r["json"] = rloc.json.print_json(False)
if (rloc.rloc_name): r["rloc-name"] = rloc.rloc_name
r["uptime"] = lisp_print_elapsed(rloc.uptime)
r["upriority"] = str(rloc.priority)
r["uweight"] = str(rloc.weight)
r["mpriority"] = str(rloc.mpriority)
r["mweight"] = str(rloc.mweight)
rloc_set.append(r)
#endfor
entry["registered-rlocs"] = rloc_set
data.append(entry)
return([True, data])
#enddef
#
# lisp_process_api_site_cache_entry
#
# Parse API parameters in dictionary array, do longest match lookup.
#
def lisp_process_api_site_cache_entry(parms):
iid = parms["instance-id"]
iid = 0 if (iid == "") else int(iid)
#
# Get EID or source of (S,G).
#
eid = lisp_address(LISP_AFI_NONE, "", 0, iid)
eid.store_prefix(parms["eid-prefix"])
#
# See if we are doing a group lookup. Make that destination and the EID
# the source.
#
group = lisp_address(LISP_AFI_NONE, "", 0, iid)
if (parms.has_key("group-prefix")):
group.store_prefix(parms["group-prefix"])
#endif
data = []
se = lisp_site_eid_lookup(eid, group, False)
if (se): lisp_gather_site_cache_data(se, data)
return(data)
#enddef
#
# lisp_get_interface_instance_id
#
# Return instance-ID from lisp_interface() class.
#
def lisp_get_interface_instance_id(device, source_eid):
interface = None
if (lisp_myinterfaces.has_key(device)):
interface = lisp_myinterfaces[device]
#endif
#
# Didn't find an instance-ID configured on a "lisp interface", return
# the default.
#
if (interface == None or interface.instance_id == None):
return(lisp_default_iid)
#endif
#
# If there is a single interface data structure for a given device,
# return the instance-ID configured for it. Otherwise, check to see
# if this is a multi-tenant EID-prefix. And then test all configured
# prefixes in each lisp_interface() for a best match. This allows
# for multi-tenancy on a single xTR interface.
#
iid = interface.get_instance_id()
if (source_eid == None): return(iid)
save_iid = source_eid.instance_id
best = None
for interface in lisp_multi_tenant_interfaces:
if (interface.device != device): continue
prefix = interface.multi_tenant_eid
source_eid.instance_id = prefix.instance_id
if (source_eid.is_more_specific(prefix) == False): continue
if (best == None or best.multi_tenant_eid.mask_len < prefix.mask_len):
best = interface
#endif
#endfor
source_eid.instance_id = save_iid
if (best == None): return(iid)
return(best.get_instance_id())
#enddef
#
# lisp_allow_dynamic_eid
#
# Returns dynamic-eid-device (or device if "dynamic-eid-device" not configured)
# if supplied EID matches configured dynamic-EID in a "lisp interface" command.
# Otherwise, returns None.
#
def lisp_allow_dynamic_eid(device, eid):
if (lisp_myinterfaces.has_key(device) == False): return(None)
interface = lisp_myinterfaces[device]
return_interface = device if interface.dynamic_eid_device == None else \
interface.dynamic_eid_device
if (interface.does_dynamic_eid_match(eid)): return(return_interface)
return(None)
#enddef
#
# lisp_start_rloc_probe_timer
#
# Set the RLOC-probe timer to expire in 1 minute (by default).
#
def lisp_start_rloc_probe_timer(interval, lisp_sockets):
global lisp_rloc_probe_timer
if (lisp_rloc_probe_timer != None): lisp_rloc_probe_timer.cancel()
func = lisp_process_rloc_probe_timer
timer = threading.Timer(interval, func, [lisp_sockets])
lisp_rloc_probe_timer = timer
timer.start()
return
#enddef
#
# lisp_show_rloc_probe_list
#
# Print out the lisp_rloc_probe_list in a readable way for debugging.
#
def lisp_show_rloc_probe_list():
lprint(bold("----- RLOC-probe-list -----", False))
for key in lisp_rloc_probe_list:
rloc_array = lisp_rloc_probe_list[key]
lprint("RLOC {}:".format(key))
for r, e, g in rloc_array:
lprint(" [{}, {}, {}, {}]".format(hex(id(r)), e.print_prefix(),
g.print_prefix(), r.translated_port))
#endfor
#endfor
lprint(bold("---------------------------", False))
return
#enddef
#
# lisp_mark_rlocs_for_other_eids
#
# When the parent RLOC that we have RLOC-probe state for becomes reachable or
# goes unreachable, set the state appropriately for other EIDs using the SAME
# RLOC. The parent is the first RLOC in the eid-list.
#
def lisp_mark_rlocs_for_other_eids(eid_list):
#
# Don't process parent but put its EID in printed list.
#
rloc, e, g = eid_list[0]
eids = [lisp_print_eid_tuple(e, g)]
for rloc, e, g in eid_list[1::]:
rloc.state = LISP_RLOC_UNREACH_STATE
rloc.last_state_change = lisp_get_timestamp()
eids.append(lisp_print_eid_tuple(e, g))
#endfor
unreach = bold("unreachable", False)
rloc_str = red(rloc.rloc.print_address_no_iid(), False)
for eid in eids:
e = green(eid, False)
lprint("RLOC {} went {} for EID {}".format(rloc_str, unreach, e))
#endfor
#
# For each EID, tell external data-plane about new RLOC-set (RLOCs minus
# the ones that just went unreachable).
#
for rloc, e, g in eid_list:
mc = lisp_map_cache.lookup_cache(e, True)
if (mc): lisp_write_ipc_map_cache(True, mc)
#endfor
return
#enddef
#
# lisp_process_rloc_probe_timer
#
# Periodic RLOC-probe timer has expired. Go through cached RLOCs from map-
# cache and decide to suppress or rate-limit RLOC-probes. This function
# is also used to time out "unreachability" state so we can start RLOC-probing
# a previously determined unreachable RLOC.
#
def lisp_process_rloc_probe_timer(lisp_sockets):
lisp_set_exception()
lisp_start_rloc_probe_timer(LISP_RLOC_PROBE_INTERVAL, lisp_sockets)
if (lisp_rloc_probing == False): return
#
# Debug code. Must rebuild image to set boolean to True.
#
if (lisp_print_rloc_probe_list): lisp_show_rloc_probe_list()
#
# Check for egress multi-homing.
#
default_next_hops = lisp_get_default_route_next_hops()
lprint("---------- Start RLOC Probing for {} entries ----------".format( \
len(lisp_rloc_probe_list)))
#
# Walk the list.
#
count = 0
probe = bold("RLOC-probe", False)
for values in lisp_rloc_probe_list.values():
#
# Just do one RLOC-probe for the RLOC even if it is used for
# multiple EID-prefixes.
#
last_rloc = None
for parent_rloc, eid, group in values:
addr_str = parent_rloc.rloc.print_address_no_iid()
#
# Do not RLOC-probe gleaned entries if configured.
#
glean, do_probe, y = lisp_allow_gleaning(eid, None, parent_rloc)
if (glean and do_probe == False):
e = green(eid.print_address(), False)
addr_str += ":{}".format(parent_rloc.translated_port)
lprint("Suppress probe to RLOC {} for gleaned EID {}".format( \
red(addr_str, False), e))
continue
#endif
#
# Do not send RLOC-probes to RLOCs that are in down-state or admin-
# down-state. The RLOC-probe reply will apply for all EID-prefixes
# and the RLOC state will be updated for each.
#
if (parent_rloc.down_state()): continue
#
# Do not send multiple RLOC-probes to the same RLOC for
# different EID-prefixes. Multiple RLOC entries could have
# same RLOC address but different translated ports. These
# need to be treated as different ETRs (they are both behind
# the same NAT) from an RTR's perspective. On an ITR, if the
# RLOC-names are different for the same RLOC address, we need
# to treat these as different ETRs since an ITR does not keep
# port state for an RLOC.
#
if (last_rloc):
parent_rloc.last_rloc_probe_nonce = \
last_rloc.last_rloc_probe_nonce
if (last_rloc.translated_port == parent_rloc.translated_port \
and last_rloc.rloc_name == parent_rloc.rloc_name):
e = green(lisp_print_eid_tuple(eid, group), False)
lprint("Suppress probe to duplicate RLOC {} for {}". \
format(red(addr_str, False), e))
continue
#endif
#endif
nh = None
rloc = None
while (True):
rloc = parent_rloc if rloc == None else rloc.next_rloc
if (rloc == None): break
#
# First check if next-hop/interface is up for egress multi-
# homing.
#
if (rloc.rloc_next_hop != None):
if (rloc.rloc_next_hop not in default_next_hops):
if (rloc.up_state()):
d, n = rloc.rloc_next_hop
rloc.state = LISP_RLOC_UNREACH_STATE
rloc.last_state_change = lisp_get_timestamp()
lisp_update_rtr_updown(rloc.rloc, False)
#endif
unreach = bold("unreachable", False)
lprint("Next-hop {}({}) for RLOC {} is {}".format(n, d,
red(addr_str, False), unreach))
continue
#endif
#endif
#
# Send RLOC-probe to unreach-state RLOCs if down for a minute.
#
last = rloc.last_rloc_probe
delta = 0 if last == None else time.time() - last
if (rloc.unreach_state() and delta < LISP_RLOC_PROBE_INTERVAL):
lprint("Waiting for probe-reply from RLOC {}".format( \
red(addr_str, False)))
continue
#endif
#
# Check to see if we are in nonce-echo mode and no echo has
# been returned.
#
echo_nonce = lisp_get_echo_nonce(None, addr_str)
if (echo_nonce and echo_nonce.request_nonce_timeout()):
rloc.state = LISP_RLOC_NO_ECHOED_NONCE_STATE
rloc.last_state_change = lisp_get_timestamp()
unreach = bold("unreachable", False)
lprint("RLOC {} went {}, nonce-echo failed".format( \
red(addr_str, False), unreach))
lisp_update_rtr_updown(rloc.rloc, False)
continue
#endif
#
# Suppress sending RLOC-probe if we just received a nonce-echo in the
# last minute.
#
if (echo_nonce and echo_nonce.recently_echoed()):
lprint(("Suppress RLOC-probe to {}, nonce-echo " + \
"received").format(red(addr_str, False)))
continue
#endif
#
# Check if we have not received a RLOC-probe reply for one
# timer interval. If not, put RLOC state in "unreach-state".
#
if (rloc.last_rloc_probe != None):
last = rloc.last_rloc_probe_reply
if (last == None): last = 0
delta = time.time() - last
if (rloc.up_state() and \
delta >= LISP_RLOC_PROBE_REPLY_WAIT):
rloc.state = LISP_RLOC_UNREACH_STATE
rloc.last_state_change = lisp_get_timestamp()
lisp_update_rtr_updown(rloc.rloc, False)
unreach = bold("unreachable", False)
lprint("RLOC {} went {}, probe it".format( \
red(addr_str, False), unreach))
lisp_mark_rlocs_for_other_eids(values)
#endif
#endif
rloc.last_rloc_probe = lisp_get_timestamp()
reach = "" if rloc.unreach_state() == False else " unreachable"
#
# Send Map-Request RLOC-probe. We may have to send one for each
# egress interface to the same RLOC address. Install host
# route in RLOC so we can direct the RLOC-probe on an egress
# interface.
#
nh_str = ""
n = None
if (rloc.rloc_next_hop != None):
d, n = rloc.rloc_next_hop
lisp_install_host_route(addr_str, n, True)
nh_str = ", send on nh {}({})".format(n, d)
#endif
#
# Print integrated log message before sending RLOC-probe.
#
rtt = rloc.print_rloc_probe_rtt()
astr = addr_str
if (rloc.translated_port != 0):
astr += ":{}".format(rloc.translated_port)
#endif
astr = red(astr, False)
if (rloc.rloc_name != None):
astr += " (" + blue(rloc.rloc_name, False) + ")"
#endif
lprint("Send {}{} {}, last rtt: {}{}".format(probe, reach,
astr, rtt, nh_str))
#
# If we are doing multiple egress interfaces, check for host
# routes. We don't want the ones we selected for forwarding to
# affect the path RLOC-probes go out in the following loop. We
# will restore the host route while waiting for RLOC-replies.
# Then we'll select a new host route based on best RTT.
#
if (rloc.rloc_next_hop != None):
nh = lisp_get_host_route_next_hop(addr_str)
if (nh): lisp_install_host_route(addr_str, nh, False)
#endif
#
# Might be first time and other RLOCs on the chain may not
# have RLOC address. Copy now.
#
if (rloc.rloc.is_null()):
rloc.rloc.copy_address(parent_rloc.rloc)
#endif
#
# Send RLOC-probe Map-Request.
#
seid = None if (group.is_null()) else eid
deid = eid if (group.is_null()) else group
lisp_send_map_request(lisp_sockets, 0, seid, deid, rloc)
last_rloc = parent_rloc
#
# Remove installed host route.
#
if (n): lisp_install_host_route(addr_str, n, False)
#endwhile
#
# Reinstall host route for forwarding.
#
if (nh): lisp_install_host_route(addr_str, nh, True)
#
# Send 10 RLOC-probes and then sleep for 20 ms.
#
count += 1
if ((count % 10) == 0): time.sleep(0.020)
#endfor
#endfor
lprint("---------- End RLOC Probing ----------")
return
#enddef
#
# lisp_update_rtr_updown
#
# The lisp-itr process will send an IPC message to the lisp-etr process for
# the RLOC-probe status change for an RTR.
#
def lisp_update_rtr_updown(rtr, updown):
global lisp_ipc_socket
#
# This is only done on an ITR.
#
if (lisp_i_am_itr == False): return
#
# When the xtr-parameter indicates to register all RTRs, we are doing it
# unconditionally, so we don't care about the status. Suppress IPC messages.
#
if (lisp_register_all_rtrs): return
rtr_str = rtr.print_address_no_iid()
#
# Check if the RTR address is in the RTR list the lisp-itr process learned
# from the map-server.
#
if (lisp_rtr_list.has_key(rtr_str) == False): return
updown = "up" if updown else "down"
lprint("Send ETR IPC message, RTR {} has gone {}".format(
red(rtr_str, False), bold(updown, False)))
#
# Build IPC message.
#
ipc = "rtr%{}%{}".format(rtr_str, updown)
ipc = lisp_command_ipc(ipc, "lisp-itr")
lisp_ipc(ipc, lisp_ipc_socket, "lisp-etr")
return
#enddef
#
# lisp_process_rloc_probe_reply
#
# We have received a RLOC-probe Map-Reply, process it.
#
def lisp_process_rloc_probe_reply(rloc, source, port, nonce, hop_count, ttl):
probe = bold("RLOC-probe reply", False)
map_reply_addr = rloc.print_address_no_iid()
source_addr = source.print_address_no_iid()
pl = lisp_rloc_probe_list
#
# If we can't find RLOC address from the Map-Reply in the probe-list,
# maybe the same ETR is sourcing from a different address. Check
# that address in the probe-list.
#
addr = map_reply_addr
if (pl.has_key(addr) == False):
addr += ":" + str(port)
if (pl.has_key(addr) == False):
addr = source_addr
if (pl.has_key(addr) == False):
addr += ":" + str(port)
lprint(" Received unsolicited {} from {}/{}, port {}". \
format(probe, red(map_reply_addr, False), red(source_addr,
False), port))
return
#endif
#endif
#endif
#
# Look for RLOC in the RLOC-probe list for EID tuple and fix-up stored
# RLOC-probe state.
#
for rloc, eid, group in lisp_rloc_probe_list[addr]:
if (lisp_i_am_rtr and rloc.translated_port != 0 and
rloc.translated_port != port): continue
rloc.process_rloc_probe_reply(nonce, eid, group, hop_count, ttl)
#endfor
return
#enddef
#
# lisp_db_list_length
#
# Returns the number of entries that need to be registered. This will include
# static and dynamic EIDs.
#
def lisp_db_list_length():
count = 0
for db in lisp_db_list:
count += len(db.dynamic_eids) if db.dynamic_eid_configured() else 1
count += len(db.eid.iid_list)
#endfor
return(count)
#enddef
#
# lisp_is_myeid
#
# Return true if supplied EID is an EID supported by this ETR. That means a
# longest match lookup is done.
#
def lisp_is_myeid(eid):
for db in lisp_db_list:
if (eid.is_more_specific(db.eid)): return(True)
#endfor
return(False)
#enddef
#
# lisp_format_macs
#
# Take two MAC address strings and format them with dashes and place them in
# a format string "0000-1111-2222 -> 3333-4444-5555" for displaying in
# lisp.dprint().
#
def lisp_format_macs(sa, da):
sa = sa[0:4] + "-" + sa[4:8] + "-" + sa[8:12]
da = da[0:4] + "-" + da[4:8] + "-" + da[8:12]
return("{} -> {}".format(sa, da))
#enddef
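#
# For example, lisp_format_macs("0050b6a57980", "00a0c912f034") returns the
# string "0050-b6a5-7980 -> 00a0-c912-f034".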
#
# lisp_get_echo_nonce
#
# Get lisp_nonce_echo() state from lisp_nonce_echo_list{}.
#
def lisp_get_echo_nonce(rloc, rloc_str):
if (lisp_nonce_echoing == False): return(None)
if (rloc): rloc_str = rloc.print_address_no_iid()
echo_nonce = None
if (lisp_nonce_echo_list.has_key(rloc_str)):
echo_nonce = lisp_nonce_echo_list[rloc_str]
#endif
return(echo_nonce)
#enddef
#
# lisp_decode_dist_name
#
# When we have reached an AFI=17 in an EID or RLOC record, return the
# distinguished name, and new position of packet.
#
def lisp_decode_dist_name(packet):
count = 0
dist_name = ""
while(packet[0:1] != "\0"):
if (count == 255): return([None, None])
dist_name += packet[0:1]
packet = packet[1::]
count += 1
#endwhile
packet = packet[1::]
return(packet, dist_name)
#enddef
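#
# For example, if the packet buffer begins with "ietf\0" followed by the rest
# of the record, this returns the remaining buffer and the string "ietf".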
#
# lisp_write_flow_log
#
# The supplied flow_log variable is an array of flow entries, each ending with
# a lisp_packet. This function is called and run in its own thread and then
# exits.
#
def lisp_write_flow_log(flow_log):
f = open("./logs/lisp-flow.log", "a")
count = 0
for flow in flow_log:
packet = flow[3]
flow_str = packet.print_flow(flow[0], flow[1], flow[2])
f.write(flow_str)
count += 1
#endfor
f.close()
del(flow_log)
count = bold(str(count), False)
lprint("Wrote {} flow entries to ./logs/lisp-flow.log".format(count))
return
#enddef
#
# lisp_policy_command
#
# Configure "lisp policy" commands for all processes that need it.
#
def lisp_policy_command(kv_pair):
p = lisp_policy("")
set_iid = None
match_set = []
for i in range(len(kv_pair["datetime-range"])):
match_set.append(lisp_policy_match())
#endfor
for kw in kv_pair.keys():
value = kv_pair[kw]
#
# Check for match parameters.
#
if (kw == "instance-id"):
for i in range(len(match_set)):
v = value[i]
if (v == ""): continue
match = match_set[i]
if (match.source_eid == None):
match.source_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
#endif
if (match.dest_eid == None):
match.dest_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
#endif
match.source_eid.instance_id = int(v)
match.dest_eid.instance_id = int(v)
#endfor
#endif
if (kw == "source-eid"):
for i in range(len(match_set)):
v = value[i]
if (v == ""): continue
match = match_set[i]
if (match.source_eid == None):
match.source_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
#endif
iid = match.source_eid.instance_id
match.source_eid.store_prefix(v)
match.source_eid.instance_id = iid
#endfor
#endif
if (kw == "destination-eid"):
for i in range(len(match_set)):
v = value[i]
if (v == ""): continue
match = match_set[i]
if (match.dest_eid == None):
match.dest_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
#endif
iid = match.dest_eid.instance_id
match.dest_eid.store_prefix(v)
match.dest_eid.instance_id = iid
#endfor
#endif
if (kw == "source-rloc"):
for i in range(len(match_set)):
v = value[i]
if (v == ""): continue
match = match_set[i]
match.source_rloc = lisp_address(LISP_AFI_NONE, "", 0, 0)
match.source_rloc.store_prefix(v)
#endfor
#endif
if (kw == "destination-rloc"):
for i in range(len(match_set)):
v = value[i]
if (v == ""): continue
match = match_set[i]
match.dest_rloc = lisp_address(LISP_AFI_NONE, "", 0, 0)
match.dest_rloc.store_prefix(v)
#endfor
#endif
if (kw == "rloc-record-name"):
for i in range(len(match_set)):
v = value[i]
if (v == ""): continue
match = match_set[i]
match.rloc_record_name = v
#endfor
#endif
if (kw == "geo-name"):
for i in range(len(match_set)):
v = value[i]
if (v == ""): continue
match = match_set[i]
match.geo_name = v
#endfor
#endif
if (kw == "elp-name"):
for i in range(len(match_set)):
v = value[i]
if (v == ""): continue
match = match_set[i]
match.elp_name = v
#endfor
#endif
if (kw == "rle-name"):
for i in range(len(match_set)):
v = value[i]
if (v == ""): continue
match = match_set[i]
match.rle_name = v
#endfor
#endif
if (kw == "json-name"):
for i in range(len(match_set)):
v = value[i]
if (v == ""): continue
match = match_set[i]
match.json_name = v
#endfor
#endif
if (kw == "datetime-range"):
for i in range(len(match_set)):
v = value[i]
match = match_set[i]
if (v == ""): continue
l = lisp_datetime(v[0:19])
u = lisp_datetime(v[19::])
if (l.valid_datetime() and u.valid_datetime()):
match.datetime_lower = l
match.datetime_upper = u
#endif
#endfor
#endif
#
# Check for set parameters.
#
if (kw == "set-action"):
p.set_action = value
#endif
if (kw == "set-record-ttl"):
p.set_record_ttl = int(value)
#endif
if (kw == "set-instance-id"):
if (p.set_source_eid == None):
p.set_source_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
#endif
if (p.set_dest_eid == None):
p.set_dest_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
#endif
set_iid = int(value)
p.set_source_eid.instance_id = set_iid
p.set_dest_eid.instance_id = set_iid
#endif
if (kw == "set-source-eid"):
if (p.set_source_eid == None):
p.set_source_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
#endif
p.set_source_eid.store_prefix(value)
if (set_iid != None): p.set_source_eid.instance_id = set_iid
#endif
if (kw == "set-destination-eid"):
if (p.set_dest_eid == None):
p.set_dest_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
#endif
p.set_dest_eid.store_prefix(value)
if (set_iid != None): p.set_dest_eid.instance_id = set_iid
#endif
if (kw == "set-rloc-address"):
p.set_rloc_address = lisp_address(LISP_AFI_NONE, "", 0, 0)
p.set_rloc_address.store_address(value)
#endif
if (kw == "set-rloc-record-name"):
p.set_rloc_record_name = value
#endif
if (kw == "set-elp-name"):
p.set_elp_name = value
#endif
if (kw == "set-geo-name"):
p.set_geo_name = value
#endif
if (kw == "set-rle-name"):
p.set_rle_name = value
#endif
if (kw == "set-json-name"):
p.set_json_name = value
#endif
if (kw == "policy-name"):
p.policy_name = value
#endif
#endfor
#
# Store match clauses and policy.
#
p.match_clauses = match_set
p.save_policy()
return
#enddef
lisp_policy_commands = {
"lisp policy" : [lisp_policy_command, {
"policy-name" : [True],
"match" : [],
"instance-id" : [True, 0, 0xffffffff],
"source-eid" : [True],
"destination-eid" : [True],
"source-rloc" : [True],
"destination-rloc" : [True],
"rloc-record-name" : [True],
"elp-name" : [True],
"geo-name" : [True],
"rle-name" : [True],
"json-name" : [True],
"datetime-range" : [True],
"set-action" : [False, "process", "drop"],
"set-record-ttl" : [True, 0, 0x7fffffff],
"set-instance-id" : [True, 0, 0xffffffff],
"set-source-eid" : [True],
"set-destination-eid" : [True],
"set-rloc-address" : [True],
"set-rloc-record-name" : [True],
"set-elp-name" : [True],
"set-geo-name" : [True],
"set-rle-name" : [True],
"set-json-name" : [True] } ]
}
#
# lisp_send_to_arista
#
# Send supplied CLI command to an Arista switch, via its FastCli interface, so
# the hardware can be configured.
#
def lisp_send_to_arista(command, interface):
interface = "" if (interface == None) else "interface " + interface
cmd_str = command
if (interface != ""): cmd_str = interface + ": " + cmd_str
lprint("Send CLI command '{}' to hardware".format(cmd_str))
commands = '''
enable
configure
{}
{}
'''.format(interface, command)
os.system("FastCli -c '{}'".format(commands))
return
#enddef
#
# lisp_arista_is_alive
#
# Ask hardware if EID-prefix is alive. Return True if so.
#
def lisp_arista_is_alive(prefix):
cmd = "enable\nsh plat trident l3 software routes {}\n".format(prefix)
output = commands.getoutput("FastCli -c '{}'".format(cmd))
#
# Skip over header line.
#
output = output.split("\n")[1]
flag = output.split(" ")
flag = flag[-1].replace("\r", "")
#
# Last column has "Y" or "N" for hit bit.
#
return(flag == "Y")
#enddef
#
# lisp_program_vxlan_hardware
#
# This function is going to populate hardware that can do VXLAN encapsulation.
# It will add an IPv4 route via the kernel pointing to a next-hop on a
# VLAN interface that is being bridged to other potential VTEPs.
#
# The responsibility of this routine is to do the following programming:
#
# route add <eid-prefix> <next-hop>
# arp -s <next-hop> <mac-address>
#
# to the kernel and to do this Arista specific command:
#
# mac address-table static <mac-address> vlan 4094 interface vxlan 1
# vtep <vtep-address>
#
# Assumptions are:
#
# (1) Next-hop address is on the subnet for interface vlan4094.
# (2) VXLAN routing is already setup and will bridge <mac-address> to
# the VTEP address this function supplies.
# (3) A "ip virtual-router mac-address" is configured that will match the
# algorithmic mapping this function is doing between VTEP's IP address
# and the MAC address it will listen on to do VXLAN routing.
#
# The required configuration on the VTEPs are:
#
# vlan 4094
# interface vlan4094
# ip address ... ! <next-hop> above point to subnet
#
# interface Vxlan1
# vxlan source-interface Loopback0
# vxlan vlan 4094 vni 10000
# vxlan flood vtep add 17.17.17.17 ! any address to bring up vlan4094
#
# int loopback0
# ip address a.b.c.d/m ! this is the VTEP or RLOC <vtep-address>
#
# ip virtual-router mac-address 0000.00bb.ccdd
#
def lisp_program_vxlan_hardware(mc):
#
# For now, only do this on an Arista system. There isn't a python
# specific signature so just look to see if /persist/local/lispers.net
# exists.
#
if (os.path.exists("/persist/local/lispers.net") == False): return
#
# If no RLOCs, just return. Otherwise program the first RLOC.
#
if (len(mc.best_rloc_set) == 0): return
#
# Get EID-prefix and RLOC (VTEP address) in string form.
#
eid_prefix = mc.eid.print_prefix_no_iid()
rloc = mc.best_rloc_set[0].rloc.print_address_no_iid()
#
# Check to see if route is already present. If so, just return.
#
route = commands.getoutput("ip route get {} | egrep vlan4094".format( \
eid_prefix))
if (route != ""):
lprint("Route {} already in hardware: '{}'".format( \
green(eid_prefix, False), route))
return
#endif
#
# Look for a vxlan interface and a vlan4094 interface. If they do not
# exist, issue message and return. If we don't have an IP address on
# vlan4094, then exit as well.
#
ifconfig = commands.getoutput("ifconfig | egrep 'vxlan|vlan4094'")
if (ifconfig.find("vxlan") == -1):
lprint("No VXLAN interface found, cannot program hardware")
return
#endif
if (ifconfig.find("vlan4094") == -1):
lprint("No vlan4094 interface found, cannot program hardware")
return
#endif
ipaddr = commands.getoutput("ip addr | egrep vlan4094 | egrep inet")
if (ipaddr == ""):
lprint("No IP address found on vlan4094, cannot program hardware")
return
#endif
ipaddr = ipaddr.split("inet ")[1]
ipaddr = ipaddr.split("/")[0]
#
# Get a unique next-hop IP address on vlan4094's subnet. It is used as a
# handle to get the VTEP's MAC address, and that MAC address in turn is the
# handle that tells VXLAN to encapsulate the IP packet (with frame header)
# to the VTEP address.
#
arp_entries = []
arp_lines = commands.getoutput("arp -i vlan4094").split("\n")
for line in arp_lines:
if (line.find("vlan4094") == -1): continue
if (line.find("(incomplete)") == -1): continue
nh = line.split(" ")[0]
arp_entries.append(nh)
#endfor
nh = None
local = ipaddr
ipaddr = ipaddr.split(".")
for i in range(1, 255):
ipaddr[3] = str(i)
addr = ".".join(ipaddr)
if (addr in arp_entries): continue
if (addr == local): continue
nh = addr
break
#endfor
if (nh == None):
lprint("Address allocation failed for vlan4094, cannot program " + \
"hardware")
return
#endif
#
# Derive MAC address from VTEP address and associate it with the next-hop
# address on vlan4094. This MAC address must be the MAC address on the
# foreign VTEP configure with "ip virtual-router mac-address <mac>".
#
rloc_octets = rloc.split(".")
octet1 = lisp_hex_string(rloc_octets[1]).zfill(2)
octet2 = lisp_hex_string(rloc_octets[2]).zfill(2)
octet3 = lisp_hex_string(rloc_octets[3]).zfill(2)
mac = "00:00:00:{}:{}:{}".format(octet1, octet2, octet3)
arista_mac = "0000.00{}.{}{}".format(octet1, octet2, octet3)
arp_command = "arp -i vlan4094 -s {} {}".format(nh, mac)
os.system(arp_command)
#
# Add VXLAN entry for MAC address.
#
vxlan_command = ("mac address-table static {} vlan 4094 " + \
"interface vxlan 1 vtep {}").format(arista_mac, rloc)
lisp_send_to_arista(vxlan_command, None)
#
# Add route now connecting: eid-prefix -> next-hop -> mac-address ->
# VTEP address.
#
route_command = "ip route add {} via {}".format(eid_prefix, nh)
os.system(route_command)
lprint("Hardware programmed with commands:")
route_command = route_command.replace(eid_prefix, green(eid_prefix, False))
lprint(" " + route_command)
lprint(" " + arp_command)
vxlan_command = vxlan_command.replace(rloc, red(rloc, False))
lprint(" " + vxlan_command)
return
#enddef
#
# lisp_clear_hardware_walk
#
# Remove EID-prefix from kernel.
#
def lisp_clear_hardware_walk(mc, parms):
prefix = mc.eid.print_prefix_no_iid()
os.system("ip route delete {}".format(prefix))
return([True, None])
#enddef
#
# lisp_clear_map_cache
#
# Just create a new lisp_cache data structure. But if we have to program
# hardware, traverse the map-cache.
#
def lisp_clear_map_cache():
global lisp_map_cache, lisp_rloc_probe_list
global lisp_crypto_keys_by_rloc_encap, lisp_crypto_keys_by_rloc_decap
global lisp_rtr_list, lisp_gleaned_groups
clear = bold("User cleared", False)
count = lisp_map_cache.cache_count
lprint("{} map-cache with {} entries".format(clear, count))
if (lisp_program_hardware):
lisp_map_cache.walk_cache(lisp_clear_hardware_walk, None)
#endif
lisp_map_cache = lisp_cache()
#
# Need to clear the RLOC-probe list or else we'll have RLOC-probes
# create incomplete RLOC-records.
#
lisp_rloc_probe_list = {}
#
# Also clear the encap and decap lisp-crypto arrays.
#
lisp_crypto_keys_by_rloc_encap = {}
lisp_crypto_keys_by_rloc_decap = {}
#
# If we are an ITR, clear the RTR-list so a new set of default routes can
# be added when the next Info-Reply comes in.
#
lisp_rtr_list = {}
#
# Clear gleaned groups data structure.
#
lisp_gleaned_groups = {}
#
# Tell external data-plane.
#
lisp_process_data_plane_restart(True)
return
#enddef
#
# lisp_encapsulate_rloc_probe
#
# Input to this function is a RLOC-probe Map-Request and the NAT-traversal
# information for an ETR that sits behind a NAT. We need to get the RLOC-probe
# through the NAT so we have to data encapsulate it with a source port of 4341
# and a destination address and port that were translated by the NAT. That
# information is in the lisp_nat_info() class.
#
def lisp_encapsulate_rloc_probe(lisp_sockets, rloc, nat_info, packet):
if (len(lisp_sockets) != 4): return
local_addr = lisp_myrlocs[0]
#
# Build Map-Request IP header. Source and destination addresses same as
# the data encapsulation outer header.
#
length = len(packet) + 28
ip = struct.pack("BBHIBBHII", 0x45, 0, socket.htons(length), 0, 64,
17, 0, socket.htonl(local_addr.address), socket.htonl(rloc.address))
ip = lisp_ip_checksum(ip)
udp = struct.pack("HHHH", 0, socket.htons(LISP_CTRL_PORT),
socket.htons(length - 20), 0)
#
# Start data encapsulation logic.
#
packet = lisp_packet(ip + udp + packet)
#
# Setup fields we need for lisp_packet.encode().
#
packet.inner_dest.copy_address(rloc)
packet.inner_dest.instance_id = 0xffffff
packet.inner_source.copy_address(local_addr)
packet.inner_ttl = 64
packet.outer_dest.copy_address(rloc)
packet.outer_source.copy_address(local_addr)
packet.outer_version = packet.outer_dest.afi_to_version()
packet.outer_ttl = 64
packet.encap_port = nat_info.port if nat_info else LISP_DATA_PORT
rloc_str = red(rloc.print_address_no_iid(), False)
if (nat_info):
hostname = " {}".format(blue(nat_info.hostname, False))
probe = bold("RLOC-probe request", False)
else:
hostname = ""
probe = bold("RLOC-probe reply", False)
#endif
lprint(("Data encapsulate {} to {}{} port {} for " + \
"NAT-traversal").format(probe, rloc_str, hostname, packet.encap_port))
#
# Build data encapsulation header.
#
if (packet.encode(None) == None): return
packet.print_packet("Send", True)
raw_socket = lisp_sockets[3]
packet.send_packet(raw_socket, packet.outer_dest)
del(packet)
return
#enddef
#
# lisp_get_default_route_next_hops
#
# Put the interface name of each next-hop for the IPv4 default route in an array
# and return to caller. The array has elements of [<device>, <nh>].
#
def lisp_get_default_route_next_hops():
#
# Get default route next-hop info differently for MacOS.
#
if (lisp_is_macos()):
cmd = "route -n get default"
fields = commands.getoutput(cmd).split("\n")
gw = interface = None
for f in fields:
if (f.find("gateway: ") != -1): gw = f.split(": ")[1]
if (f.find("interface: ") != -1): interface = f.split(": ")[1]
#endfor
return([[interface, gw]])
#endif
#
# Get default route next-hop info for Linuxes.
#
cmd = "ip route | egrep 'default via'"
default_routes = commands.getoutput(cmd).split("\n")
next_hops = []
for route in default_routes:
if (route.find(" metric ") != -1): continue
r = route.split(" ")
try:
via_index = r.index("via") + 1
if (via_index >= len(r)): continue
dev_index = r.index("dev") + 1
if (dev_index >= len(r)): continue
except:
continue
#endtry
next_hops.append([r[dev_index], r[via_index]])
#endfor
return(next_hops)
#enddef
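#
# With the two example default routes shown in the lisp_build_info_requests()
# comment above, this would return something like (illustrative next-hops):
#
#   [["eth0", "192.168.1.1"], ["eth1", "192.168.2.1"]]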
#
# lisp_get_host_route_next_hop
#
# For already installed host route, get next-hop.
#
def lisp_get_host_route_next_hop(rloc):
cmd = "ip route | egrep '{} via'".format(rloc)
route = commands.getoutput(cmd).split(" ")
try: index = route.index("via") + 1
except: return(None)
if (index >= len(route)): return(None)
return(route[index])
#enddef
#
# lisp_install_host_route
#
# Install/deinstall host route.
#
def lisp_install_host_route(dest, nh, install):
install = "add" if install else "delete"
nh_str = "none" if nh == None else nh
lprint("{} host-route {}, nh {}".format(install.title(), dest, nh_str))
if (nh == None):
ar = "ip route {} {}/32".format(install, dest)
else:
ar = "ip route {} {}/32 via {}".format(install, dest, nh)
#endif
os.system(ar)
return
#enddef
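#
# For example, lisp_install_host_route("10.0.0.1", "192.168.1.254", True)
# runs "ip route add 10.0.0.1/32 via 192.168.1.254", and the same call with
# install=False runs the corresponding "ip route delete" command.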
#
# lisp_checkpoint
#
# This function will write entries from the checkpoint array to the checkpoint
# file "lisp.checkpoint".
#
def lisp_checkpoint(checkpoint_list):
if (lisp_checkpoint_map_cache == False): return
f = open(lisp_checkpoint_filename, "w")
for entry in checkpoint_list:
f.write(entry + "\n")
#endfor
f.close()
lprint("{} {} entries to file '{}'".format(bold("Checkpoint", False),
len(checkpoint_list), lisp_checkpoint_filename))
return
#enddef
#
# lisp_load_checkpoint
#
# Read entries from checkpoint file and write to map cache. Check function
# lisp_write_checkpoint_entry() for entry format description.
#
def lisp_load_checkpoint():
if (lisp_checkpoint_map_cache == False): return
if (os.path.exists(lisp_checkpoint_filename) == False): return
f = open(lisp_checkpoint_filename, "r")
count = 0
for entry in f:
count += 1
e = entry.split(" rloc ")
rlocs = [] if (e[1] in ["native-forward\n", "\n"]) else \
e[1].split(", ")
rloc_set = []
for rloc in rlocs:
rloc_entry = lisp_rloc(False)
r = rloc.split(" ")
rloc_entry.rloc.store_address(r[0])
rloc_entry.priority = int(r[1])
rloc_entry.weight = int(r[2])
rloc_set.append(rloc_entry)
#endfor
mc = lisp_mapping("", "", rloc_set)
if (mc != None):
mc.eid.store_prefix(e[0])
mc.checkpoint_entry = True
mc.map_cache_ttl = LISP_NMR_TTL * 60
if (rloc_set == []): mc.action = LISP_NATIVE_FORWARD_ACTION
mc.add_cache()
continue
#endif
count -= 1
#endfor
f.close()
lprint("{} {} map-cache entries from file '{}'".format(
bold("Loaded", False), count, lisp_checkpoint_filename))
return
#enddef
#
# lisp_write_checkpoint_entry
#
# Write one map-cache entry to checkpoint array list. The format of a
# checkpoint entry is:
#
# [<iid>]<eid-prefix> rloc <rloc>, <rloc>, ...
#
# where <rloc> is formatted as:
#
# <rloc-address> <priority> <weight>
#
def lisp_write_checkpoint_entry(checkpoint_list, mc):
if (lisp_checkpoint_map_cache == False): return
entry = "{} rloc ".format(mc.eid.print_prefix())
for rloc_entry in mc.rloc_set:
if (rloc_entry.rloc.is_null()): continue
entry += "{} {} {}, ".format(rloc_entry.rloc.print_address_no_iid(),
rloc_entry.priority, rloc_entry.weight)
#endfor
if (mc.rloc_set != []):
entry = entry[0:-2]
elif (mc.action == LISP_NATIVE_FORWARD_ACTION):
entry += "native-forward"
#endif
checkpoint_list.append(entry)
return
#enddef
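#
# Illustrative checkpoint entries (addresses made up):
#
#   [1000]10.2.0.0/16 rloc 192.0.2.1 1 50, 192.0.2.2 1 50
#   [1000]10.3.0.0/16 rloc native-forward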
#
# lisp_check_dp_socket
#
# Check if lisp-ipc-data-plane socket exists.
#
def lisp_check_dp_socket():
socket_name = lisp_ipc_dp_socket_name
if (os.path.exists(socket_name) == False):
dne = bold("does not exist", False)
lprint("Socket '{}' {}".format(socket_name, dne))
return(False)
#endif
return(True)
#enddef
#
# lisp_write_to_dp_socket
#
# Write a JSON record to the lisp-ipc-data-plane named socket.
#
def lisp_write_to_dp_socket(entry):
try:
rec = json.dumps(entry)
write = bold("Write IPC", False)
lprint("{} record to named socket: '{}'".format(write, rec))
lisp_ipc_dp_socket.sendto(rec, lisp_ipc_dp_socket_name)
except:
lprint("Failed to write IPC record to named socket: '{}'".format(rec))
#endtry
return
#enddef
#
# lisp_write_ipc_keys
#
# Security keys have changed for an RLOC. Find all map-cache entries that are
# affected. The lisp_rloc_probe_list has the list of EIDs for a given RLOC
# address. Tell the external data-plane for each one.
#
def lisp_write_ipc_keys(rloc):
addr_str = rloc.rloc.print_address_no_iid()
port = rloc.translated_port
if (port != 0): addr_str += ":" + str(port)
if (lisp_rloc_probe_list.has_key(addr_str) == False): return
for r, e, g in lisp_rloc_probe_list[addr_str]:
mc = lisp_map_cache.lookup_cache(e, True)
if (mc == None): continue
lisp_write_ipc_map_cache(True, mc)
#endfor
return
#enddef
#
# lisp_write_ipc_map_cache
#
# Write a map-cache entry to named socket "lisp-ipc-data-plane".
#
def lisp_write_ipc_map_cache(add_or_delete, mc, dont_send=False):
if (lisp_i_am_etr): return
if (lisp_ipc_dp_socket == None): return
if (lisp_check_dp_socket() == False): return
#
# Write record in JSON format.
#
add = "add" if add_or_delete else "delete"
entry = { "type" : "map-cache", "opcode" : add }
multicast = (mc.group.is_null() == False)
if (multicast):
entry["eid-prefix"] = mc.group.print_prefix_no_iid()
entry["rles"] = []
else:
entry["eid-prefix"] = mc.eid.print_prefix_no_iid()
entry["rlocs"] = []
#endif
entry["instance-id"] = str(mc.eid.instance_id)
if (multicast):
if (len(mc.rloc_set) >= 1 and mc.rloc_set[0].rle):
for rle_node in mc.rloc_set[0].rle.rle_forwarding_list:
addr = rle_node.address.print_address_no_iid()
port = str(4341) if rle_node.translated_port == 0 else \
str(rle_node.translated_port)
r = { "rle" : addr, "port" : port }
ekey, ikey = rle_node.get_encap_keys()
r = lisp_build_json_keys(r, ekey, ikey, "encrypt-key")
entry["rles"].append(r)
#endfor
#endif
else:
for rloc in mc.rloc_set:
if (rloc.rloc.is_ipv4() == False and rloc.rloc.is_ipv6() == False):
continue
#endif
if (rloc.up_state() == False): continue
port = str(4341) if rloc.translated_port == 0 else \
str(rloc.translated_port)
r = { "rloc" : rloc.rloc.print_address_no_iid(), "priority" :
str(rloc.priority), "weight" : str(rloc.weight), "port" :
port }
ekey, ikey = rloc.get_encap_keys()
r = lisp_build_json_keys(r, ekey, ikey, "encrypt-key")
entry["rlocs"].append(r)
#endfor
#endif
if (dont_send == False): lisp_write_to_dp_socket(entry)
return(entry)
#enddef
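#
# An illustrative JSON record for a unicast map-cache entry (values made up):
#
# { "type" : "map-cache", "opcode" : "add", "instance-id" : "0",
#   "eid-prefix" : "10.2.0.0/16", "rlocs" : [ { "rloc" : "192.0.2.1",
#   "priority" : "1", "weight" : "100", "port" : "4341" } ] }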
#
# lisp_write_ipc_decap_key
#
# In the lisp-etr process, write an RLOC record to the ipc-data-plane socket.
#
def lisp_write_ipc_decap_key(rloc_addr, keys):
if (lisp_i_am_itr): return
if (lisp_ipc_dp_socket == None): return
if (lisp_check_dp_socket() == False): return
#
# Get decryption key. If there is none, do not send message.
#
if (keys == None or len(keys) == 0 or keys[1] == None): return
ekey = keys[1].encrypt_key
ikey = keys[1].icv_key
#
# Write record in JSON format. Store encryption key.
#
rp = rloc_addr.split(":")
if (len(rp) == 1):
entry = { "type" : "decap-keys", "rloc" : rp[0] }
else:
entry = { "type" : "decap-keys", "rloc" : rp[0], "port" : rp[1] }
#endif
entry = lisp_build_json_keys(entry, ekey, ikey, "decrypt-key")
lisp_write_to_dp_socket(entry)
return
#enddef
#
# lisp_build_json_keys
#
# Build the "keys" JSON array for both the ITR encryption side and the ETR
# decryption side.
#
def lisp_build_json_keys(entry, ekey, ikey, key_type):
if (ekey == None): return(entry)
entry["keys"] = []
key = { "key-id" : "1", key_type : ekey, "icv-key" : ikey }
entry["keys"].append(key)
return(entry)
#enddef
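#
# Editor's illustration, not part of the original source (the key strings are
# hypothetical). The guard keeps the example from ever running:
#
if (False):
    example = lisp_build_json_keys({ "rloc" : "192.0.2.1" }, "e-key", "i-key",
        "encrypt-key")
    #
    # example is:
    # { "rloc" : "192.0.2.1",
    #   "keys" : [ { "key-id" : "1", "encrypt-key" : "e-key",
    #                "icv-key" : "i-key" } ] }
    #
#endif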
#
# lisp_write_ipc_database_mappings
#
# In the lisp-etr process, write the database-mappings (and the ETR's
# ephemeral NAT port) to the ipc-data-plane socket.
#
def lisp_write_ipc_database_mappings(ephem_port):
if (lisp_i_am_etr == False): return
if (lisp_ipc_dp_socket == None): return
if (lisp_check_dp_socket() == False): return
#
# Write record in JSON format.
#
entry = { "type" : "database-mappings", "database-mappings" : [] }
#
# Write only IPv4 and IPv6 EIDs.
#
for db in lisp_db_list:
if (db.eid.is_ipv4() == False and db.eid.is_ipv6() == False): continue
record = { "instance-id" : str(db.eid.instance_id),
"eid-prefix" : db.eid.print_prefix_no_iid() }
entry["database-mappings"].append(record)
#endfor
lisp_write_to_dp_socket(entry)
#
# Write ephemeral NAT port an external data-plane needs to receive
# encapsulated packets from the RTR.
#
entry = { "type" : "etr-nat-port", "port" : ephem_port }
lisp_write_to_dp_socket(entry)
return
#enddef
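#
# Editor's illustration, not part of the original source: the two records
# written above look like this (prefix and port values are hypothetical):
#
# { "type" : "database-mappings",
#   "database-mappings" : [ { "instance-id" : "0",
#                             "eid-prefix" : "10.1.0.0/16" } ] }
# { "type" : "etr-nat-port", "port" : 31137 }
#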
#
# lisp_write_ipc_interfaces
#
# In the lisp-itr or lisp-rtr process, write the configured interfaces and
# their instance-IDs to the ipc-data-plane socket.
#
def lisp_write_ipc_interfaces():
if (lisp_i_am_etr): return
if (lisp_ipc_dp_socket == None): return
if (lisp_check_dp_socket() == False): return
#
# Write record in JSON format.
#
entry = { "type" : "interfaces", "interfaces" : [] }
for interface in lisp_myinterfaces.values():
if (interface.instance_id == None): continue
record = { "interface" : interface.device,
"instance-id" : str(interface.instance_id) }
entry["interfaces"].append(record)
#endfor
lisp_write_to_dp_socket(entry)
return
#enddef
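#
# Editor's illustration, not part of the original source: an "interfaces"
# record written above looks like this (device name and instance-ID are
# hypothetical):
#
# { "type" : "interfaces",
#   "interfaces" : [ { "interface" : "eth0", "instance-id" : "0" } ] }
#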
#
# lisp_parse_auth_key
#
# Look for values for "authentication-key" in the various forms of:
#
# <password>
# [<key-id>]<password>
# [<key-id>]<password> [<key-id>]<password> [<key-id>]<password>
#
# Return an auth_key{} dictionary where the keys are integers and the values
# are strings.
#
def lisp_parse_auth_key(value):
values = value.split("[")
auth_key = {}
if (len(values) == 1):
auth_key[0] = value
return(auth_key)
#endif
for v in values:
if (v == ""): continue
index = v.find("]")
key_id = v[0:index]
try: key_id = int(key_id)
except: return
auth_key[key_id] = v[index+1::]
#endfor
return(auth_key)
#enddef
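#
# Editor's illustration, not part of the original source (passwords are
# hypothetical). The guard keeps the examples from ever running:
#
if (False):
    lisp_parse_auth_key("my-password")     # returns {0: "my-password"}
    lisp_parse_auth_key("[2]my-password")  # returns {2: "my-password"}
    lisp_parse_auth_key("[x]my-password")  # returns None, bad <key-id>
#endif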
#
# lisp_reassemble
#
# Reassemble an IPv4 datagram. The result is a LISP encapsulated packet.
#
# An entry in the queue is a multi-tuple of:
#
# <frag-offset>, <frag-length>, <packet-with-header>, <last-frag-is-true>
#
# When it is not a LISP/VXLAN encapsulated packet, the multi-tuple will be
# for the first fragment:
#
# <frag-offset>, <frag-length>, None, <last-frag-is-true>
#
def lisp_reassemble(packet):
fo = socket.ntohs(struct.unpack("H", packet[6:8])[0])
#
# Not a fragment, return packet and process.
#
if (fo == 0 or fo == 0x4000): return(packet)
#
# Get key fields from fragment.
#
ident = socket.ntohs(struct.unpack("H", packet[4:6])[0])
fl = socket.ntohs(struct.unpack("H", packet[2:4])[0])
last_frag = (fo & 0x2000 == 0 and (fo & 0x1fff) != 0)
entry = [(fo & 0x1fff) * 8, fl - 20, packet, last_frag]
#
# If first fragment, check to see if LISP packet. Do not reassemble if
# source or destination port is not 4341, 8472 or 4789. But add this to
# the queue so when other fragments come in, we know to not queue them.
# If other fragments came in before the first fragment, remove them from
# the queue.
#
if (fo == 0x2000):
sport, dport = struct.unpack("HH", packet[20:24])
sport = socket.ntohs(sport)
dport = socket.ntohs(dport)
if (dport not in [4341, 8472, 4789] and sport != 4341):
lisp_reassembly_queue[ident] = []
entry[2] = None
#endif
#endif
#
# Initialize the list for the first fragment. Indexed by IPv4 Ident.
#
if (lisp_reassembly_queue.has_key(ident) == False):
lisp_reassembly_queue[ident] = []
#endif
#
# Get fragment queue based on IPv4 Ident.
#
queue = lisp_reassembly_queue[ident]
#
# Do not queue the fragment if the first fragment arrived and we determined
# it's not a LISP encapsulated packet.
#
if (len(queue) == 1 and queue[0][2] == None):
dprint("Drop non-LISP encapsulated fragment 0x{}".format( \
lisp_hex_string(ident).zfill(4)))
return(None)
#endif
#
# Insert in sorted order.
#
queue.append(entry)
queue = sorted(queue)
#
# Print addresses.
#
addr = lisp_address(LISP_AFI_IPV4, "", 32, 0)
addr.address = socket.ntohl(struct.unpack("I", packet[12:16])[0])
src = addr.print_address_no_iid()
addr.address = socket.ntohl(struct.unpack("I", packet[16:20])[0])
dst = addr.print_address_no_iid()
addr = red("{} -> {}".format(src, dst), False)
dprint("{}{} fragment, RLOCs: {}, packet 0x{}, frag-offset: 0x{}".format( \
bold("Received", False), " non-LISP encapsulated" if \
entry[2] == None else "", addr, lisp_hex_string(ident).zfill(4),
lisp_hex_string(fo).zfill(4)))
#
# Check if all fragments arrived. First check if first and last fragments
# are in queue.
#
if (queue[0][0] != 0 or queue[-1][3] == False): return(None)
last_entry = queue[0]
for frag in queue[1::]:
fo = frag[0]
last_fo, last_fl = last_entry[0], last_entry[1]
if (last_fo + last_fl != fo): return(None)
last_entry = frag
#endfor
lisp_reassembly_queue.pop(ident)
#
# If we did not return, we have all fragments. Now append them. Keep the
# IP header in the first fragment but remove in each other fragment.
#
packet = queue[0][2]
for frag in queue[1::]: packet += frag[2][20::]
dprint("{} fragments arrived for packet 0x{}, length {}".format( \
bold("All", False), lisp_hex_string(ident).zfill(4), len(packet)))
#
# Fix length and frag-offset field before returning and fixup checksum.
#
length = socket.htons(len(packet))
header = packet[0:2] + struct.pack("H", length) + packet[4:6] + \
struct.pack("H", 0) + packet[8:10] + struct.pack("H", 0) + \
packet[12:20]
header = lisp_ip_checksum(header)
return(header + packet[20::])
#enddef
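#
# Editor's worked example, not part of the original source (sizes are
# hypothetical): a 2960-byte IP payload fragmented over a 1500-byte MTU link
# arrives as two fragments:
#
#   frag 1: frag-field 0x2000 (MF set, offset 0), total-length 1500
#           -> queue entry [0, 1480, <packet>, False]
#   frag 2: frag-field 0x00b9 (MF clear, offset 185*8 = 1480), total-length 1500
#           -> queue entry [1480, 1480, <packet>, True]
#
# Reassembly completes when the first entry starts at offset 0, the last
# entry has the last-fragment flag set, and each entry begins where the
# previous one ends.
#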
#
# lisp_get_crypto_decap_lookup_key
#
# Return None if we cannot find <addr>:<port> or <addr>:0 in lisp_crypto_
# keys_by_rloc_decap{}.
#
def lisp_get_crypto_decap_lookup_key(addr, port):
addr_str = addr.print_address_no_iid() + ":" + str(port)
if (lisp_crypto_keys_by_rloc_decap.has_key(addr_str)): return(addr_str)
addr_str = addr.print_address_no_iid()
if (lisp_crypto_keys_by_rloc_decap.has_key(addr_str)): return(addr_str)
#
# We are at a non-NAT based xTR. We need to get the keys from an RTR
# or another non-NAT based xTR. Move addr+port to addr.
#
for ap in lisp_crypto_keys_by_rloc_decap:
a = ap.split(":")
if (len(a) == 1): continue
a = a[0] if len(a) == 2 else ":".join(a[0:-1])
if (a == addr_str):
keys = lisp_crypto_keys_by_rloc_decap[ap]
lisp_crypto_keys_by_rloc_decap[addr_str] = keys
return(addr_str)
#endif
#endfor
return(None)
#enddef
#
# lisp_build_crypto_decap_lookup_key
#
# Decide to return <addr>:<port> or <addr> depending on whether the RLOC is
# a NAT. This is used on the RTR. Check the lisp probing cache. If we find
# an RLOC with a port number stored, then it is behind a NAT. Otherwise,
# the supplied port is not relevant and we want to create a "port-less" decap
# entry for an xTR that is in public address space.
#
def lisp_build_crypto_decap_lookup_key(addr, port):
addr = addr.print_address_no_iid()
addr_and_port = addr + ":" + str(port)
if (lisp_i_am_rtr):
if (lisp_rloc_probe_list.has_key(addr)): return(addr)
#
# Have to check NAT cache to see if RLOC is translated. If not, this
# is an xTR in public space. We'll have to change this in the future
# so we don't do a full table traversal. But this only happens rarely.
#
for nat_info in lisp_nat_state_info.values():
for nat in nat_info:
if (addr == nat.address): return(addr_and_port)
#endfor
#endfor
return(addr)
#endif
return(addr_and_port)
#enddef
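#
# Editor's illustration, not part of the original source (address and port
# are hypothetical): on an RTR, if "192.0.2.1" is in lisp_rloc_probe_list the
# function returns "192.0.2.1"; if it instead matches an entry in
# lisp_nat_state_info it returns "192.0.2.1:31137"; otherwise "192.0.2.1".
# On an ITR/ETR the port is always appended.
#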
#
# lisp_set_ttl
#
# Set send IP TTL for outgoing packet.
#
def lisp_set_ttl(lisp_socket, ttl):
try:
lisp_socket.setsockopt(socket.SOL_IP, socket.IP_TTL, ttl)
except:
lprint("socket.setsockopt(IP_TTL) not supported")
pass
#endtry
return
#enddef
#
# lisp_is_rloc_probe_request
#
# Pass LISP first byte to test for 0x12, a Map-Request RLOC-probe.
#
def lisp_is_rloc_probe_request(lisp_type):
lisp_type = struct.unpack("B", lisp_type)[0]
return(lisp_type == 0x12)
#enddef
#
# lisp_is_rloc_probe_reply
#
# Pass LISP first byte to test for 0x28, a Map-Reply RLOC-probe.
#
def lisp_is_rloc_probe_reply(lisp_type):
lisp_type = struct.unpack("B", lisp_type)[0]
return(lisp_type == 0x28)
#enddef
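#
# Editor's illustration, not part of the original source: both tests above
# examine only the first byte of the LISP control header. The guard keeps
# the examples from ever running:
#
if (False):
    lisp_is_rloc_probe_request(struct.pack("B", 0x12))  # returns True
    lisp_is_rloc_probe_reply(struct.pack("B", 0x28))    # returns True
    lisp_is_rloc_probe_request(struct.pack("B", 0x10))  # returns False
#endif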
#
# lisp_is_rloc_probe
#
# If this is a RLOC-probe received by the data-plane (from a pcap filter),
# then return source address, source port, ttl, and position packet to the
# beginning of the LISP header. The packet pointer entering this function is
# the beginning of an IPv4 header.
#
# If rr (request-or-reply) is:
#
# 0: Check for Map-Request RLOC-probe (ETR case)
# 1: Check for Map-Reply RLOC-probe (ITR case)
# -1: Check for either (RTR case)
#
# Return packet pointer untouched if not an RLOC-probe. If it is an RLOC-probe
# request or reply from ourselves, return packet pointer None and source None.
#
def lisp_is_rloc_probe(packet, rr):
udp = (struct.unpack("B", packet[9])[0] == 17)
if (udp == False): return([packet, None, None, None])
sport = struct.unpack("H", packet[20:22])[0]
dport = struct.unpack("H", packet[22:24])[0]
is_lisp = (socket.htons(LISP_CTRL_PORT) in [sport, dport])
if (is_lisp == False): return([packet, None, None, None])
if (rr == 0):
probe = lisp_is_rloc_probe_request(packet[28])
if (probe == False): return([packet, None, None, None])
elif (rr == 1):
probe = lisp_is_rloc_probe_reply(packet[28])
if (probe == False): return([packet, None, None, None])
elif (rr == -1):
probe = lisp_is_rloc_probe_request(packet[28])
if (probe == False):
probe = lisp_is_rloc_probe_reply(packet[28])
if (probe == False): return([packet, None, None, None])
#endif
#endif
#
# Get source address, source port, and TTL. Decrement TTL.
#
source = lisp_address(LISP_AFI_IPV4, "", 32, 0)
source.address = socket.ntohl(struct.unpack("I", packet[12:16])[0])
#
# If this is a RLOC-probe from ourselves, drop.
#
if (source.is_local()): return([None, None, None, None])
#
# Accept, and return source, port, and ttl to caller.
#
source = source.print_address_no_iid()
port = socket.ntohs(struct.unpack("H", packet[20:22])[0])
ttl = struct.unpack("B", packet[8])[0] - 1
packet = packet[28::]
r = bold("Receive(pcap)", False)
f = bold("from " + source, False)
p = lisp_format_packet(packet)
lprint("{} {} bytes {} {}, packet: {}".format(r, len(packet), f, port, p))
return([packet, source, port, ttl])
#enddef
#
# lisp_ipc_write_xtr_parameters
#
# When an external data-plane is running, write the following parameters
# to it:
#
# ipc = { "type" : "xtr-parameters", "control-plane-logging" : False,
# "data-plane-logging" : False, "rtr" : False }
#
def lisp_ipc_write_xtr_parameters(cp, dp):
if (lisp_ipc_dp_socket == None): return
ipc = { "type" : "xtr-parameters", "control-plane-logging" : cp,
"data-plane-logging" : dp, "rtr" : lisp_i_am_rtr }
lisp_write_to_dp_socket(ipc)
return
#enddef
#
# lisp_external_data_plane
#
# Return True if an external data-plane is running. That means that "ipc-data-
# plane = yes" is configured or the lisp-xtr go binary is running.
#
def lisp_external_data_plane():
cmd = 'egrep "ipc-data-plane = yes" ./lisp.config'
if (commands.getoutput(cmd) != ""): return(True)
if (os.getenv("LISP_RUN_LISP_XTR") != None): return(True)
return(False)
#enddef
#
# lisp_process_data_plane_restart
#
# The external data-plane has restarted. We will touch the lisp.config file so
# all configuration information is sent and then traverse the map-cache
# sending each entry to the data-plane so it can regain its state.
#
# This function will also clear the external data-plane map-cache when a user
# clears the map-cache in the lisp-itr or lisp-rtr process.
#
# { "type" : "restart" }
#
def lisp_process_data_plane_restart(do_clear=False):
os.system("touch ./lisp.config")
jdata = { "type" : "entire-map-cache", "entries" : [] }
if (do_clear == False):
entries = jdata["entries"]
lisp_map_cache.walk_cache(lisp_ipc_walk_map_cache, entries)
#endif
lisp_write_to_dp_socket(jdata)
return
#enddef
#
# lisp_process_data_plane_stats
#
# { "type" : "statistics", "entries" :
# [ { "instance-id" : "<iid>", "eid-prefix" : "<eid>", "rlocs" : [
# { "rloc" : "<rloc-1>", "packet-count" : <count>, "byte-count" : <bcount>,
# "seconds-last-packet" : "<timestamp>" }, ...
# { "rloc" : "<rloc-n>", "packet-count" : <count>, "byte-count" : <bcount>,
# "seconds-last-packet" : <system-uptime> } ], ... }
# ]
# }
#
def lisp_process_data_plane_stats(msg, lisp_sockets, lisp_port):
if (msg.has_key("entries") == False):
lprint("No 'entries' in stats IPC message")
return
#endif
if (type(msg["entries"]) != list):
lprint("'entries' in stats IPC message must be an array")
return
#endif
for msg in msg["entries"]:
if (msg.has_key("eid-prefix") == False):
lprint("No 'eid-prefix' in stats IPC message")
continue
#endif
eid_str = msg["eid-prefix"]
if (msg.has_key("instance-id") == False):
lprint("No 'instance-id' in stats IPC message")
continue
#endif
iid = int(msg["instance-id"])
#
# Lookup EID-prefix in map-cache.
#
eid = lisp_address(LISP_AFI_NONE, "", 0, iid)
eid.store_prefix(eid_str)
mc = lisp_map_cache_lookup(None, eid)
if (mc == None):
lprint("Map-cache entry for {} not found for stats update". \
format(eid_str))
continue
#endif
if (msg.has_key("rlocs") == False):
lprint("No 'rlocs' in stats IPC message for {}".format( \
eid_str))
continue
#endif
if (type(msg["rlocs"]) != list):
lprint("'rlocs' in stats IPC message must be an array")
continue
#endif
ipc_rlocs = msg["rlocs"]
#
# Loop through RLOCs in IPC message.
#
for ipc_rloc in ipc_rlocs:
if (ipc_rloc.has_key("rloc") == False): continue
rloc_str = ipc_rloc["rloc"]
if (rloc_str == "no-address"): continue
rloc = lisp_address(LISP_AFI_NONE, "", 0, 0)
rloc.store_address(rloc_str)
rloc_entry = mc.get_rloc(rloc)
if (rloc_entry == None): continue
#
# Update stats.
#
pc = 0 if ipc_rloc.has_key("packet-count") == False else \
ipc_rloc["packet-count"]
bc = 0 if ipc_rloc.has_key("byte-count") == False else \
ipc_rloc["byte-count"]
ts = 0 if ipc_rloc.has_key("seconds-last-packet") == False else \
ipc_rloc["seconds-last-packet"]
rloc_entry.stats.packet_count += pc
rloc_entry.stats.byte_count += bc
rloc_entry.stats.last_increment = lisp_get_timestamp() - ts
lprint("Update stats {}/{}/{}s for {} RLOC {}".format(pc, bc,
ts, eid_str, rloc_str))
#endfor
#
# Check if this map-cache entry needs refreshing.
#
if (mc.group.is_null() and mc.has_ttl_elapsed()):
eid_str = green(mc.print_eid_tuple(), False)
lprint("Refresh map-cache entry {}".format(eid_str))
lisp_send_map_request(lisp_sockets, lisp_port, None, mc.eid, None)
#endif
#endfor
return
#enddef
#
# lisp_process_data_plane_decap_stats
#
# { "type" : "decap-statistics",
# "no-decrypt-key" : { "packet-count" : <count>, "byte-count" : <bcount>,
# "seconds-last-packet" : <seconds> },
# "outer-header-error" : { "packet-count" : <count>, "byte-count" : <bcount>,
# "seconds-last-packet" : <seconds> },
# "bad-inner-version" : { "packet-count" : <count>, "byte-count" : <bcount>,
# "seconds-last-packet" : <seconds> },
# "good-packets" : { "packet-count" : <count>, "byte-count" : <bcount>,
# "seconds-last-packet" : <seconds> },
# "ICV-error" : { "packet-count" : <count>, "byte-count" : <bcount>,
# "seconds-last-packet" : <seconds> },
# "checksum-error" : { "packet-count" : <count>, "byte-count" : <bcount>,
# "seconds-last-packet" : <seconds> }
# }
#
# If we are an RTR, we can process the stats directly. If we are an ITR, we
# need to send an IPC message to the lisp-etr process.
#
def lisp_process_data_plane_decap_stats(msg, lisp_ipc_socket):
#
# Send IPC message to lisp-etr process. Variable 'msg' is a dict array.
# Needs to be passed in IPC message as a string.
#
if (lisp_i_am_itr):
lprint("Send decap-stats IPC message to lisp-etr process")
ipc = "stats%{}".format(json.dumps(msg))
ipc = lisp_command_ipc(ipc, "lisp-itr")
lisp_ipc(ipc, lisp_ipc_socket, "lisp-etr")
return
#endif
#
# Process stats counters in lisp-etr and lisp-rtr processes. Variable 'msg'
# is a dictionary array when the ITR/RTR is processing msg. When an ETR
# is processing it, it received a json string from the ITR so it needs
# to be converted to a dictionary array.
#
ipc = bold("IPC", False)
lprint("Process decap-stats {} message: '{}'".format(ipc, msg))
if (lisp_i_am_etr): msg = json.loads(msg)
key_names = ["good-packets", "ICV-error", "checksum-error",
"lisp-header-error", "no-decrypt-key", "bad-inner-version",
"outer-header-error"]
for key_name in key_names:
pc = 0 if msg.has_key(key_name) == False else \
msg[key_name]["packet-count"]
lisp_decap_stats[key_name].packet_count += pc
bc = 0 if msg.has_key(key_name) == False else \
msg[key_name]["byte-count"]
lisp_decap_stats[key_name].byte_count += bc
ts = 0 if msg.has_key(key_name) == False else \
msg[key_name]["seconds-last-packet"]
lisp_decap_stats[key_name].last_increment = lisp_get_timestamp() - ts
#endfor
return
#enddef
#
# lisp_process_punt
#
# Another data-plane is punting a packet to us so we can discover a source
# EID, send a map-request, or store statistics data. The format of the JSON
# messages are for types: "discovery", "restart", "statistics", and "decap-
# statistics". This function calls functions for the stats and restart types
# but this function processes logic for:
#
# { "type" : "discovery", "source-eid" : <eid-source-address>,
# "dest-eid" : <eid-dest-address>, "interface" : "<device-name>",
# "instance-id" : <iid> }
#
# And:
#
def lisp_process_punt(punt_socket, lisp_send_sockets, lisp_ephem_port):
message, source = punt_socket.recvfrom(4000)
msg = json.loads(message)
if (type(msg) != dict):
lprint("Invalid punt message from {}, not in JSON format". \
format(source))
return
#endif
punt = bold("Punt", False)
lprint("{} message from '{}': '{}'".format(punt, source, msg))
if (msg.has_key("type") == False):
lprint("Punt IPC message has no 'type' key")
return
#endif
#
# Process statistics message.
#
if (msg["type"] == "statistics"):
lisp_process_data_plane_stats(msg, lisp_send_sockets, lisp_ephem_port)
return
#endif
if (msg["type"] == "decap-statistics"):
lisp_process_data_plane_decap_stats(msg, punt_socket)
return
#endif
#
# Process restart message.
#
if (msg["type"] == "restart"):
lisp_process_data_plane_restart()
return
#endif
#
# Process possible punt packet discovery message.
#
if (msg["type"] != "discovery"):
lprint("Punt IPC message has wrong format")
return
#endif
if (msg.has_key("interface") == False):
lprint("Invalid punt message from {}, required keys missing". \
format(source))
return
#endif
#
# Drop control-messages designated as instance-ID 0xffffff (or -1 in JSON).
#
device = msg["interface"]
if (device == ""):
iid = int(msg["instance-id"])
if (iid == -1): return
else:
iid = lisp_get_interface_instance_id(device, None)
#endif
#
# Validate EID format.
#
seid = None
if (msg.has_key("source-eid")):
source_eid = msg["source-eid"]
seid = lisp_address(LISP_AFI_NONE, source_eid, 0, iid)
if (seid.is_null()):
lprint("Invalid source-EID format '{}'".format(source_eid))
return
#endif
#endif
deid = None
if (msg.has_key("dest-eid")):
dest_eid = msg["dest-eid"]
deid = lisp_address(LISP_AFI_NONE, dest_eid, 0, iid)
if (deid.is_null()):
lprint("Invalid dest-EID format '{}'".format(dest_eid))
return
#endif
#endif
#
# Do source-EID discovery.
#
# Make sure we have a configured database-mapping entry for this EID.
#
if (seid):
e = green(seid.print_address(), False)
db = lisp_db_for_lookups.lookup_cache(seid, False)
if (db != None):
#
# Check accept policy and if accepted, discover EID by putting
# in discovery cache. ETR will register it.
#
if (db.dynamic_eid_configured()):
interface = lisp_allow_dynamic_eid(device, seid)
if (interface != None and lisp_i_am_itr):
lisp_itr_discover_eid(db, seid, device, interface)
else:
lprint(("Disallow dynamic source-EID {} " + \
"on interface {}").format(e, device))
#endif
#endif
else:
lprint("Punt from non-EID source {}".format(e))
#endif
#endif
#
# Do Map-Request processing on destination.
#
if (deid):
mc = lisp_map_cache_lookup(seid, deid)
if (mc == None or mc.action == LISP_SEND_MAP_REQUEST_ACTION):
#
# Check if we should rate-limit Map-Request and if not send
# Map-Request.
#
if (lisp_rate_limit_map_request(seid, deid)): return
lisp_send_map_request(lisp_send_sockets, lisp_ephem_port,
seid, deid, None)
else:
e = green(deid.print_address(), False)
lprint("Map-cache entry for {} already exists".format(e))
#endif
#endif
return
#enddef
#
# lisp_ipc_map_cache_entry
#
# Callback from class lisp_cache.walk_cache().
#
def lisp_ipc_map_cache_entry(mc, jdata):
entry = lisp_write_ipc_map_cache(True, mc, dont_send=True)
jdata.append(entry)
return([True, jdata])
#enddef
#
# lisp_ipc_walk_map_cache
#
# Walk the entries in the lisp_map_cache(). And then subsequently walk the
# entries in lisp_mapping.source_cache().
#
def lisp_ipc_walk_map_cache(mc, jdata):
#
# There is only destination state in this map-cache entry.
#
if (mc.group.is_null()): return(lisp_ipc_map_cache_entry(mc, jdata))
if (mc.source_cache == None): return([True, jdata])
#
# There is (source, group) state so walk all sources for this group
# entry.
#
jdata = mc.source_cache.walk_cache(lisp_ipc_map_cache_entry, jdata)
return([True, jdata])
#enddef
#
# lisp_itr_discover_eid
#
# Put dynamic-EID in db.dynamic_eids{} array.
#
def lisp_itr_discover_eid(db, eid, input_interface, routed_interface,
lisp_ipc_listen_socket):
eid_str = eid.print_address()
if (db.dynamic_eids.has_key(eid_str)):
db.dynamic_eids[eid_str].last_packet = lisp_get_timestamp()
return
#endif
#
# Add to list.
#
dyn_eid = lisp_dynamic_eid()
dyn_eid.dynamic_eid.copy_address(eid)
dyn_eid.interface = routed_interface
dyn_eid.last_packet = lisp_get_timestamp()
dyn_eid.get_timeout(routed_interface)
db.dynamic_eids[eid_str] = dyn_eid
routed = ""
if (input_interface != routed_interface):
routed = ", routed-interface " + routed_interface
#endif
eid_string = green(eid_str, False) + bold(" discovered", False)
lprint("Dynamic-EID {} on interface {}{}, timeout {}".format( \
eid_string,input_interface, routed, dyn_eid.timeout))
#
# Tell ETR process so it can register dynamic-EID.
#
ipc = "learn%{}%{}".format(eid_str, routed_interface)
ipc = lisp_command_ipc(ipc, "lisp-itr")
lisp_ipc(ipc, lisp_ipc_listen_socket, "lisp-etr")
return
#enddef
#
# lisp_retry_decap_keys
#
# A decap-key was copied from x.x.x.x:p to x.x.x.x, but it was the wrong one.
# Copy x.x.x.x:q to x.x.x.x. This is an expensive function. But it is hardly
# used. And once it is used for a particular addr_str, it shouldn't be used
# again.
#
# This function is only used when an ICV error occurs when x.x.x.x is the
# crypto-key used.
#
def lisp_retry_decap_keys(addr_str, packet, iv, packet_icv):
if (lisp_search_decap_keys == False): return
#
# Only use this function when the key matched was not port based.
#
if (addr_str.find(":") != -1): return
parent = lisp_crypto_keys_by_rloc_decap[addr_str]
for key in lisp_crypto_keys_by_rloc_decap:
#
# Find entry that has same source RLOC.
#
if (key.find(addr_str) == -1): continue
#
# Skip over parent entry.
#
if (key == addr_str): continue
#
# If crypto-keys the same, go to find next one.
#
entry = lisp_crypto_keys_by_rloc_decap[key]
if (entry == parent): continue
#
# Try ICV check. If works, then go to this key.
#
crypto_key = entry[1]
if (packet_icv != crypto_key.do_icv(packet, iv)):
lprint("Test ICV with key {} failed".format(red(key, False)))
continue
#endif
lprint("Changing decap crypto key to {}".format(red(key, False)))
lisp_crypto_keys_by_rloc_decap[addr_str] = entry
#endfor
return
#enddef
#
# lisp_decent_pull_xtr_configured
#
# Return True if the configured LISP-Decent modulus is not 0, meaning we are
# using the LISP-Decent pull-based mapping system.
#
def lisp_decent_pull_xtr_configured():
return(lisp_decent_modulus != 0 and lisp_decent_dns_suffix != None)
#enddef
#
# lisp_is_decent_dns_suffix
#
# Return True if supplied DNS name ends with a configured LISP-Decent DNS
# suffix.
#
def lisp_is_decent_dns_suffix(dns_name):
if (lisp_decent_dns_suffix == None): return(False)
name = dns_name.split(".")
name = ".".join(name[1::])
return(name == lisp_decent_dns_suffix)
#enddef
#
# lisp_get_decent_index
#
# Hash the EID-prefix and mod the configured LISP-Decent modulus value.
#
def lisp_get_decent_index(eid):
eid_str = eid.print_prefix()
hash_value = hashlib.sha256(eid_str).hexdigest()
index = int(hash_value, 16) % lisp_decent_modulus
return(index)
#enddef
#
# lisp_get_decent_dns_name
#
# Based on EID, get index and prepend to LISP-Decent DNS name suffix.
#
def lisp_get_decent_dns_name(eid):
index = lisp_get_decent_index(eid)
return(str(index) + "." + lisp_decent_dns_suffix)
#enddef
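#
# Editor's illustration, not part of the original source: with hypothetical
# configuration lisp_decent_modulus = 16 and lisp_decent_dns_suffix =
# "decent.example.com", an EID whose sha256 hash of its printed prefix,
# taken mod 16, is 5 produces the DNS name "5.decent.example.com", and
# lisp_is_decent_dns_suffix("5.decent.example.com") returns True.
#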
#
# lisp_get_decent_dns_name_from_str
#
# The supplied EID is an address passed as a string. Build an internal
# lisp_address() to pass into lisp_get_decent_index().
#
def lisp_get_decent_dns_name_from_str(iid, eid_str):
eid = lisp_address(LISP_AFI_NONE, eid_str, 0, iid)
index = lisp_get_decent_index(eid)
return(str(index) + "." + lisp_decent_dns_suffix)
#enddef
#
# lisp_trace_append
#
# Append JSON data to trace packet. If this is the ETR, the EIDs will be
# swapped to return packet to originator.
#
# Returning False means the caller should return (and not forward the packet).
#
def lisp_trace_append(packet, reason=None, ed="encap", lisp_socket=None,
rloc_entry=None):
offset = 28 if packet.inner_version == 4 else 48
trace_pkt = packet.packet[offset::]
trace = lisp_trace()
if (trace.decode(trace_pkt) == False):
lprint("Could not decode JSON portion of a LISP-Trace packet")
return(False)
#endif
next_rloc = "?" if packet.outer_dest.is_null() else \
packet.outer_dest.print_address_no_iid()
#
# Display port if the caller is an encapsulating RTR using a translated
# RLOC.
#
if (next_rloc != "?" and packet.encap_port != LISP_DATA_PORT):
if (ed == "encap"): next_rloc += ":{}".format(packet.encap_port)
#endif
#
# Add node entry data for the encapsulation or decapsulation.
#
entry = {}
entry["node"] = "ITR" if lisp_i_am_itr else "ETR" if lisp_i_am_etr else \
"RTR" if lisp_i_am_rtr else "?"
srloc = packet.outer_source
if (srloc.is_null()): srloc = lisp_myrlocs[0]
entry["srloc"] = srloc.print_address_no_iid()
#
# In the source RLOC include the ephemeral port number of the ltr client
# so RTRs can return errors to the client behind a NAT.
#
if (entry["node"] == "ITR" and packet.inner_sport != LISP_TRACE_PORT):
entry["srloc"] += ":{}".format(packet.inner_sport)
#endif
entry["hn"] = lisp_hostname
key = ed + "-ts"
entry[key] = lisp_get_timestamp()
#
# If this is an ETR decap entry and the drloc is "?", the packet came in on
# lisp_etr_nat_data_plane() where the kernel strips the outer header. Get
# the local/private RLOC from our database-mapping.
#
if (next_rloc == "?" and entry["node"] == "ETR"):
db = lisp_db_for_lookups.lookup_cache(packet.inner_dest, False)
if (db != None and len(db.rloc_set) >= 1):
next_rloc = db.rloc_set[0].rloc.print_address_no_iid()
#endif
#endif
entry["drloc"] = next_rloc
#
# If there is a reason there is no dest RLOC, include it.
#
if (next_rloc == "?" and reason != None):
entry["drloc"] += " ({})".format(reason)
#endif
#
# Add recent-rtts and recent-hops.
#
if (rloc_entry != None):
entry["rtts"] = rloc_entry.recent_rloc_probe_rtts
entry["hops"] = rloc_entry.recent_rloc_probe_hops
#endif
#
# Build seid->deid record if it does not exist. Then append node entry
# to record below, in the search loop.
#
seid = packet.inner_source.print_address()
deid = packet.inner_dest.print_address()
if (trace.packet_json == []):
rec = {}
rec["seid"] = seid
rec["deid"] = deid
rec["paths"] = []
trace.packet_json.append(rec)
#endif
#
# Search for record. If we are appending the first ITR node entry, get its
# RLOC address in case we have to return-to-sender.
#
for rec in trace.packet_json:
if (rec["deid"] != deid): continue
rec["paths"].append(entry)
break
#endfor
#
# If we are destination-EID, add a new record deid->seid if we have not
# completed a round-trip. The ETR will deliver this packet from its own
# EID which means the co-located ITR will pcap the packet and add its
# encap node entry.
#
swap = False
if (len(trace.packet_json) == 1 and entry["node"] == "ETR" and
trace.myeid(packet.inner_dest)):
rec = {}
rec["seid"] = deid
rec["deid"] = seid
rec["paths"] = []
trace.packet_json.append(rec)
swap = True
#endif
#
# Print the JSON packet after we appended data to it. Put the new JSON in
# packet. Fix up lengths and checksums from inner headers.
#
trace.print_trace()
trace_pkt = trace.encode()
#
# If next_rloc is not known, we need to return packet to sender.
#
# Otherwise we are forwarding a packet that is about to be encapsulated or we
# are forwarding a packet that was just decapsulated with the addresses
# swapped so we can turn it around.
#
sender_rloc = trace.packet_json[0]["paths"][0]["srloc"]
if (next_rloc == "?"):
lprint("LISP-Trace return to sender RLOC {}".format(sender_rloc))
trace.return_to_sender(lisp_socket, sender_rloc, trace_pkt)
return(False)
#endif
#
# Compute length of trace packet. This includes the UDP header, Trace
# header, and JSON payload.
#
udplen = trace.packet_length()
#
# Fix up UDP length and recompute UDP checksum if IPv6 packet, zero
# otherwise. Only do checksum when the Trace went round-trip and this is
# the local ETR delivery EID-based Trace packet to the client ltr.
#
headers = packet.packet[0:offset]
p = struct.pack("HH", socket.htons(udplen), 0)
headers = headers[0:offset-4] + p
if (packet.inner_version == 6 and entry["node"] == "ETR" and
len(trace.packet_json) == 2):
udp = headers[offset-8::] + trace_pkt
udp = lisp_udp_checksum(seid, deid, udp)
headers = headers[0:offset-8] + udp[0:8]
#endif
#
# If we are swapping addresses, do it here so the JSON append and the IP
# header field changes are all reflected in the new IPv4 header checksum.
#
if (swap):
if (packet.inner_version == 4):
headers = headers[0:12] + headers[16:20] + headers[12:16] + \
headers[22:24] + headers[20:22] + headers[24::]
else:
headers = headers[0:8] + headers[24:40] + headers[8:24] + \
headers[42:44] + headers[40:42] + headers[44::]
#endif
d = packet.inner_dest
packet.inner_dest = packet.inner_source
packet.inner_source = d
#endif
#
# Fix up IP length.
#
offset = 2 if packet.inner_version == 4 else 4
iplen = 20 + udplen if packet.inner_version == 4 else udplen
h = struct.pack("H", socket.htons(iplen))
headers = headers[0:offset] + h + headers[offset+2::]
#
# Fix up IPv4 header checksum.
#
if (packet.inner_version == 4):
c = struct.pack("H", 0)
headers = headers[0:10] + c + headers[12::]
h = lisp_ip_checksum(headers[0:20])
headers = h + headers[20::]
#endif
#
# Caller is forwarding packet, either as an ITR, RTR, or ETR.
#
packet.packet = headers + trace_pkt
return(True)
#enddef
#
# lisp_allow_gleaning
#
# Check the lisp_glean_mappings array to see if we should glean the EID and
# RLOC. Find the first match. Return (False, False, False) if there are no
# configured glean mappings. The second and third return values tell whether
# the matched entry was configured to RLOC-probe the gleaned RLOC and to
# send IGMP queries, respectively.
#
def lisp_allow_gleaning(eid, group, rloc):
if (lisp_glean_mappings == []): return(False, False, False)
for entry in lisp_glean_mappings:
if (entry.has_key("instance-id")):
iid = eid.instance_id
low, high = entry["instance-id"]
if (iid < low or iid > high): continue
#endif
if (entry.has_key("eid-prefix")):
e = copy.deepcopy(entry["eid-prefix"])
e.instance_id = eid.instance_id
if (eid.is_more_specific(e) == False): continue
#endif
if (entry.has_key("group-prefix")):
if (group == None): continue
g = copy.deepcopy(entry["group-prefix"])
g.instance_id = group.instance_id
if (group.is_more_specific(g) == False): continue
#endif
if (entry.has_key("rloc-prefix")):
if (rloc != None and rloc.is_more_specific(entry["rloc-prefix"])
== False): continue
#endif
return(True, entry["rloc-probe"], entry["igmp-query"])
#endfor
return(False, False, False)
#enddef
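#
# Editor's illustration, not part of the original source: shape of a
# lisp_glean_mappings entry this function matches against (all values are
# hypothetical; the *-prefix fields hold lisp_address() objects):
#
# { "instance-id" : (0, 1000),
#   "eid-prefix"  : <lisp_address for 10.0.0.0/8>,
#   "rloc-prefix" : <lisp_address for 192.0.2.0/24>,
#   "rloc-probe"  : True,
#   "igmp-query"  : False }
#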
#
# lisp_build_gleaned_multicast
#
# Build (*,G) map-cache entry in RTR with gleaned RLOC info from IGMP report.
#
def lisp_build_gleaned_multicast(seid, geid, rloc, port, igmp):
group_str = geid.print_address()
seid_name = seid.print_address_no_iid()
s = green("{}".format(seid_name), False)
e = green("(*, {})".format(group_str), False)
r = red(rloc.print_address_no_iid() + ":" + str(port), False)
#
# Support (*,G) only gleaning. Scales better anyway.
#
mc = lisp_map_cache_lookup(seid, geid)
if (mc == None):
mc = lisp_mapping("", "", [])
mc.group.copy_address(geid)
mc.eid.copy_address(geid)
mc.eid.address = 0
mc.eid.mask_len = 0
mc.mapping_source.copy_address(rloc)
mc.map_cache_ttl = LISP_IGMP_TTL
mc.gleaned = True
mc.add_cache()
lprint("Add gleaned EID {} to map-cache".format(e))
#endif
#
# Check to see if RLE node exists. If so, update the RLE node RLOC and
# encap-port.
#
rloc_entry = rle_entry = rle_node = None
if (mc.rloc_set != []):
rloc_entry = mc.rloc_set[0]
if (rloc_entry.rle):
rle_entry = rloc_entry.rle
for rn in rle_entry.rle_nodes:
if (rn.rloc_name != seid_name): continue
rle_node = rn
break
#endfor
#endif
#endif
#
# Adding RLE to existing rloc-set or create new one.
#
if (rloc_entry == None):
rloc_entry = lisp_rloc()
mc.rloc_set = [rloc_entry]
rloc_entry.priority = 253
rloc_entry.mpriority = 255
mc.build_best_rloc_set()
#endif
if (rle_entry == None):
rle_entry = lisp_rle(geid.print_address())
rloc_entry.rle = rle_entry
#endif
if (rle_node == None):
rle_node = lisp_rle_node()
rle_node.rloc_name = seid_name
rle_entry.rle_nodes.append(rle_node)
rle_entry.build_forwarding_list()
lprint("Add RLE {} from {} for gleaned EID {}".format(r, s, e))
elif (rloc.is_exact_match(rle_node.address) == False or
port != rle_node.translated_port):
lprint("Changed RLE {} from {} for gleaned EID {}".format(r, s, e))
#endif
#
# Add or update.
#
rle_node.store_translated_rloc(rloc, port)
#
# An IGMP report was received. Update timestamp so we don't time out
# actively joined groups.
#
if (igmp):
seid_str = seid.print_address()
if (lisp_gleaned_groups.has_key(seid_str) == False):
lisp_gleaned_groups[seid_str] = {}
#endif
lisp_gleaned_groups[seid_str][group_str] = lisp_get_timestamp()
#endif
#enddef
#
# lisp_remove_gleaned_multicast
#
# Remove an RLE from a gleaned entry since an IGMP Leave message was received.
#
def lisp_remove_gleaned_multicast(seid, geid):
#
# Support (*,G) only gleaning. Scales better anyway.
#
mc = lisp_map_cache_lookup(seid, geid)
if (mc == None): return
rle = mc.rloc_set[0].rle
if (rle == None): return
rloc_name = seid.print_address_no_iid()
found = False
for rle_node in rle.rle_nodes:
if (rle_node.rloc_name == rloc_name):
found = True
break
#endif
#endfor
if (found == False): return
#
# Found entry to remove.
#
rle.rle_nodes.remove(rle_node)
rle.build_forwarding_list()
group_str = geid.print_address()
seid_str = seid.print_address()
s = green("{}".format(seid_str), False)
e = green("(*, {})".format(group_str), False)
lprint("Gleaned EID {} RLE removed for {}".format(e, s))
#
# Remove that EID has joined the group.
#
if (lisp_gleaned_groups.has_key(seid_str)):
if (lisp_gleaned_groups[seid_str].has_key(group_str)):
lisp_gleaned_groups[seid_str].pop(group_str)
#endif
#endif
#
# Remove map-cache entry if no more RLEs present.
#
if (rle.rle_nodes == []):
mc.delete_cache()
lprint("Gleaned EID {} remove, no more RLEs".format(e))
#endif
#enddef
#
# lisp_change_gleaned_multicast
#
# Change RLOC for each gleaned group this EID has joined.
#
def lisp_change_gleaned_multicast(seid, rloc, port):
seid_str = seid.print_address()
if (lisp_gleaned_groups.has_key(seid_str) == False): return
for group in lisp_gleaned_groups[seid_str]:
lisp_geid.store_address(group)
lisp_build_gleaned_multicast(seid, lisp_geid, rloc, port, False)
#endfor
#enddef
#
# lisp_process_igmp_packet
#
# Process IGMP packets.
#
# Report types 0x12 and 0x16 are Joins, type 0x17 is a Leave, and IGMPv3
# (type 0x22) group records can encode either.
#
#
# An IGMPv1 and IGMPv2 report format is:
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# |Version| Type | Unused | Checksum |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Group Address |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
# An IGMPv3 report format is:
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Type = 0x22 | Reserved | Checksum |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Reserved | Number of Group Records (M) |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | |
# . .
# . Group Record [1] .
# . .
# | |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | |
# . .
# . Group Record [2] .
# . .
# | |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | . |
# . . .
# | . |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | |
# . .
# . Group Record [M] .
# . .
# | |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
# An IGMPv3 group record format is:
#
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Record Type | Aux Data Len | Number of Sources (N) |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Multicast Address |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Source Address [1] |
# +- -+
# | Source Address [2] |
# +- -+
# . . .
# . . .
# . . .
# +- -+
# | Source Address [N] |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | |
# . .
# . Auxiliary Data .
# . .
# | |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
#
# The function returns a boolean (True) when the packet is an IGMP query and
# an array when it is a report. The caller must check whether there is
# context to deal with IGMP queries.
#
# IMPORTANT NOTE: for encapsulated IGMP Queries to be forwarded correctly
# after the ETR decapsulates them, you need this in the kernel (put this
# statement in the RL script):
#
# ip route add 224.0.0.1/32 dev lo
#
# For OOR running as a LISP-MN use:
#
# ip route add 224.0.0.1/32 dev utun4
#
igmp_types = { 17 : "IGMP-query", 18 : "IGMPv1-report", 19 : "DVMRP",
20 : "PIMv1", 22 : "IGMPv2-report", 23 : "IGMPv2-leave",
30 : "mtrace-response", 31 : "mtrace-request", 34 : "IGMPv3-report" }
lisp_igmp_record_types = { 1 : "include-mode", 2 : "exclude-mode",
3 : "change-to-include", 4 : "change-to-exclude", 5 : "allow-new-source",
6 : "block-old-sources" }
def lisp_process_igmp_packet(packet):
source = lisp_address(LISP_AFI_IPV4, "", 32, 0)
source.address = socket.ntohl(struct.unpack("I", packet[12:16])[0])
source = bold("from {}".format(source.print_address_no_iid()), False)
r = bold("Receive", False)
lprint("{} {}-byte {}, IGMP packet: {}".format(r, len(packet), source,
lisp_format_packet(packet)))
#
# Jump over IP header.
#
header_offset = (struct.unpack("B", packet[0])[0] & 0x0f) * 4
#
# Check for IGMPv3 type value 0x22. Or process an IGMPv2 report.
#
igmp = packet[header_offset::]
igmp_type = struct.unpack("B", igmp[0])[0]
#
# Maybe this is an IGMPv1 or IGMPv2 message so get group address. If
# IGMPv3, we will fix up group address in loop (for each group record).
#
group = lisp_address(LISP_AFI_IPV4, "", 32, 0)
group.address = socket.ntohl(struct.unpack("II", igmp[:8])[1])
group_str = group.print_address_no_iid()
if (igmp_type == 17):
lprint("IGMP Query for group {}".format(group_str))
return(True)
#endif
reports_and_leaves_only = (igmp_type in (0x12, 0x16, 0x17, 0x22))
if (reports_and_leaves_only == False):
igmp_str = "{} ({})".format(igmp_type, igmp_types[igmp_type]) if \
igmp_types.has_key(igmp_type) else igmp_type
lprint("IGMP type {} not supported".format(igmp_str))
return([])
#endif
if (len(igmp) < 8):
lprint("IGMP message too small")
return([])
#endif
#
# Process either IGMPv1 or IGMPv2 and exit.
#
if (igmp_type == 0x17):
lprint("IGMPv2 leave (*, {})".format(bold(group_str, False)))
return([[None, group_str, False]])
#endif
if (igmp_type in (0x12, 0x16)):
lprint("IGMPv{} join (*, {})".format( \
1 if (igmp_type == 0x12) else 2, bold(group_str, False)))
#
# Suppress for link-local groups.
#
if (group_str.find("224.0.0.") != -1):
lprint("Suppress registration for link-local groups")
else:
return([[None, group_str, True]])
#endif
#
# Finished with IGMPv1 or IGMPv2 processing.
#
return([])
#endif
#
# Parse each record for IGMPv3 (igmp_type == 0x22).
#
record_count = group.address
igmp = igmp[8::]
group_format = "BBHI"
group_size = struct.calcsize(group_format)
source_format = "I"
source_size = struct.calcsize(source_format)
source = lisp_address(LISP_AFI_IPV4, "", 32, 0)
#
# Traverse each group record.
#
register_entries = []
for i in range(record_count):
if (len(igmp) < group_size): return
record_type, x, source_count, address = struct.unpack(group_format,
igmp[:group_size])
igmp = igmp[group_size::]
if (lisp_igmp_record_types.has_key(record_type) == False):
lprint("Invalid record type {}".format(record_type))
continue
#endif
record_type_str = lisp_igmp_record_types[record_type]
source_count = socket.ntohs(source_count)
group.address = socket.ntohl(address)
group_str = group.print_address_no_iid()
lprint("Record type: {}, group: {}, source-count: {}".format( \
record_type_str, group_str, source_count))
#
# Determine if this is a join or leave. MODE_IS_INCLUDE (1) is a join.
# MODE_TO_EXCLUDE (4) with no sources is a join. CHANGE_TO_INCLUDE (5)
# is a join. Everything else is a leave.
#
joinleave = False
if (record_type in (1, 5)): joinleave = True
if (record_type in (2, 4) and source_count == 0): joinleave = True
j_or_l = "join" if (joinleave) else "leave"
#
# Suppress registration for link-local groups.
#
if (group_str.find("224.0.0.") != -1):
lprint("Suppress registration for link-local groups")
continue
#endif
#
# (*,G) Join or Leave has been received if source count is 0.
#
# If this is IGMPv2 or just IGMPv3 reporting a group address, encode
# a (*,G) for the element in the register_entries array.
#
if (source_count == 0):
register_entries.append([None, group_str, joinleave])
lprint("IGMPv3 {} (*, {})".format(bold(j_or_l, False),
bold(group_str, False)))
#endif
#
# Process (S,G)s (source records)..
#
for j in range(source_count):
if (len(igmp) < source_size): return
address = struct.unpack(source_format, igmp[:source_size])[0]
source.address = socket.ntohl(address)
source_str = source.print_address_no_iid()
register_entries.append([source_str, group_str, joinleave])
lprint("{} ({}, {})".format(j_or_l,
green(source_str, False), bold(group_str, False)))
igmp = igmp[source_size::]
#endfor
#endfor
#
# Return (S,G) entries to the caller so it can send a Map-Register.
# They are put in a multicast Info LCAF Type with ourselves as an RLE.
# This is spec'ed in RFC 8378.
#
return(register_entries)
#enddef
#
# lisp_glean_map_cache
#
# Add or update a gleaned EID/RLOC to the map-cache. This function will do
# this for the source EID of a packet and IGMP reported groups with one call.
#
lisp_geid = lisp_address(LISP_AFI_IPV4, "", 32, 0)
def lisp_glean_map_cache(seid, rloc, encap_port, igmp):
#
# First do lookup to see if EID is in map-cache. Check to see if RLOC
# or encap-port needs updating. If not, return. Set refresh timer since
# we received a packet from the source gleaned EID.
#
rloc_change = True
mc = lisp_map_cache.lookup_cache(seid, True)
if (mc and len(mc.rloc_set) != 0):
mc.last_refresh_time = lisp_get_timestamp()
cached_rloc = mc.rloc_set[0]
orloc = cached_rloc.rloc
oport = cached_rloc.translated_port
rloc_change = (orloc.is_exact_match(rloc) == False or
oport != encap_port)
if (rloc_change):
e = green(seid.print_address(), False)
r = red(rloc.print_address_no_iid() + ":" + str(encap_port), False)
lprint("Change gleaned EID {} to RLOC {}".format(e, r))
cached_rloc.delete_from_rloc_probe_list(mc.eid, mc.group)
lisp_change_gleaned_multicast(seid, rloc, encap_port)
#endif
else:
mc = lisp_mapping("", "", [])
mc.eid.copy_address(seid)
mc.mapping_source.copy_address(rloc)
mc.map_cache_ttl = LISP_GLEAN_TTL
mc.gleaned = True
e = green(seid.print_address(), False)
r = red(rloc.print_address_no_iid() + ":" + str(encap_port), False)
lprint("Add gleaned EID {} to map-cache with RLOC {}".format(e, r))
mc.add_cache()
#endif
#
# Adding RLOC to new map-cache entry or updating RLOC for existing entry..
#
if (rloc_change):
rloc_entry = lisp_rloc()
rloc_entry.store_translated_rloc(rloc, encap_port)
rloc_entry.add_to_rloc_probe_list(mc.eid, mc.group)
rloc_entry.priority = 253
rloc_entry.mpriority = 255
rloc_set = [rloc_entry]
mc.rloc_set = rloc_set
mc.build_best_rloc_set()
#endif
#
# Unicast gleaning only.
#
if (igmp == None): return
#
# Process IGMP report. For each group, put in map-cache with gleaned
# source RLOC and source port.
#
lisp_geid.instance_id = seid.instance_id
#
# Add (S,G) or (*,G) to map-cache. Do not do lookup in group-mappings.
# The lisp-etr process will do this.
#
entries = lisp_process_igmp_packet(igmp)
if (type(entries) == bool): return
for source, group, joinleave in entries:
if (source != None): continue
#
# Does policy allow gleaning for this joined multicast group.
#
lisp_geid.store_address(group)
allow, x, y = lisp_allow_gleaning(seid, lisp_geid, rloc)
if (allow == False): continue
if (joinleave):
lisp_build_gleaned_multicast(seid, lisp_geid, rloc, encap_port,
True)
else:
lisp_remove_gleaned_multicast(seid, lisp_geid)
#endif
#endfor
#enddef
#------------------------------------------------------------------------------
|
[] |
[] |
[
"LISP_RTR_BEHIND_NAT",
"LISP_USE_POLY",
"LISP_SEND_ICMP_TOO_BIG",
"LISP_USE_CHACHA",
"LISP_ENFORCE_BINDTODEVICE",
"LISP_RUN_LISP_XTR",
"LISP_USE_AES_GCM",
"LISP_GEO_ZOOM_LEVEL",
"LISP_ADDR_SELECT",
"LISP_IGNORE_DF_BIT",
"LISP_NO_CRYPTO"
] |
[]
|
["LISP_RTR_BEHIND_NAT", "LISP_USE_POLY", "LISP_SEND_ICMP_TOO_BIG", "LISP_USE_CHACHA", "LISP_ENFORCE_BINDTODEVICE", "LISP_RUN_LISP_XTR", "LISP_USE_AES_GCM", "LISP_GEO_ZOOM_LEVEL", "LISP_ADDR_SELECT", "LISP_IGNORE_DF_BIT", "LISP_NO_CRYPTO"]
|
python
| 11 | 0 | |
backend/covid19_site/settings_prod.py
|
"""
Django settings for covid19_site project.
Generated by 'django-admin startproject' using Django 3.0.4.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = os.environ['SECRET_KEY']
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = False
ALLOWED_HOSTS = [
'dslab-covid19-backend.herokuapp.com',
]
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'corsheaders',
'rest_framework',
'model_api.apps.ModelApiConfig',
]
MIDDLEWARE = [
'corsheaders.middleware.CorsMiddleware',
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
CORS_ORIGIN_ALLOW_ALL = True
CORS_ORIGIN_WHITELIST = [
# React dev server.
'http://localhost:3000',
'http://127.0.0.1:3000',
]
ROOT_URLCONF = 'covid19_site.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'covid19_site.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
|
[] |
[] |
[
"SECRET_KEY"
] |
[]
|
["SECRET_KEY"]
|
python
| 1 | 0 | |
testsuite.py
|
# Copyright (c) 2011-2018, Ulf Magnusson
# SPDX-License-Identifier: ISC
# This is the Kconfiglib test suite. It runs selftests on Kconfigs provided by
# us and tests compatibility with the C Kconfig implementation by comparing the
# output of Kconfiglib with the output of the scripts/kconfig/*conf utilities
# for different targets and defconfigs. It should be run from the top-level
# kernel directory with
#
# $ python Kconfiglib/testsuite.py
#
# Some additional options can be turned on by passing them as arguments. They
# default to off.
#
# - obsessive:
# By default, only valid arch/defconfig pairs are tested. In obsessive mode,
# every arch will be tested with every defconfig. Increases the testing time
# by an order of magnitude. Occasionally finds (usually obscure) bugs, and I
# make sure everything passes with it.
#
# - obsessive-min-config:
# Like obsessive, for the minimal configuration (defconfig) tests.
#
# - log:
# Log timestamped defconfig test failures to the file test_defconfig_fails.
# Handy in obsessive mode.
#
# For example, this command runs the test suite in obsessive mode with logging
# enabled:
#
# $ python(3) Kconfiglib/testsuite.py obsessive log
#
# pypy works too, and runs most tests much faster than CPython.
#
# All tests should pass. Report regressions to ulfalizer a.t Google's email
# service.
from kconfiglib import Kconfig, Symbol, Choice, COMMENT, MENU, MenuNode, \
BOOL, TRISTATE, HEX, STRING, \
TRI_TO_STR, \
escape, unescape, \
expr_str, expr_value, expr_items, split_expr, \
_ordered_unique, \
OR, AND, \
KconfigError
import difflib
import errno
import os
import platform
import re
import shutil
import subprocess
import sys
import tempfile
import textwrap
def shell(cmd):
with open(os.devnull, "w") as devnull:
subprocess.call(cmd, shell=True, stdout=devnull, stderr=devnull)
all_passed = True
def fail(msg=None):
global all_passed
all_passed = False
if msg is not None:
print("fail: " + msg)
def verify(cond, msg):
if not cond:
fail(msg)
def verify_equal(x, y):
if x != y:
fail("'{}' does not equal '{}'".format(x, y))
# Prevent accidental loading of configuration files by removing
# KCONFIG_ALLCONFIG from the environment
os.environ.pop("KCONFIG_ALLCONFIG", None)
obsessive = False
obsessive_min_config = False
log = False
def run_tests():
global obsessive, obsessive_min_config, log
for s in sys.argv[1:]:
if s == "obsessive":
obsessive = True
print("Obsessive mode enabled")
elif s == "obsessive-min-config":
obsessive_min_config = True
print("Obsessive minimal config mode enabled")
elif s == "log":
log = True
print("Log mode enabled")
else:
print("Unrecognized option '{}'".format(s))
return
run_selftests()
run_compatibility_tests()
def run_selftests():
#
# Common helper functions. These all expect 'c' to hold the current
# configuration.
#
def verify_value(sym_name, val):
"""
Verifies that a symbol has a particular value.
"""
if isinstance(val, int):
val = TRI_TO_STR[val]
sym = c.syms[sym_name]
verify(sym.str_value == val,
'expected {} to have the value "{}", had the value "{}"'
.format(sym_name, val, sym.str_value))
def assign_and_verify_value(sym_name, val, new_val):
"""
Assigns 'val' to a symbol and verifies that its value becomes
'new_val'. Assumes (and tests) that 'val' is valid for the
symbol type.
"""
if isinstance(new_val, int):
new_val = TRI_TO_STR[new_val]
sym = c.syms[sym_name]
old_val = sym.str_value
verify(sym.set_value(val),
"assigning '{}' to {} unexpectedly failed"
.format(val, sym_name))
verify(sym.str_value == new_val,
"expected {} to have the value '{}' after being assigned the "
"value '{}'. Instead, the value is '{}'. The old value was "
"'{}'."
.format(sym_name, new_val, val, sym.str_value, old_val))
def assign_and_verify(sym_name, user_val):
"""
Like assign_and_verify_value(), with the expected value being the
value just set.
"""
assign_and_verify_value(sym_name, user_val, user_val)
def assign_and_verify_user_value(sym_name, val, user_val, valid):
"""
Assigns a user value to the symbol and verifies the new user value. If
valid is True, the user value is valid for the type, otherwise not.
This is used to test the set_value() return value.
"""
sym = c.syms[sym_name]
sym_old_user_val = sym.user_value
verify(sym.set_value(val) == valid,
"expected the user value '{}' to be {} for {}, was not"
.format(val, "valid" if valid else "invalid", sym_name))
verify(sym.user_value == user_val,
"the assigned user value '{}' wasn't reflected in user_value "
"on the symbol {}. Instead, the new user_value was '{}'. The "
"old user value was '{}'."
.format(user_val, sym_name, sym.user_value, sym_old_user_val))
#
# Selftests
#
print("Testing string literal lexing")
# Dummy empty configuration just to get a Kconfig object
c = Kconfig("Kconfiglib/tests/empty")
def verify_string_lex(s, expected):
"""
Verifies that a constant symbol with the name 'res' is produced from
lexing 's'
"""
res = c._tokenize("if " + s)[1].name
verify(res == expected,
"expected <{}> to produced the constant symbol <{}>, "
'produced <{}>'.format(s[1:-1], expected, res))
verify_string_lex(r""" "" """, "")
verify_string_lex(r""" '' """, "")
verify_string_lex(r""" "a" """, "a")
verify_string_lex(r""" 'a' """, "a")
verify_string_lex(r""" "ab" """, "ab")
verify_string_lex(r""" 'ab' """, "ab")
verify_string_lex(r""" "abc" """, "abc")
verify_string_lex(r""" 'abc' """, "abc")
verify_string_lex(r""" "'" """, "'")
verify_string_lex(r""" '"' """, '"')
verify_string_lex(r""" "\"" """, '"')
verify_string_lex(r""" '\'' """, "'")
verify_string_lex(r""" "\"\"" """, '""')
verify_string_lex(r""" '\'\'' """, "''")
verify_string_lex(r""" "\'" """, "'")
verify_string_lex(r""" '\"' """, '"')
verify_string_lex(r""" "\\" """, "\\")
verify_string_lex(r""" '\\' """, "\\")
verify_string_lex(r""" "\a\\'\b\c\"'d" """, 'a\\\'bc"\'d')
verify_string_lex(r""" '\a\\"\b\c\'"d' """, "a\\\"bc'\"d")
def verify_string_bad(s):
"""
Verifies that tokenizing 's' throws a KconfigError. Strips the first
and last characters from 's' so we can use readable raw strings as
input.
"""
try:
c.eval_string(s)
except KconfigError:
pass
else:
fail("expected tokenization of {} to fail, didn't".format(s[1:-1]))
verify_string_bad(r""" " """)
verify_string_bad(r""" ' """)
verify_string_bad(r""" "' """)
verify_string_bad(r""" '" """)
verify_string_bad(r""" "\" """)
verify_string_bad(r""" '\' """)
verify_string_bad(r""" "foo """)
verify_string_bad(r""" 'foo """)
print("Testing escape() and unescape()")
def verify_escape_unescape(s, sesc):
# Verify that 's' escapes to 'sesc' and that 'sesc' unescapes to 's'
verify_equal(escape(s), sesc)
verify_equal(unescape(sesc), s)
verify_escape_unescape(r'' , r'' )
verify_escape_unescape(r'foo' , r'foo' )
verify_escape_unescape(r'"' , r'\"' )
verify_escape_unescape(r'""' , r'\"\"' )
verify_escape_unescape('\\' , r'\\' )
verify_escape_unescape(r'\\' , r'\\\\' )
verify_escape_unescape(r'\"' , r'\\\"' )
verify_escape_unescape(r'"ab\cd"ef"', r'\"ab\\cd\"ef\"')
# Backslashes before any character should be unescaped, not just before "
# and \
verify_equal(unescape(r"\afoo\b\c\\d\\\e\\\\f"), r"afoobc\d\e\\f")
print("Testing _ordered_unique()")
verify_equal(_ordered_unique([]), [])
verify_equal(_ordered_unique([1]), [1])
verify_equal(_ordered_unique([1, 2]), [1, 2])
verify_equal(_ordered_unique([1, 1]), [1])
verify_equal(_ordered_unique([1, 1, 2]), [1, 2])
verify_equal(_ordered_unique([1, 2, 1]), [1, 2])
verify_equal(_ordered_unique([1, 2, 2]), [1, 2])
verify_equal(_ordered_unique([1, 2, 3, 2, 1, 2, 3, 4, 3, 2, 1, 0]),
[1, 2, 3, 4, 0])
print("Testing expression evaluation")
c = Kconfig("Kconfiglib/tests/Keval", warn=False)
def verify_eval(expr, val):
res = c.eval_string(expr)
verify(res == val,
"'{}' evaluated to {}, expected {}".format(expr, res, val))
# No modules
verify_eval("n", 0)
verify_eval("m", 0)
verify_eval("y", 2)
verify_eval("'n'", 0)
verify_eval("'m'", 0)
verify_eval("'y'", 2)
verify_eval("M", 2)
# Modules
c.modules.set_value(2)
verify_eval("n", 0)
verify_eval("m", 1)
verify_eval("y", 2)
verify_eval("'n'", 0)
verify_eval("'m'", 1)
verify_eval("'y'", 2)
verify_eval("M", 1)
verify_eval("(Y || N) && (m && y)", 1)
# Non-bool/non-tristate symbols are always n in a tristate sense
verify_eval("Y_STRING", 0)
verify_eval("Y_STRING || m", 1)
# As are all constants besides y and m
verify_eval('"foo"', 0)
verify_eval('"foo" || "bar"', 0)
verify_eval('"foo" || m', 1)
# Test equality for symbols
verify_eval("N = N", 2)
verify_eval("N = n", 2)
verify_eval("N = 'n'", 2)
verify_eval("N != N", 0)
verify_eval("N != n", 0)
verify_eval("N != 'n'", 0)
verify_eval("M = M", 2)
verify_eval("M = m", 2)
verify_eval("M = 'm'", 2)
verify_eval("M != M", 0)
verify_eval("M != m", 0)
verify_eval("M != 'm'", 0)
verify_eval("Y = Y", 2)
verify_eval("Y = y", 2)
verify_eval("Y = 'y'", 2)
verify_eval("Y != Y", 0)
verify_eval("Y != y", 0)
verify_eval("Y != 'y'", 0)
verify_eval("N != M", 2)
verify_eval("N != Y", 2)
verify_eval("M != Y", 2)
verify_eval("Y_STRING = y", 2)
verify_eval("Y_STRING = 'y'", 2)
verify_eval('FOO_BAR_STRING = "foo bar"', 2)
verify_eval('FOO_BAR_STRING != "foo bar baz"', 2)
verify_eval('INT_37 = 37', 2)
verify_eval("INT_37 = '37'", 2)
verify_eval('HEX_0X37 = 0x37', 2)
verify_eval("HEX_0X37 = '0x37'", 2)
# These should also hold after 31847b67 (kconfig: allow use of relations
# other than (in)equality)
verify_eval("HEX_0X37 = '0x037'", 2)
verify_eval("HEX_0X37 = '0x0037'", 2)
# Constant symbol comparisons
verify_eval('"foo" != "bar"', 2)
verify_eval('"foo" = "bar"', 0)
verify_eval('"foo" = "foo"', 2)
# Undefined symbols get their name as their value
c.disable_warnings()
verify_eval("'not_defined' = not_defined", 2)
verify_eval("not_defined_2 = not_defined_2", 2)
verify_eval("not_defined_1 != not_defined_2", 2)
# Test less than/greater than
# Basic evaluation
verify_eval("INT_37 < 38", 2)
verify_eval("38 < INT_37", 0)
verify_eval("INT_37 < '38'", 2)
verify_eval("'38' < INT_37", 0)
verify_eval("INT_37 < 138", 2)
verify_eval("138 < INT_37", 0)
verify_eval("INT_37 < '138'", 2)
verify_eval("'138' < INT_37", 0)
verify_eval("INT_37 < -138", 0)
verify_eval("-138 < INT_37", 2)
verify_eval("INT_37 < '-138'", 0)
verify_eval("'-138' < INT_37", 2)
verify_eval("INT_37 < 37", 0)
verify_eval("37 < INT_37", 0)
verify_eval("INT_37 < 36", 0)
verify_eval("36 < INT_37", 2)
# Different formats in comparison
verify_eval("INT_37 < 0x26", 2) # 38
verify_eval("INT_37 < 0x25", 0) # 37
verify_eval("INT_37 < 0x24", 0) # 36
verify_eval("HEX_0X37 < 56", 2) # 0x38
verify_eval("HEX_0X37 < 55", 0) # 0x37
verify_eval("HEX_0X37 < 54", 0) # 0x36
# Other int comparisons
verify_eval("INT_37 <= 38", 2)
verify_eval("INT_37 <= 37", 2)
verify_eval("INT_37 <= 36", 0)
verify_eval("INT_37 > 38", 0)
verify_eval("INT_37 > 37", 0)
verify_eval("INT_37 > 36", 2)
verify_eval("INT_37 >= 38", 0)
verify_eval("INT_37 >= 37", 2)
verify_eval("INT_37 >= 36", 2)
# Other hex comparisons
verify_eval("HEX_0X37 <= 0x38", 2)
verify_eval("HEX_0X37 <= 0x37", 2)
verify_eval("HEX_0X37 <= 0x36", 0)
verify_eval("HEX_0X37 > 0x38", 0)
verify_eval("HEX_0X37 > 0x37", 0)
verify_eval("HEX_0X37 > 0x36", 2)
verify_eval("HEX_0X37 >= 0x38", 0)
verify_eval("HEX_0X37 >= 0x37", 2)
verify_eval("HEX_0X37 >= 0x36", 2)
# A hex holding a value without a "0x" prefix should still be treated as
# hexadecimal
verify_eval("HEX_37 < 0x38", 2)
verify_eval("HEX_37 < 0x37", 0)
verify_eval("HEX_37 < 0x36", 0)
# Symbol comparisons
verify_eval("INT_37 < HEX_0X37", 2)
verify_eval("INT_37 > HEX_0X37", 0)
verify_eval("HEX_0X37 < INT_37 ", 0)
verify_eval("HEX_0X37 > INT_37 ", 2)
verify_eval("INT_37 < INT_37 ", 0)
verify_eval("INT_37 <= INT_37 ", 2)
verify_eval("INT_37 > INT_37 ", 0)
verify_eval("INT_37 <= INT_37 ", 2)
# Tristate value comparisons
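# (n, m, and y order as 0 < 1 < 2)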
verify_eval("n < n", 0)
verify_eval("n < m", 2)
verify_eval("n < y", 2)
verify_eval("n < N", 0)
verify_eval("n < M", 2)
verify_eval("n < Y", 2)
verify_eval("0 > n", 0)
verify_eval("1 > n", 2)
verify_eval("2 > n", 2)
verify_eval("m < n", 0)
verify_eval("m < m", 0)
verify_eval("m < y", 2)
# Strings compare lexicographically
verify_eval("'aa' < 'ab'", 2)
verify_eval("'aa' > 'ab'", 0)
verify_eval("'ab' < 'aa'", 0)
verify_eval("'ab' > 'aa'", 2)
# Comparisons where one of the operands doesn't parse as a number also give
# a lexicographic comparison
verify_eval("INT_37 < '37a' ", 2)
verify_eval("'37a' > INT_37", 2)
verify_eval("INT_37 <= '37a' ", 2)
verify_eval("'37a' >= INT_37", 2)
verify_eval("INT_37 >= '37a' ", 0)
verify_eval("INT_37 > '37a' ", 0)
verify_eval("'37a' < INT_37", 0)
verify_eval("'37a' <= INT_37", 0)
def verify_eval_bad(expr):
try:
c.eval_string(expr)
except KconfigError:
pass
else:
fail('expected eval_string("{}") to throw KconfigError, '
"didn't".format(expr))
# Verify that malformed expressions raise KconfigErrors
verify_eval_bad("")
verify_eval_bad("&")
verify_eval_bad("|")
verify_eval_bad("!")
verify_eval_bad("(")
verify_eval_bad(")")
verify_eval_bad("=")
verify_eval_bad("(X")
verify_eval_bad("X)")
verify_eval_bad("X X")
verify_eval_bad("!X X")
verify_eval_bad("X !X")
verify_eval_bad("(X) X")
verify_eval_bad("X &&")
verify_eval_bad("&& X")
verify_eval_bad("X && && X")
verify_eval_bad("X && !&&")
verify_eval_bad("X ||")
verify_eval_bad("|| X")
print("Testing Symbol.__str__()/custom_str() and def_{int,hex,string}")
def verify_str(item, s):
verify_equal(str(item), s[1:])
def verify_custom_str(item, s):
verify_equal(item.custom_str(lambda sc: "[{}]".format(sc.name)), s[1:])
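# custom_str() works like str(), but renders each Symbol/Choice reference
# with the given callback (here, by wrapping the name in brackets)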
c = Kconfig("Kconfiglib/tests/Kstr", warn=False)
c.modules.set_value(2)
verify_str(c.syms["UNDEFINED"], """
""")
verify_str(c.syms["BASIC_NO_PROMPT"], """
config BASIC_NO_PROMPT
bool
help
blah blah
blah blah blah
blah
""")
verify_str(c.syms["BASIC_PROMPT"], """
config BASIC_PROMPT
bool
prompt "basic"
""")
verify_str(c.syms["ADVANCED"], """
config ADVANCED
tristate
prompt "prompt" if DEP
default DEFAULT_1
default DEFAULT_2 if DEP
select SELECTED_1
select SELECTED_2 if DEP
imply IMPLIED_1
imply IMPLIED_2 if DEP
help
first help text
config ADVANCED
tristate
prompt "prompt 2"
menuconfig ADVANCED
tristate
prompt "prompt 3"
config ADVANCED
tristate
depends on (A || !B || (C && D) || !(E && F) || G = H || (I && !J && (K || L) && !(M || N) && O = P)) && DEP4 && DEP3
help
second help text
""")
verify_custom_str(c.syms["ADVANCED"], """
config ADVANCED
tristate
prompt "prompt" if [DEP]
default [DEFAULT_1]
default [DEFAULT_2] if [DEP]
select [SELECTED_1]
select [SELECTED_2] if [DEP]
imply [IMPLIED_1]
imply [IMPLIED_2] if [DEP]
help
first help text
config ADVANCED
tristate
prompt "prompt 2"
menuconfig ADVANCED
tristate
prompt "prompt 3"
config ADVANCED
tristate
depends on ([A] || ![B] || ([C] && [D]) || !([E] && [F]) || [G] = [H] || ([I] && ![J] && ([K] || [L]) && !([M] || [N]) && [O] = [P])) && [DEP4] && [DEP3]
help
second help text
""")
verify_str(c.syms["ONLY_DIRECT_DEPS"], """
config ONLY_DIRECT_DEPS
int
depends on DEP1 && DEP2
""")
verify_str(c.syms["STRING"], """
config STRING
string
default "foo"
default "bar" if DEP
default STRING2
default STRING3 if DEP
""")
verify_str(c.syms["INT"], """
config INT
int
range 1 2
range FOO BAR
range BAZ QAZ if DEP
default 7 if DEP
""")
verify_str(c.syms["HEX"], """
config HEX
hex
range 0x100 0x200
range FOO BAR
range BAZ QAZ if DEP
default 0x123
""")
verify_str(c.modules, """
config MODULES
bool
prompt "MODULES"
option modules
""")
verify_str(c.syms["OPTIONS"], """
config OPTIONS
option allnoconfig_y
option defconfig_list
option env="ENV"
""")
verify_str(c.syms["CORRECT_PROP_LOCS_BOOL"], """
config CORRECT_PROP_LOCS_BOOL
bool
prompt "prompt 1" if LOC_1
default DEFAULT_1 if LOC_1
default DEFAULT_2 if LOC_1
select SELECT_1 if LOC_1
select SELECT_2 if LOC_1
imply IMPLY_1 if LOC_1
imply IMPLY_2 if LOC_1
depends on LOC_1
help
help 1
menuconfig CORRECT_PROP_LOCS_BOOL
bool
prompt "prompt 2" if LOC_2
default DEFAULT_3 if LOC_2
default DEFAULT_4 if LOC_2
select SELECT_3 if LOC_2
select SELECT_4 if LOC_2
imply IMPLY_3 if LOC_2
imply IMPLY_4 if LOC_2
depends on LOC_2
help
help 2
config CORRECT_PROP_LOCS_BOOL
bool
prompt "prompt 3" if LOC_3
default DEFAULT_5 if LOC_3
default DEFAULT_6 if LOC_3
select SELECT_5 if LOC_3
select SELECT_6 if LOC_3
imply IMPLY_5 if LOC_3
imply IMPLY_6 if LOC_3
depends on LOC_3
help
help 2
""")
verify_str(c.syms["CORRECT_PROP_LOCS_INT"], """
config CORRECT_PROP_LOCS_INT
int
range 1 2 if LOC_1
range 3 4 if LOC_1
depends on LOC_1
config CORRECT_PROP_LOCS_INT
int
range 5 6 if LOC_2
range 7 8 if LOC_2
depends on LOC_2
""")
verify_custom_str(c.syms["CORRECT_PROP_LOCS_INT"], """
config CORRECT_PROP_LOCS_INT
int
range [1] [2] if [LOC_1]
range [3] [4] if [LOC_1]
depends on [LOC_1]
config CORRECT_PROP_LOCS_INT
int
range [5] [6] if [LOC_2]
range [7] [8] if [LOC_2]
depends on [LOC_2]
""")
print("Testing Choice.__str__()/custom_str()")
verify_str(c.named_choices["CHOICE"], """
choice CHOICE
tristate
prompt "foo"
default CHOICE_1
default CHOICE_2 if dep
""")
verify_str(c.named_choices["CHOICE"].nodes[0].next.item, """
choice
tristate
prompt "no name"
optional
""")
verify_str(c.named_choices["CORRECT_PROP_LOCS_CHOICE"], """
choice CORRECT_PROP_LOCS_CHOICE
bool
default CHOICE_3 if LOC_1
depends on LOC_1
choice CORRECT_PROP_LOCS_CHOICE
bool
default CHOICE_4 if LOC_2
depends on LOC_2
choice CORRECT_PROP_LOCS_CHOICE
bool
default CHOICE_5 if LOC_3
depends on LOC_3
""")
verify_custom_str(c.named_choices["CORRECT_PROP_LOCS_CHOICE"], """
choice CORRECT_PROP_LOCS_CHOICE
bool
default [CHOICE_3] if [LOC_1]
depends on [LOC_1]
choice CORRECT_PROP_LOCS_CHOICE
bool
default [CHOICE_4] if [LOC_2]
depends on [LOC_2]
choice CORRECT_PROP_LOCS_CHOICE
bool
default [CHOICE_5] if [LOC_3]
depends on [LOC_3]
""")
print("Testing MenuNode.__str__()/custom_str() for menus and comments")
verify_str(c.syms["SIMPLE_MENU_HOOK"].nodes[0].next, """
menu "simple menu"
""")
verify_str(c.syms["ADVANCED_MENU_HOOK"].nodes[0].next, """
menu "advanced menu"
depends on A
visible if B && (C || D)
""")
verify_custom_str(c.syms["ADVANCED_MENU_HOOK"].nodes[0].next, """
menu "advanced menu"
depends on [A]
visible if [B] && ([C] || [D])
""")
verify_str(c.syms["SIMPLE_COMMENT_HOOK"].nodes[0].next, """
comment "simple comment"
""")
verify_str(c.syms["ADVANCED_COMMENT_HOOK"].nodes[0].next, """
comment "advanced comment"
depends on A && B
""")
verify_custom_str(c.syms["ADVANCED_COMMENT_HOOK"].nodes[0].next, """
comment "advanced comment"
depends on [A] && [B]
""")
print("Testing Symbol.__repr__()")
def verify_repr(item, s):
verify_equal(repr(item) + "\n", s[1:])
c = Kconfig("Kconfiglib/tests/Krepr", warn=False)
verify_repr(c.n, """
<symbol n, tristate, value n, constant>
""")
verify_repr(c.m, """
<symbol m, tristate, value m, constant>
""")
verify_repr(c.y, """
<symbol y, tristate, value y, constant>
""")
verify_repr(c.syms["UNDEFINED"], """
<symbol UNDEFINED, unknown, value "UNDEFINED", visibility n, direct deps n, undefined>
""")
verify_repr(c.syms["BASIC"], """
<symbol BASIC, bool, value y, visibility n, direct deps y, Kconfiglib/tests/Krepr:9>
""")
verify_repr(c.syms["VISIBLE"], """
<symbol VISIBLE, bool, "visible", value n, visibility y, direct deps y, Kconfiglib/tests/Krepr:14>
""")
c.syms["VISIBLE"].set_value(2)
verify_repr(c.syms["VISIBLE"], """
<symbol VISIBLE, bool, "visible", value y, user value y, visibility y, direct deps y, Kconfiglib/tests/Krepr:14>
""")
verify_repr(c.syms["DIR_DEP_N"], """
<symbol DIR_DEP_N, unknown, value "DIR_DEP_N", visibility n, direct deps n, Kconfiglib/tests/Krepr:17>
""")
verify_repr(c.syms["OPTIONS"], """
<symbol OPTIONS, unknown, value "OPTIONS", visibility n, allnoconfig_y, is the defconfig_list symbol, from environment variable ENV, direct deps y, Kconfiglib/tests/Krepr:20>
""")
verify_repr(c.syms["MULTI_DEF"], """
<symbol MULTI_DEF, unknown, value "MULTI_DEF", visibility n, direct deps y, Kconfiglib/tests/Krepr:25, Kconfiglib/tests/Krepr:26>
""")
verify_repr(c.syms["CHOICE_1"], """
<symbol CHOICE_1, tristate, "choice sym", value n, visibility m, choice symbol, direct deps m, Kconfiglib/tests/Krepr:33>
""")
verify_repr(c.modules, """
<symbol MODULES, bool, value y, visibility n, is the modules symbol, direct deps y, Kconfiglib/tests/Krepr:1>
""")
print("Testing Choice.__repr__()")
verify_repr(c.named_choices["CHOICE"], """
<choice CHOICE, tristate, "choice", mode m, visibility y, Kconfiglib/tests/Krepr:30>
""")
c.named_choices["CHOICE"].set_value(2)
verify_repr(c.named_choices["CHOICE"], """
<choice CHOICE, tristate, "choice", mode y, user mode y, CHOICE_1 selected, visibility y, Kconfiglib/tests/Krepr:30>
""")
c.syms["CHOICE_2"].set_value(2)
verify_repr(c.named_choices["CHOICE"], """
<choice CHOICE, tristate, "choice", mode y, user mode y, CHOICE_2 selected, CHOICE_2 selected by user, visibility y, Kconfiglib/tests/Krepr:30>
""")
c.named_choices["CHOICE"].set_value(1)
verify_repr(c.named_choices["CHOICE"], """
<choice CHOICE, tristate, "choice", mode m, user mode m, CHOICE_2 selected by user (overridden), visibility y, Kconfiglib/tests/Krepr:30>
""")
verify_repr(c.syms["CHOICE_HOOK"].nodes[0].next.item, """
<choice, tristate, "optional choice", mode n, visibility n, optional, Kconfiglib/tests/Krepr:43>
""")
print("Testing MenuNode.__repr__()")
verify_repr(c.syms["BASIC"].nodes[0], """
<menu node for symbol BASIC, deps y, has help, has next, Kconfiglib/tests/Krepr:9>
""")
verify_repr(c.syms["DIR_DEP_N"].nodes[0], """
<menu node for symbol DIR_DEP_N, deps n, has next, Kconfiglib/tests/Krepr:17>
""")
verify_repr(c.syms["MULTI_DEF"].nodes[0], """
<menu node for symbol MULTI_DEF, deps y, has next, Kconfiglib/tests/Krepr:25>
""")
verify_repr(c.syms["MULTI_DEF"].nodes[1], """
<menu node for symbol MULTI_DEF, deps y, has next, Kconfiglib/tests/Krepr:26>
""")
verify_repr(c.syms["MENUCONFIG"].nodes[0], """
<menu node for symbol MENUCONFIG, is menuconfig, deps y, has next, Kconfiglib/tests/Krepr:28>
""")
verify_repr(c.named_choices["CHOICE"].nodes[0], """
<menu node for choice CHOICE, prompt "choice" (visibility y), deps y, has child, has next, Kconfiglib/tests/Krepr:30>
""")
verify_repr(c.syms["CHOICE_HOOK"].nodes[0].next, """
<menu node for choice, prompt "optional choice" (visibility n), deps y, has next, Kconfiglib/tests/Krepr:43>
""")
verify_repr(c.syms["NO_VISIBLE_IF_HOOK"].nodes[0].next, """
<menu node for menu, prompt "no visible if" (visibility y), deps y, 'visible if' deps y, has next, Kconfiglib/tests/Krepr:50>
""")
verify_repr(c.syms["VISIBLE_IF_HOOK"].nodes[0].next, """
<menu node for menu, prompt "visible if" (visibility y), deps y, 'visible if' deps m, has next, Kconfiglib/tests/Krepr:55>
""")
verify_repr(c.syms["COMMENT_HOOK"].nodes[0].next, """
<menu node for comment, prompt "comment" (visibility y), deps y, Kconfiglib/tests/Krepr:61>
""")
print("Testing Kconfig.__repr__()")
verify_repr(c, """
<configuration with 14 symbols, main menu prompt "Main menu", srctree is current directory, config symbol prefix "CONFIG_", warnings disabled, printing of warnings to stderr enabled, undef. symbol assignment warnings disabled, redundant symbol assignment warnings enabled>
""")
os.environ["srctree"] = "Kconfiglib"
os.environ["CONFIG_"] = "CONFIG_ value"
c = Kconfig("tests/Krepr", warn=False)
c.enable_warnings()
c.disable_stderr_warnings()
c.disable_redun_warnings()
c.enable_undef_warnings()
verify_repr(c, """
<configuration with 14 symbols, main menu prompt "Main menu", srctree "Kconfiglib", config symbol prefix "CONFIG_ value", warnings enabled, printing of warnings to stderr disabled, undef. symbol assignment warnings enabled, redundant symbol assignment warnings disabled>
""")
os.environ.pop("srctree", None)
os.environ.pop("CONFIG_", None)
print("Testing tricky help strings")
c = Kconfig("Kconfiglib/tests/Khelp")
def verify_help(node, s):
verify_equal(node.help, s[1:])
verify_help(c.syms["TWO_HELP_STRINGS"].nodes[0], """
first help string
""")
verify_help(c.syms["TWO_HELP_STRINGS"].nodes[1], """
second help string
""")
verify_help(c.syms["NO_BLANK_AFTER_HELP"].nodes[0], """
help for
NO_BLANK_AFTER_HELP
""")
verify_help(c.named_choices["CHOICE_HELP"].nodes[0], """
help for
CHOICE_HELP
""")
verify_help(c.syms["HELP_TERMINATED_BY_COMMENT"].nodes[0], """
a
b
c
""")
verify_help(c.syms["TRICKY_HELP"].nodes[0], """
a
b
c
d
e
f
g
h
i
""")
print("Testing locations, source/rsource/gsource/grsource, and "
"Kconfig.kconfig_filenames")
def verify_locations(nodes, *expected_locs):
verify(len(nodes) == len(expected_locs),
"Wrong number of locations for " + repr(nodes))
for node, expected_loc in zip(nodes, expected_locs):
node_loc = "{}:{}".format(node.filename, node.linenr)
verify(node_loc == expected_loc,
"expected {} to have the location {}, had the location {}"
.format(repr(node), expected_loc, node_loc))
# Expanded in the 'source' statement in Klocation
os.environ["TESTS_DIR_FROM_ENV"] = "tests"
os.environ["SUB_DIR_FROM_ENV"] = "sub"
os.environ["_SOURCED"] = "_sourced"
os.environ["_RSOURCED"] = "_rsourced"
os.environ["_GSOURCED"] = "_gsourced"
os.environ["_GRSOURCED"] = "_grsourced"
# Test twice, with $srctree as a relative and an absolute path,
# respectively
for srctree in "Kconfiglib", os.path.abspath("Kconfiglib"):
os.environ["srctree"] = srctree
# Has symbol with empty help text, so disable warnings
c = Kconfig("tests/Klocation", warn=False)
verify_locations(c.syms["SINGLE_DEF"].nodes, "tests/Klocation:4")
verify_locations(c.syms["MULTI_DEF"].nodes,
"tests/Klocation:7",
"tests/Klocation:37",
"tests/Klocation_sourced:3",
"tests/sub/Klocation_rsourced:2",
"tests/sub/Klocation_gsourced1:1",
"tests/sub/Klocation_gsourced2:1",
"tests/sub/Klocation_gsourced1:1",
"tests/sub/Klocation_gsourced2:1",
"tests/sub/Klocation_grsourced1:1",
"tests/sub/Klocation_grsourced2:1",
"tests/sub/Klocation_grsourced1:1",
"tests/sub/Klocation_grsourced2:1",
"tests/Klocation:70")
verify_locations(c.named_choices["CHOICE"].nodes,
"tests/Klocation_sourced:5")
verify_locations([c.syms["MENU_HOOK"].nodes[0].next],
"tests/Klocation_sourced:12")
verify_locations([c.syms["COMMENT_HOOK"].nodes[0].next],
"tests/Klocation_sourced:18")
# Test Kconfig.kconfig_filenames
verify_equal(c.kconfig_filenames, [
"tests/Klocation",
"tests/Klocation_sourced",
"tests/sub/Klocation_rsourced",
"tests/sub/Klocation_gsourced1",
"tests/sub/Klocation_gsourced2",
"tests/sub/Klocation_gsourced1",
"tests/sub/Klocation_gsourced2",
"tests/sub/Klocation_grsourced1",
"tests/sub/Klocation_grsourced2",
"tests/sub/Klocation_grsourced1",
"tests/sub/Klocation_grsourced2"
])
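# A file appears in kconfig_filenames once per time it is sourced, which is
# why the globbed files show up twice above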
# Test recursive 'source' detection
try:
Kconfig("tests/Krecursive1")
except KconfigError as e:
verify_equal(str(e), """
tests/Krecursive2:1: Recursive 'source' of 'tests/Krecursive1' detected. Check that environment variables are set correctly.
Include path:
tests/Krecursive1:1
tests/Krecursive2:1
"""[:-1])
except:
fail("recursive 'source' raised wrong exception")
else:
fail("recursive 'source' did not raise exception")
# Verify that source and rsource throw exceptions for missing files
# TODO: Make an exception test helper
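# One possible shape for such a helper, sketched here as a suggestion: it
# just wraps the try/except pattern used by the tests below, and relies only
# on Kconfig, KconfigError, and fail(), which are all used elsewhere in this
# file
def verify_load_error(kconfig_file, msg_fragment, description):
    # Expect Kconfig(kconfig_file) to raise a KconfigError whose message
    # contains 'msg_fragment'
    try:
        Kconfig(kconfig_file)
    except KconfigError as e:
        if msg_fragment not in str(e):
            fail("{} raised wrong KconfigError".format(description))
    except:
        fail("{} raised wrong exception".format(description))
    else:
        fail("{} did not raise exception".format(description))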
try:
Kconfig("tests/Kmissingsource")
except KconfigError as e:
if "does not exist" not in str(e):
fail("'source' with missing file raised wrong KconfigError")
except:
fail("'source' with missing file raised wrong exception")
else:
fail("'source' with missing file did not raise exception")
try:
Kconfig("tests/Kmissingrsource")
except KconfigError as e:
if "does not exist" not in str(e):
fail("'rsource' with missing file raised wrong KconfigError")
except:
fail("'rsource' with missing file raised wrong exception")
else:
fail("'rsource' with missing file did not raise exception")
print("Testing Kconfig.node_iter()")
# Reuse tests/Klocation. The node_iter(unique_syms=True) case already gets
# plenty of testing from write_config() as well.
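# node_iter(True) is shorthand for node_iter(unique_syms=True), which only
# visits the first node of symbols defined in multiple locations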
c = Kconfig("tests/Klocation", warn=False)
verify_equal(
[node.item.name for node in c.node_iter()
if isinstance(node.item, Symbol)],
["SINGLE_DEF", "MULTI_DEF", "HELP_1", "HELP_2", "HELP_3", "MULTI_DEF",
"MULTI_DEF", "MENU_HOOK", "COMMENT_HOOK"] + 10*["MULTI_DEF"])
verify_equal(
[node.item.name for node in c.node_iter(True)
if isinstance(node.item, Symbol)],
["SINGLE_DEF", "MULTI_DEF", "HELP_1", "HELP_2", "HELP_3", "MENU_HOOK",
"COMMENT_HOOK"])
verify_equal(
[node.prompt[0] for node in c.node_iter()
if not isinstance(node.item, Symbol)],
["choice", "menu", "comment"])
verify_equal(
[node.prompt[0] for node in c.node_iter(True)
if not isinstance(node.item, Symbol)],
["choice", "menu", "comment"])
# Get rid of custom 'srctree' from Klocation test
os.environ.pop("srctree", None)
print("Testing MenuNode.include_path")
os.environ["srctree"] = "Kconfiglib/tests"
c = Kconfig("Kinclude_path")
def verify_node_path(node, *expected):
if node.include_path != expected:
fail("Wrong include path for node {!r}. Got {}, expected {}."
.format(node, node.include_path, expected))
def verify_sym_path(sym_name, node_i, *expected):
verify_node_path(c.syms[sym_name].nodes[node_i], *expected)
verify_sym_path("TOP", 0)
verify_sym_path("TOP", 1)
verify_sym_path("TOP", 2)
verify_sym_path("ONE_DOWN", 0, ("Kinclude_path", 4))
verify_sym_path("ONE_DOWN", 1, ("Kinclude_path", 4))
verify_sym_path("ONE_DOWN", 2, ("Kinclude_path", 4))
verify_sym_path("ONE_DOWN", 3, ("Kinclude_path", 9))
verify_sym_path("ONE_DOWN", 4, ("Kinclude_path", 9))
verify_sym_path("ONE_DOWN", 5, ("Kinclude_path", 9))
verify_sym_path("TWO_DOWN", 0,
("Kinclude_path", 4), ("Kinclude_path_sourced_1", 4))
verify_sym_path("TWO_DOWN", 1,
("Kinclude_path", 4), ("Kinclude_path_sourced_1", 9))
verify_sym_path("TWO_DOWN", 2,
("Kinclude_path", 9), ("Kinclude_path_sourced_1", 4))
verify_sym_path("TWO_DOWN", 3,
("Kinclude_path", 9), ("Kinclude_path_sourced_1", 9))
verify_node_path(c.top_node)
verify_node_path(c.menus[0], ("Kinclude_path", 4), ("Kinclude_path_sourced_1", 4))
verify_node_path(c.comments[0], ("Kinclude_path", 4), ("Kinclude_path_sourced_1", 4))
verify_node_path(c.choices[0].nodes[0], ("Kinclude_path", 4), ("Kinclude_path_sourced_1", 4))
os.environ.pop("srctree", None)
print("Testing Kconfig.choices/menus/comments")
c = Kconfig("Kconfiglib/tests/Kitemlists")
def verify_prompts(items, *expected_prompts):
verify(len(items) == len(expected_prompts),
"Wrong number of prompts for {}".format(items))
for item, expected_prompt in zip(items, expected_prompts):
if not isinstance(item, MenuNode):
item = item.nodes[0]
verify(item.prompt[0] == expected_prompt,
"Wrong prompt for {}, expected '{}'"
.format(repr(item), expected_prompt))
verify_prompts(c.choices, "choice 1", "choice 2", "choice 3")
verify_prompts(c.menus, "menu 1", "menu 2", "menu 3", "menu 4", "menu 5")
verify_prompts(c.comments, "comment 1", "comment 2", "comment 3")
print("Testing Symbol/Choice.direct_dep")
c = Kconfig("Kconfiglib/tests/Kdirdep")
verify_equal(expr_str(c.syms["NO_DEP_SYM"].direct_dep), '"y"')
verify_equal(expr_str(c.syms["DEP_SYM"].direct_dep), "A || (B && C) || !D")
verify_equal(expr_str(c.named_choices["NO_DEP_CHOICE"].direct_dep), '"y"')
verify_equal(expr_str(c.named_choices["DEP_CHOICE"].direct_dep),
"A || B || C")
print("Testing expr_items()")
c = Kconfig("Kconfiglib/tests/Kexpr_items")
def verify_expr_items(expr, *sym_names):
verify_equal(tuple(sorted(item.name for item in expr_items(expr))),
sym_names)
verify_expr_items(
c.syms["TEST"].defaults[0][0],
"A", "B", "C", "D", "E", "F", "G", "H"
)
verify_expr_items(
c.syms["TEST_CHOICE"].nodes[0].prompt[1],
"A", "CHOICE"
)
print("Testing MenuNode/Symbol/Choice.referenced")
c = Kconfig("Kconfiglib/tests/Kreferenced", warn=False)
def verify_deps(item, *dep_names):
verify_equal(tuple(sorted(item.name for item in item.referenced)),
dep_names)
verify_deps(c.top_node, "y")
verify_deps(c.syms["NO_REFS"].nodes[0], "y")
verify_deps(c.syms["JUST_DEPENDS_ON_REFS"].nodes[0], "A", "B")
verify_deps(c.syms["LOTS_OF_REFS"].nodes[0],
*(chr(n) for n in range(ord("A"), ord("Z") + 1)))
verify_deps(c.syms["INT_REFS"].nodes[0],
"A", "B", "C", "D", "E", "F", "G", "H", "y")
verify_deps(c.syms["CHOICE_REF"].nodes[0], "CHOICE")
verify_deps(c.menus[0], "A", "B", "C", "D")
verify_deps(c.comments[0], "A", "B")
verify_deps(c.syms["MULTI_DEF_SYM"], "A", "B", "C", "y")
verify_deps(c.named_choices["MULTI_DEF_CHOICE"], "A", "B", "C")
print("Testing split_expr()")
c = Kconfig("Kconfiglib/tests/empty")
c.disable_warnings()
def verify_split(to_split, op, operand_strs):
# The same hackage as in Kconfig.eval_string()
c._tokens = c._tokenize("if " + to_split)[1:]
c._tokens_i = -1
operands = split_expr(c._parse_expr(False), op)
verify(len(operands) == len(operand_strs),
"Wrong number of operands when {} was split by {}"
.format(to_split, "OR" if op == OR else "AND"))
for operand, operand_str in zip(operands, operand_strs):
verify_equal(expr_str(operand), operand_str)
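# split_expr() flattens an expression into its top-level operands for the
# given operator, leaving subexpressions that use the other operator intact,
# as the cases below show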
verify_split("A", OR, ("A", ))
verify_split("!A", OR, ("!A", ))
verify_split("A = B", OR, ("A = B", ))
verify_split("A && B", OR, ("A && B", ))
verify_split("A || B", OR, ("A", "B" ))
verify_split("(A || B) || C", OR, ("A", "B", "C" ))
verify_split("A || (B || C)", OR, ("A", "B", "C" ))
verify_split("A || !(B || C)", OR, ("A", "!(B || C)" ))
verify_split("A || (B && (C || D))", OR, ("A", "B && (C || D)"))
verify_split("(A && (B || C)) || D", OR, ("A && (B || C)", "D"))
verify_split("A", AND, ("A", ))
verify_split("!A", AND, ("!A", ))
verify_split("A = B", AND, ("A = B", ))
verify_split("A || B", AND, ("A || B", ))
verify_split("A && B", AND, ("A", "B" ))
verify_split("(A && B) && C", AND, ("A", "B", "C" ))
verify_split("A && (B && C)", AND, ("A", "B", "C" ))
verify_split("A && !(B && C)", AND, ("A", "!(B && C)" ))
verify_split("A && (B || (C && D))", AND, ("A", "B || (C && D)"))
verify_split("(A || (B && C)) && D", AND, ("A || (B && C)", "D"))
print("Testing visibility")
c = Kconfig("Kconfiglib/tests/Kvisibility")
def verify_visibility(item, no_module_vis, module_vis):
c.modules.set_value(0)
verify(item.visibility == no_module_vis,
"expected {} to have visibility {} without modules, had "
"visibility {}".
format(repr(item), no_module_vis, item.visibility))
c.modules.set_value(2)
verify(item.visibility == module_vis,
"expected {} to have visibility {} with modules, had "
"visibility {}".
format(repr(item), module_vis, item.visibility))
# Symbol visibility
verify_visibility(c.syms["NO_PROMPT"], 0, 0)
verify_visibility(c.syms["BOOL_N"], 0, 0)
verify_visibility(c.syms["BOOL_M"], 0, 2)
verify_visibility(c.syms["BOOL_MOD"], 2, 2)
verify_visibility(c.syms["BOOL_Y"], 2, 2)
verify_visibility(c.syms["TRISTATE_M"], 0, 1)
verify_visibility(c.syms["TRISTATE_MOD"], 2, 1)
verify_visibility(c.syms["TRISTATE_Y"], 2, 2)
verify_visibility(c.syms["BOOL_IF_N"], 0, 0)
verify_visibility(c.syms["BOOL_IF_M"], 0, 2)
verify_visibility(c.syms["BOOL_IF_Y"], 2, 2)
verify_visibility(c.syms["BOOL_MENU_N"], 0, 0)
verify_visibility(c.syms["BOOL_MENU_M"], 0, 2)
verify_visibility(c.syms["BOOL_MENU_Y"], 2, 2)
verify_visibility(c.syms["BOOL_CHOICE_N"], 0, 0)
# Non-tristate symbols in tristate choices are only visible if the choice
# is in y mode
# The choice can't be brought to y mode because of the 'if m'
verify_visibility(c.syms["BOOL_CHOICE_M"], 0, 0)
c.syms["BOOL_CHOICE_M"].choice.set_value(2)
verify_visibility(c.syms["BOOL_CHOICE_M"], 0, 0)
# The choice gets y mode only when running without modules, because it
# defaults to m mode
verify_visibility(c.syms["BOOL_CHOICE_Y"], 2, 0)
c.syms["BOOL_CHOICE_Y"].choice.set_value(2)
# When set to y mode, the choice symbol becomes visible both with and
# without modules
verify_visibility(c.syms["BOOL_CHOICE_Y"], 2, 2)
verify_visibility(c.syms["TRISTATE_IF_N"], 0, 0)
verify_visibility(c.syms["TRISTATE_IF_M"], 0, 1)
verify_visibility(c.syms["TRISTATE_IF_Y"], 2, 2)
verify_visibility(c.syms["TRISTATE_MENU_N"], 0, 0)
verify_visibility(c.syms["TRISTATE_MENU_M"], 0, 1)
verify_visibility(c.syms["TRISTATE_MENU_Y"], 2, 2)
verify_visibility(c.syms["TRISTATE_CHOICE_N"], 0, 0)
verify_visibility(c.syms["TRISTATE_CHOICE_M"], 0, 1)
verify_visibility(c.syms["TRISTATE_CHOICE_Y"], 2, 2)
verify_visibility(c.named_choices["BOOL_CHOICE_N"], 0, 0)
verify_visibility(c.named_choices["BOOL_CHOICE_M"], 0, 2)
verify_visibility(c.named_choices["BOOL_CHOICE_Y"], 2, 2)
verify_visibility(c.named_choices["TRISTATE_CHOICE_N"], 0, 0)
verify_visibility(c.named_choices["TRISTATE_CHOICE_M"], 0, 1)
verify_visibility(c.named_choices["TRISTATE_CHOICE_Y"], 2, 2)
verify_visibility(c.named_choices["TRISTATE_CHOICE_IF_M_AND_Y"], 0, 1)
verify_visibility(c.named_choices["TRISTATE_CHOICE_MENU_N_AND_Y"], 0, 0)
# Verify that 'visible if' visibility gets propagated to prompts
verify_visibility(c.syms["VISIBLE_IF_N"], 0, 0)
verify_visibility(c.syms["VISIBLE_IF_M"], 0, 1)
verify_visibility(c.syms["VISIBLE_IF_Y"], 2, 2)
verify_visibility(c.syms["VISIBLE_IF_M_2"], 0, 1)
# Verify that string/int/hex symbols with m visibility accept a user value
assign_and_verify("STRING_m", "foo bar")
assign_and_verify("INT_m", "123")
assign_and_verify("HEX_m", "0x123")
print("Testing .assignable")
c = Kconfig("Kconfiglib/tests/Kassignable")
def verify_assignable_imp(item, assignable_no_modules, assignable_modules):
"""
Verifies the assignable values for 'item', with and without modules.
"""
for modules_val, assignable in (0, assignable_no_modules), \
(2, assignable_modules):
c.modules.set_value(modules_val)
module_msg = "without modules" if modules_val == 0 else \
"with modules"
verify(item.assignable == assignable,
"Incorrect assignable values for {} {}. Should be {}, "
"was {}."
.format(item.name, module_msg, assignable, item.assignable))
# Verify that the values can actually be assigned too
for val in item.assignable:
item.set_value(val)
verify(item.tri_value == val,
"Unable to set {} to {} {}, even though it was in "
".assignable".format(item.name, val, module_msg))
def verify_assignable(sym_name, assignable_no_modules, assignable_modules):
verify_assignable_imp(c.syms[sym_name],
assignable_no_modules,
assignable_modules)
def verify_const_unassignable(sym_name):
verify_assignable_imp(c.const_syms[sym_name], (), ())
# Things that shouldn't be .assignable
verify_const_unassignable("n")
verify_const_unassignable("m")
verify_const_unassignable("y")
verify_const_unassignable("const")
verify_assignable("UNDEFINED", (), ())
verify_assignable("NO_PROMPT", (), ())
verify_assignable("STRING", (), ())
verify_assignable("INT", (), ())
verify_assignable("HEX", (), ())
# Non-selected symbols
verify_assignable("Y_VIS_BOOL", (0, 2), (0, 2))
verify_assignable("M_VIS_BOOL", ( ), (0, 2)) # Vis. promoted
verify_assignable("N_VIS_BOOL", ( ), ( ))
verify_assignable("Y_VIS_TRI", (0, 2), (0, 1, 2))
verify_assignable("M_VIS_TRI", ( ), (0, 1 ))
verify_assignable("N_VIS_TRI", ( ), ( ))
# Symbols selected to y
verify_assignable("Y_SEL_Y_VIS_BOOL", (2,), (2,))
verify_assignable("Y_SEL_M_VIS_BOOL", ( ), (2,)) # Vis. promoted
verify_assignable("Y_SEL_N_VIS_BOOL", ( ), ( ))
verify_assignable("Y_SEL_Y_VIS_TRI", (2,), (2,))
verify_assignable("Y_SEL_M_VIS_TRI", ( ), (2,))
verify_assignable("Y_SEL_N_VIS_TRI", ( ), ( ))
# Symbols selected to m
verify_assignable("M_SEL_Y_VIS_BOOL", (2,), ( 2,)) # Value promoted
verify_assignable("M_SEL_M_VIS_BOOL", ( ), ( 2,)) # Vis./value promoted
verify_assignable("M_SEL_N_VIS_BOOL", ( ), ( ))
verify_assignable("M_SEL_Y_VIS_TRI", (2,), (1, 2 ))
verify_assignable("M_SEL_M_VIS_TRI", ( ), (1, ))
verify_assignable("M_SEL_N_VIS_TRI", ( ), ( ))
# Symbols implied to y
verify_assignable("Y_IMP_Y_VIS_BOOL", (0, 2), (0, 2))
verify_assignable("Y_IMP_M_VIS_BOOL", ( ), (0, 2)) # Vis. promoted
verify_assignable("Y_IMP_N_VIS_BOOL", ( ), ( ))
verify_assignable("Y_IMP_Y_VIS_TRI", (0, 2), (0, 2)) # m removed by imply
verify_assignable("Y_IMP_M_VIS_TRI", ( ), (0, 2)) # m promoted to y by imply
verify_assignable("Y_IMP_N_VIS_TRI", ( ), ( ))
# Symbols implied to m (never affects assignable values)
verify_assignable("M_IMP_Y_VIS_BOOL", (0, 2), (0, 2))
verify_assignable("M_IMP_M_VIS_BOOL", ( ), (0, 2)) # Vis. promoted
verify_assignable("M_IMP_N_VIS_BOOL", ( ), ( ))
verify_assignable("M_IMP_Y_VIS_TRI", (0, 2), (0, 1, 2))
verify_assignable("M_IMP_M_VIS_TRI", ( ), (0, 1 ))
verify_assignable("M_IMP_N_VIS_TRI", ( ), ( ))
# Symbols in y-mode choice
verify_assignable("Y_CHOICE_BOOL", (2,), (2,))
verify_assignable("Y_CHOICE_TRISTATE", (2,), (2,))
verify_assignable("Y_CHOICE_N_VIS_TRISTATE", ( ), ( ))
# Symbols in m/y-mode choice, starting out in m mode, or y mode when
# running without modules
verify_assignable("MY_CHOICE_BOOL", (2,), ( ))
verify_assignable("MY_CHOICE_TRISTATE", (2,), (0, 1))
verify_assignable("MY_CHOICE_N_VIS_TRISTATE", ( ), ( ))
c.named_choices["MY_CHOICE"].set_value(2)
# Symbols in m/y-mode choice, now in y mode
verify_assignable("MY_CHOICE_BOOL", (2,), (2,))
verify_assignable("MY_CHOICE_TRISTATE", (2,), (2,))
verify_assignable("MY_CHOICE_N_VIS_TRISTATE", ( ), ( ))
def verify_choice_assignable(choice_name, assignable_no_modules,
assignable_modules):
verify_assignable_imp(c.named_choices[choice_name],
assignable_no_modules,
assignable_modules)
# Choices with various possible modes
verify_choice_assignable("Y_CHOICE", (2, ), ( 2,))
verify_choice_assignable("MY_CHOICE", (2, ), ( 1, 2 ))
verify_choice_assignable("NMY_CHOICE", (0, 2), (0, 1, 2 ))
verify_choice_assignable("NY_CHOICE", (0, 2), (0, 2 ))
verify_choice_assignable("NM_CHOICE", ( ), (0, 1 ))
verify_choice_assignable("M_CHOICE", ( ), ( 1, ))
verify_choice_assignable("N_CHOICE", ( ), ( ))
print("Testing object relations")
c = Kconfig("Kconfiglib/tests/Krelation")
verify(c.syms["A"].nodes[0].parent is c.top_node,
"A's parent should be the top node")
verify(c.syms["B"].nodes[0].parent.item is c.named_choices["CHOICE_1"],
"B's parent should be the first choice")
verify(c.syms["C"].nodes[0].parent.item is c.syms["B"],
"C's parent should be B (due to auto menus)")
verify(c.syms["E"].nodes[0].parent.item == MENU,
"E's parent should be a menu")
verify(c.syms["E"].nodes[0].parent.parent is c.top_node,
"E's grandparent should be the top node")
verify(c.syms["G"].nodes[0].parent.item is c.named_choices["CHOICE_2"],
"G's parent should be the second choice")
verify(c.syms["G"].nodes[0].parent.parent.item == MENU,
"G's grandparent should be a menu")
print("Testing hex/int ranges")
c = Kconfig("Kconfiglib/tests/Krange", warn=False)
for sym_name in "HEX_NO_RANGE", "INT_NO_RANGE", "HEX_40", "INT_40":
sym = c.syms[sym_name]
verify(not sym.ranges,
"{} should not have ranges".format(sym_name))
for sym_name in "HEX_ALL_RANGES_DISABLED", "INT_ALL_RANGES_DISABLED", \
"HEX_RANGE_10_20_LOW_DEFAULT", \
"INT_RANGE_10_20_LOW_DEFAULT":
sym = c.syms[sym_name]
verify(sym.ranges, "{} should have ranges".format(sym_name))
# hex/int symbols without defaults should get no default value
verify_value("HEX_NO_RANGE", "")
verify_value("INT_NO_RANGE", "")
# And neither if all ranges are disabled
verify_value("HEX_ALL_RANGES_DISABLED", "")
verify_value("INT_ALL_RANGES_DISABLED", "")
# Make sure they are assignable though, and test that the form of the user
# value is reflected in the value for hex symbols
assign_and_verify("HEX_NO_RANGE", "0x123")
assign_and_verify("HEX_NO_RANGE", "123")
assign_and_verify("INT_NO_RANGE", "123")
# Defaults outside of the valid range should be clamped
verify_value("HEX_RANGE_10_20_LOW_DEFAULT", "0x10")
verify_value("HEX_RANGE_10_20_HIGH_DEFAULT", "0x20")
verify_value("INT_RANGE_10_20_LOW_DEFAULT", "10")
verify_value("INT_RANGE_10_20_HIGH_DEFAULT", "20")
# Defaults inside the valid range should be preserved. For hex symbols,
# they should additionally use the same form as in the assignment.
verify_value("HEX_RANGE_10_20_OK_DEFAULT", "0x15")
verify_value("HEX_RANGE_10_20_OK_DEFAULT_ALTERNATE", "15")
verify_value("INT_RANGE_10_20_OK_DEFAULT", "15")
# hex/int symbols with no defaults but valid ranges should default to the
# lower end of the range if it's > 0
verify_value("HEX_RANGE_10_20", "0x10")
verify_value("HEX_RANGE_0_10", "")
verify_value("INT_RANGE_10_20", "10")
verify_value("INT_RANGE_0_10", "")
verify_value("INT_RANGE_NEG_10_10", "")
# User values and dependent ranges
# Avoid warnings for assigning values outside the active range
c.disable_warnings()
def verify_range(sym_name, low, high, default):
"""
Tests that the values in the range 'low'-'high' can be assigned, and
that assigning values outside this range reverts the value back to
'default' (None if it should revert back to "").
"""
is_hex = (c.syms[sym_name].type == HEX)
for i in range(low, high + 1):
assign_and_verify_user_value(sym_name, str(i), str(i), True)
if is_hex:
# The form of the user value should be preserved for hex
# symbols
assign_and_verify_user_value(sym_name, hex(i), hex(i), True)
# Verify that assigning a user value just outside the range causes
# defaults to be used
if default is None:
default_str = ""
else:
default_str = hex(default) if is_hex else str(default)
if is_hex:
too_low_str = hex(low - 1)
too_high_str = hex(high + 1)
else:
too_low_str = str(low - 1)
too_high_str = str(high + 1)
assign_and_verify_value(sym_name, too_low_str, default_str)
assign_and_verify_value(sym_name, too_high_str, default_str)
verify_range("HEX_RANGE_10_20_LOW_DEFAULT", 0x10, 0x20, 0x10)
verify_range("HEX_RANGE_10_20_HIGH_DEFAULT", 0x10, 0x20, 0x20)
verify_range("HEX_RANGE_10_20_OK_DEFAULT", 0x10, 0x20, 0x15)
verify_range("INT_RANGE_10_20_LOW_DEFAULT", 10, 20, 10)
verify_range("INT_RANGE_10_20_HIGH_DEFAULT", 10, 20, 20)
verify_range("INT_RANGE_10_20_OK_DEFAULT", 10, 20, 15)
verify_range("HEX_RANGE_10_20", 0x10, 0x20, 0x10)
verify_range("INT_RANGE_10_20", 10, 20, 10)
verify_range("INT_RANGE_0_10", 0, 10, None)
verify_range("INT_RANGE_NEG_10_10", -10, 10, None)
# Dependent ranges
verify_value("HEX_40", "40")
verify_value("INT_40", "40")
c.syms["HEX_RANGE_10_20"].unset_value()
c.syms["INT_RANGE_10_20"].unset_value()
verify_value("HEX_RANGE_10_40_DEPENDENT", "0x10")
verify_value("INT_RANGE_10_40_DEPENDENT", "10")
c.syms["HEX_RANGE_10_20"].set_value("15")
c.syms["INT_RANGE_10_20"].set_value("15")
verify_value("HEX_RANGE_10_40_DEPENDENT", "0x15")
verify_value("INT_RANGE_10_40_DEPENDENT", "15")
c.unset_values()
verify_range("HEX_RANGE_10_40_DEPENDENT", 0x10, 0x40, 0x10)
verify_range("INT_RANGE_10_40_DEPENDENT", 10, 40, 10)
# Ranges and symbols defined in multiple locations
verify_value("INACTIVE_RANGE", "2")
verify_value("ACTIVE_RANGE", "1")
print("Testing defconfig_filename")
c = Kconfig("Kconfiglib/tests/empty")
verify(c.defconfig_filename is None,
"defconfig_filename should be None with no defconfig_list symbol")
c = Kconfig("Kconfiglib/tests/Kdefconfig_nonexistent")
verify(c.defconfig_filename is None,
"defconfig_filename should be None when none of the files in the "
"defconfig_list symbol exist")
# Referenced in Kdefconfig_existent(_but_n)
os.environ["FOO"] = "defconfig_2"
c = Kconfig("Kconfiglib/tests/Kdefconfig_existent_but_n")
verify(c.defconfig_filename is None,
"defconfig_filename should be None when the condition is n for all "
"the defaults")
c = Kconfig("Kconfiglib/tests/Kdefconfig_existent")
verify(c.defconfig_filename == "Kconfiglib/tests/defconfig_2",
"defconfig_filename should return the existing file "
"Kconfiglib/tests/defconfig_2")
# Should also look relative to $srctree if the specified defconfig is a
# relative path and can't be opened
c = Kconfig("Kconfiglib/tests/Kdefconfig_srctree")
verify(c.defconfig_filename == "Kconfiglib/tests/defconfig_2",
"defconfig_filename gave wrong file with $srctree unset")
os.environ["srctree"] = "Kconfiglib/tests"
c = Kconfig("Kdefconfig_srctree")
verify(c.defconfig_filename == "Kconfiglib/tests/sub/defconfig_in_sub",
"defconfig_filename gave wrong file with $srctree set")
os.environ.pop("srctree", None)
print("Testing mainmenu_text")
c = Kconfig("Kconfiglib/tests/empty")
verify(c.mainmenu_text == "Main menu",
"An empty Kconfig should get a default main menu prompt")
# Expanded in the mainmenu text
os.environ["FOO"] = "bar baz"
c = Kconfig("Kconfiglib/tests/Kmainmenu")
verify(c.mainmenu_text == "---bar baz---",
"Wrong mainmenu text")
print("Testing user_value")
# References undefined env. var. Disable warnings.
c = Kconfig("Kconfiglib/tests/Kmisc", warn=False)
# Avoid warnings from assigning invalid user values and assigning user
# values to symbols without prompts
c.disable_warnings()
syms = [c.syms[name] for name in
("BOOL", "TRISTATE", "STRING", "INT", "HEX")]
for sym in syms:
verify(sym.user_value is None,
"{} should not have a user value to begin with")
# Assign valid values for the types
assign_and_verify_user_value("BOOL", 0, 0, True)
assign_and_verify_user_value("BOOL", 2, 2, True)
assign_and_verify_user_value("TRISTATE", 0, 0, True)
assign_and_verify_user_value("TRISTATE", 1, 1, True)
assign_and_verify_user_value("TRISTATE", 2, 2, True)
assign_and_verify_user_value("STRING", "foo bar", "foo bar", True)
assign_and_verify_user_value("INT", "123", "123", True)
assign_and_verify_user_value("HEX", "0x123", "0x123", True)
# Assign invalid values for the types. They should retain their old user
# value.
assign_and_verify_user_value("BOOL", 1, 2, False)
assign_and_verify_user_value("BOOL", "foo", 2, False)
assign_and_verify_user_value("BOOL", "1", 2, False)
assign_and_verify_user_value("TRISTATE", "foo", 2, False)
assign_and_verify_user_value("TRISTATE", "1", 2, False)
assign_and_verify_user_value("STRING", 0, "foo bar", False)
assign_and_verify_user_value("INT", "foo", "123", False)
assign_and_verify_user_value("INT", 0, "123", False)
assign_and_verify_user_value("HEX", "foo", "0x123", False)
assign_and_verify_user_value("HEX", 0, "0x123", False)
assign_and_verify_user_value("HEX", "-0x1", "0x123", False)
for s in syms:
s.unset_value()
verify(s.user_value is None,
"{} should not have a user value after being reset".
format(s.name))
print("Testing is_menuconfig")
c = Kconfig("Kconfiglib/tests/Kmenuconfig")
for not_menuconfig in c.syms["NOT_MENUCONFIG_1"].nodes[0], \
c.syms["NOT_MENUCONFIG_2"].nodes[0], \
c.syms["MENUCONFIG_MULTI_DEF"].nodes[0], \
c.syms["COMMENT_HOOK"].nodes[0].next:
verify(not not_menuconfig.is_menuconfig,
"'{}' should have is_menuconfig False".format(not_menuconfig))
for menuconfig in c.top_node, \
c.syms["MENUCONFIG_1"].nodes[0], \
c.syms["MENUCONFIG_MULTI_DEF"].nodes[1], \
c.syms["MENU_HOOK"].nodes[0].next, \
c.syms["CHOICE_HOOK"].nodes[0].next:
verify(menuconfig.is_menuconfig,
"'{}' should have is_menuconfig True".format(menuconfig))
print("Testing 'option env' semantics")
os.environ["ENV_VAR"] = "ENV_VAR value"
# References undefined env. var., so disable warnings
c = Kconfig("Kconfiglib/tests/Kmisc", warn=False)
# Verify that 'option env' is treated like a default
verify_value("FROM_ENV", "ENV_VAR value")
verify_value("FROM_ENV_MISSING", "missing")
verify_value("FROM_ENV_WEIRD", "weird")
print("Testing defined vs undefined symbols")
for name in "A", "B", "C", "D", "BOOL", "TRISTATE", "STRING", "INT", "HEX":
verify(c.syms[name].nodes,
"{} should be defined".format(name))
for name in "NOT_DEFINED_1", "NOT_DEFINED_2", "NOT_DEFINED_3", \
"NOT_DEFINED_4":
sym = c.syms[name]
verify(not sym.nodes,
"{} should not be defined".format(name))
print("Testing Symbol.choice")
for name in "A", "B", "C", "D":
verify(c.syms[name].choice is not None,
"{} should be a choice symbol".format(name))
for name in "Q1", "Q2", "Q3", "BOOL", "TRISTATE", "STRING", "INT", "HEX", \
"FROM_ENV", "FROM_ENV_MISSING", "NOT_DEFINED_1", \
"NOT_DEFINED_2", "NOT_DEFINED_3", "NOT_DEFINED_4":
verify(c.syms[name].choice is None,
"{} should not be a choice symbol".format(name))
print("Testing is_allnoconfig_y")
verify(not c.syms["NOT_ALLNOCONFIG_Y"].is_allnoconfig_y,
"NOT_ALLNOCONFIG_Y should not be allnoconfig_y")
verify(c.syms["ALLNOCONFIG_Y"].is_allnoconfig_y,
"ALLNOCONFIG_Y should be allnoconfig_y")
print("Testing .config reading and writing")
config_test_file = "Kconfiglib/tests/config_test"
def verify_file_contents(fname, contents):
with open(fname, "r") as f:
file_contents = f.read()
verify(file_contents == contents,
"{} contains '{}'. Expected '{}'."
.format(fname, file_contents, contents))
# Writing/reading strings with characters that need to be escaped
c = Kconfig("Kconfiglib/tests/Kescape")
# Test the default value
c.write_config(config_test_file + "_from_def", header="")
verify_file_contents(config_test_file + "_from_def",
r'''CONFIG_STRING="\"\\"''' "\n")
# Write our own value
c.syms["STRING"].set_value(r'''\"a'\\''')
c.write_config(config_test_file + "_from_user", header="")
verify_file_contents(config_test_file + "_from_user",
r'''CONFIG_STRING="\\\"a'\\\\"''' "\n")
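# Each backslash and double quote in the value is escaped with a backslash
# when written to the .config file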
# Read back the two configs and verify the respective values
c.load_config(config_test_file + "_from_def")
verify_value("STRING", '"\\')
c.load_config(config_test_file + "_from_user")
verify_value("STRING", r'''\"a'\\''')
# Appending values from a .config
c = Kconfig("Kconfiglib/tests/Kappend")
# Values before assigning
verify_value("BOOL", "n")
verify_value("STRING", "")
# Assign BOOL
c.load_config("Kconfiglib/tests/config_set_bool", replace=False)
verify_value("BOOL", "y")
verify_value("STRING", "")
# Assign STRING
c.load_config("Kconfiglib/tests/config_set_string", replace=False)
verify_value("BOOL", "y")
verify_value("STRING", "foo bar")
# Reset BOOL
c.load_config("Kconfiglib/tests/config_set_string")
verify_value("BOOL", "n")
verify_value("STRING", "foo bar")
# Loading a completely empty .config should reset values
c.load_config("Kconfiglib/tests/empty")
verify_value("STRING", "")
# An indented assignment in a .config should be ignored
c.load_config("Kconfiglib/tests/config_indented")
verify_value("IGNOREME", "y")
# Symbol order in headers and minimal configuration files should match
# definition order, like in .config files
c = Kconfig("Kconfiglib/tests/Korder")
c.write_autoconf(config_test_file, header="")
verify_file_contents(config_test_file, """
#define CONFIG_O 0
#define CONFIG_R 1
#define CONFIG_D 2
#define CONFIG_E 3
#define CONFIG_R2 4
#define CONFIG_I 5
#define CONFIG_N 6
#define CONFIG_G 7
"""[1:])
# Differs from defaults
c.syms["O"].set_value("-1")
c.syms["R"].set_value("-1")
c.syms["E"].set_value("-1")
c.syms["R2"].set_value("-1")
c.syms["N"].set_value("-1")
c.syms["G"].set_value("-1")
c.write_min_config(config_test_file, header="")
verify_file_contents(config_test_file, """
CONFIG_O=-1
CONFIG_R=-1
CONFIG_E=-1
CONFIG_R2=-1
CONFIG_N=-1
CONFIG_G=-1
"""[1:])
print("Testing Kconfig fetching and separation")
for c in Kconfig("Kconfiglib/tests/Kmisc", warn=False), \
Kconfig("Kconfiglib/tests/Kmisc", warn=False):
for item in c.syms["BOOL"], \
c.syms["BOOL"].nodes[0], \
c.named_choices["OPTIONAL"], \
c.named_choices["OPTIONAL"].nodes[0], \
c.syms["MENU_HOOK"].nodes[0].next, \
c.syms["COMMENT_HOOK"].nodes[0].next:
verify(item.kconfig is c,
".kconfig not properly set for " + repr(item))
print("Testing imply semantics")
c = Kconfig("Kconfiglib/tests/Kimply")
verify_value("IMPLY_DIRECT_DEPS", "y")
verify_value("UNMET_DIRECT_1", "n")
verify_value("UNMET_DIRECT_2", "n")
verify_value("UNMET_DIRECT_3", "n")
verify_value("MET_DIRECT_1", "y")
verify_value("MET_DIRECT_2", "y")
verify_value("MET_DIRECT_3", "y")
verify_value("MET_DIRECT_4", "y")
verify_value("IMPLY_COND", "y")
verify_value("IMPLIED_N_COND", "n")
verify_value("IMPLIED_M_COND", "m")
verify_value("IMPLIED_Y_COND", "y")
verify_value("IMPLY_N_1", "n")
verify_value("IMPLY_N_2", "n")
verify_value("IMPLIED_FROM_N_1", "n")
verify_value("IMPLIED_FROM_N_2", "n")
verify_value("IMPLY_M", "m")
verify_value("IMPLIED_M", "m")
verify_value("IMPLIED_M_BOOL", "y")
verify_value("IMPLY_M_TO_Y", "y")
verify_value("IMPLIED_M_TO_Y", "y")
# Test user value semantics
# Verify that IMPLIED_TRISTATE is invalidated if the direct
# dependencies change
assign_and_verify("IMPLY", 2)
assign_and_verify("DIRECT_DEP", 2)
verify_value("IMPLIED_TRISTATE", 2)
assign_and_verify("DIRECT_DEP", 0)
verify_value("IMPLIED_TRISTATE", 0)
# Set back for later tests
assign_and_verify("DIRECT_DEP", 2)
# Verify that IMPLIED_TRISTATE can be set to anything when IMPLY has value
# n, and that it gets the value n by default (for non-imply-related
# reasons)
assign_and_verify("IMPLY", 0)
assign_and_verify("IMPLIED_TRISTATE", 0)
assign_and_verify("IMPLIED_TRISTATE", 1)
assign_and_verify("IMPLIED_TRISTATE", 2)
c.syms["IMPLIED_TRISTATE"].unset_value()
verify_value("IMPLIED_TRISTATE", "n")
# Same as above for m. Anything still goes, but m by default now.
assign_and_verify("IMPLY", 1)
assign_and_verify("IMPLIED_TRISTATE", 0)
assign_and_verify("IMPLIED_TRISTATE", 1)
assign_and_verify("IMPLIED_TRISTATE", 2)
c.syms["IMPLIED_TRISTATE"].unset_value()
verify_value("IMPLIED_TRISTATE", 1)
# Same as above for y. Only n and y should be accepted. m gets promoted to
# y. Default should be y.
assign_and_verify("IMPLY", 2)
assign_and_verify("IMPLIED_TRISTATE", 0)
assign_and_verify_value("IMPLIED_TRISTATE", 1, 2)
assign_and_verify("IMPLIED_TRISTATE", 2)
c.syms["IMPLIED_TRISTATE"].unset_value()
verify_value("IMPLIED_TRISTATE", 2)
# Being implied to either m or y should give a bool the value y
c.syms["IMPLY"].unset_value()
verify_value("IMPLIED_BOOL", 0)
assign_and_verify("IMPLY", 0)
verify_value("IMPLIED_BOOL", 0)
assign_and_verify("IMPLY", 1)
verify_value("IMPLIED_BOOL", 2)
assign_and_verify("IMPLY", 2)
verify_value("IMPLIED_BOOL", 2)
# A bool implied to m or y can take the values n and y
c.syms["IMPLY"].set_value(1)
assign_and_verify("IMPLIED_BOOL", 0)
assign_and_verify("IMPLIED_BOOL", 2)
c.syms["IMPLY"].set_value(2)
assign_and_verify("IMPLIED_BOOL", 0)
assign_and_verify("IMPLIED_BOOL", 2)
print("Testing choice semantics")
# Would warn for choice value symbols defined without a type, even
# though the type is automatically derived. This is probably more
# helpful than ignoring those cases, as this feature isn't used
# deliberately anywhere from what I've seen.
c = Kconfig("Kconfiglib/tests/Kchoice", warn=False)
for name in "BOOL", "BOOL_OPT", "BOOL_M", "DEFAULTS":
verify(c.named_choices[name].orig_type == BOOL,
"choice {} should have type bool".format(name))
for name in "TRISTATE", "TRISTATE_OPT", "TRISTATE_M":
verify(c.named_choices[name].orig_type == TRISTATE,
"choice {} should have type tristate".format(name))
def select_and_verify(sym):
choice = sym.nodes[0].parent.item
choice.set_value(2)
sym.set_value(2)
verify(sym.choice.selection is sym,
sym.name + " should be the selected symbol")
verify(choice.user_selection is sym,
sym.name + " should be the user selection of the choice")
verify(sym.tri_value == 2,
sym.name + " should have value y when selected")
verify(sym.user_value == 2,
sym.name + " should have user value y when selected")
for sibling in choice.syms:
if sibling is not sym:
verify(sibling.tri_value == 0,
sibling.name + " should be n when not selected")
def select_and_verify_all(choice_name):
choice = c.named_choices[choice_name]
# Select in forward order
for sym in choice.syms:
select_and_verify(sym)
# Select in reverse order
for sym in reversed(choice.syms):
select_and_verify(sym)
def verify_mode(choice_name, no_modules_mode, modules_mode):
choice = c.named_choices[choice_name]
c.modules.set_value(0)
verify(choice.tri_value == no_modules_mode,
'Wrong mode for choice {} with no modules. Expected {}, got {}.'
.format(choice.name, no_modules_mode, choice.tri_value))
c.modules.set_value(2)
verify(choice.tri_value == modules_mode,
'Wrong mode for choice {} with modules. Expected {}, got {}.'
.format(choice.name, modules_mode, choice.tri_value))
verify_mode("BOOL", 2, 2)
verify_mode("BOOL_OPT", 0, 0)
verify_mode("TRISTATE", 2, 1)
verify_mode("TRISTATE_OPT", 0, 0)
verify_mode("BOOL_M", 0, 2)
verify_mode("TRISTATE_M", 0, 1)
# Test defaults
choice = c.named_choices["DEFAULTS"]
c.syms["TRISTATE_SYM"].set_value(0)
verify(choice.selection is c.syms["OPT_4"],
"Wrong choice default with TRISTATE_SYM = n")
c.syms["TRISTATE_SYM"].set_value(2)
verify(choice.selection is c.syms["OPT_2"],
"Wrong choice default with TRISTATE_SYM = y")
c.syms["OPT_1"].set_value(2)
verify(choice.selection is c.syms["OPT_1"],
"User selection should override defaults")
verify(c.named_choices["DEFAULTS_NOT_VISIBLE"].selection
is c.syms["OPT_8"],
"Non-visible choice symbols should cause the next default to be "
"considered")
# Test y mode selection
c.modules.set_value(2)
select_and_verify_all("BOOL")
select_and_verify_all("BOOL_OPT")
select_and_verify_all("TRISTATE")
select_and_verify_all("TRISTATE_OPT")
# For BOOL_M, the mode should have been promoted
select_and_verify_all("BOOL_M")
# Test m mode selection
c.named_choices["TRISTATE"].set_value(1)
verify(c.named_choices["TRISTATE"].tri_value == 1,
"TRISTATE choice should have mode m after explicit mode assignment")
assign_and_verify_value("T_1", 0, 0)
assign_and_verify_value("T_2", 0, 0)
assign_and_verify_value("T_1", 1, 1)
assign_and_verify_value("T_2", 1, 1)
assign_and_verify_value("T_1", 2, 1)
assign_and_verify_value("T_2", 2, 1)
# Switching to y mode should cause T_2 to become selected
c.named_choices["TRISTATE"].set_value(2)
verify_value("T_1", 0)
verify_value("T_2", 2)
# Verify that choices with no explicitly specified type get the type of the
# first contained symbol with a type
verify(c.named_choices["NO_TYPE_BOOL"].orig_type == BOOL,
"Expected first choice without explicit type to have type bool")
verify(c.named_choices["NO_TYPE_TRISTATE"].orig_type == TRISTATE,
"Expected second choice without explicit type to have type "
"tristate")
# Verify that symbols without a type in the choice get the type of the
# choice
for name in "MMT_1", "MMT_2", "MMT_4", "MMT_5":
verify(c.syms[name].orig_type == BOOL,
"Expected {} to get type bool".format(name))
verify(c.syms["MMT_3"].orig_type == TRISTATE,
"Expected MMT_3 to have type tristate")
# Verify that the default selection can change depending on the
# visibility of the choice symbols
default_with_dep_choice = c.named_choices["DEFAULT_WITH_DEP"]
verify(default_with_dep_choice.selection is c.syms["B"],
"Wrong choice default with unsatisfied deps on default")
c.syms["DEP"].set_value("y")
verify(default_with_dep_choice.selection is c.syms["A"],
"Wrong choice default with satisfied deps on default")
c.syms["DEP"].set_value("n")
verify(default_with_dep_choice.selection is c.syms["B"],
"Wrong choice default with unsatisfied deps on default (round two)")
# Verify that symbols in choices that depend on the preceding symbol aren't
# considered choice symbols
weird_choice = c.named_choices["WEIRD_SYMS"]
def verify_is_normal_choice_symbol(name):
sym = c.syms[name]
verify(sym.choice is not None and
sym in weird_choice.syms and
sym.nodes[0].parent.item is weird_choice,
"{} should be a normal choice symbol".format(sym.name))
def verify_is_weird_choice_symbol(name):
sym = c.syms[name]
verify(sym.choice is None and
sym not in weird_choice.syms,
"{} should be a weird (non-)choice symbol"
.format(sym.name))
verify_is_normal_choice_symbol("WS1")
verify_is_weird_choice_symbol("WS2")
verify_is_weird_choice_symbol("WS3")
verify_is_weird_choice_symbol("WS4")
verify_is_weird_choice_symbol("WS5")
verify_is_normal_choice_symbol("WS6")
verify_is_weird_choice_symbol("WS7")
verify_is_weird_choice_symbol("WS8")
verify_is_normal_choice_symbol("WS9")
print("Testing multi.def. property copying")
c = Kconfig("Kconfiglib/tests/Kdepcopy", warn=False)
def verify_props(desc, props, prop_names):
actual = [prop[0].name for prop in props]
expected = prop_names.split()
verify(actual == expected,
"Wrong {} properties, expected '{}', got '{}'"
.format(desc, expected, actual))
verify_props("default", c.syms["MULTIDEF"].defaults,
"A B C D E F G H I J K L M N O P Q R")
verify_props("select", c.syms["MULTIDEF"].selects,
"AA BB CC DD EE FF GG HH II JJ")
verify_props("imply", c.syms["MULTIDEF"].selects,
"AA BB CC DD EE FF GG HH II JJ")
verify_props("select", c.syms["MULTIDEF_CHOICE"].selects,
"A B C")
verify_props("range", c.syms["MULTIDEF_RANGE"].ranges,
"A B C D E F")
verify_props("default", c.choices[1].defaults,
"A B C D E")
print("Testing dependency loop detection")
# These are all expected to raise dependency loop errors
for i in range(11):
filename = "Kconfiglib/tests/Kdeploop" + str(i)
try:
Kconfig(filename)
except KconfigError as e:
if "Dependency loop" not in str(e):
fail("dependency loop in {} raised wrong KconfigError"
.format(filename))
except:
fail("dependency loop in {} raised wrong exception"
.format(filename))
else:
fail("dependency loop in {} not detected".format(filename))
# Check the most complicated message completely
try:
Kconfig("Kconfiglib/tests/Kdeploop10")
except KconfigError as e:
verify_equal(str(e), """
Dependency loop
===============
A (defined at Kconfiglib/tests/Kdeploop10:1), with definition...
config A
bool
depends on B
...depends on B (defined at Kconfiglib/tests/Kdeploop10:5), with definition...
config B
bool
depends on C = 7
...depends on C (defined at Kconfiglib/tests/Kdeploop10:9), with definition...
config C
int
range D 8
...depends on D (defined at Kconfiglib/tests/Kdeploop10:13), with definition...
config D
int
default 3 if E
default 8
...depends on E (defined at Kconfiglib/tests/Kdeploop10:18), with definition...
config E
bool
(select-related dependencies: F && G)
...depends on G (defined at Kconfiglib/tests/Kdeploop10:25), with definition...
config G
bool
depends on H
...depends on the choice symbol H (defined at Kconfiglib/tests/Kdeploop10:32), with definition...
config H
bool
prompt "H" if I && <choice>
depends on I && <choice>
...depends on the choice symbol I (defined at Kconfiglib/tests/Kdeploop10:41), with definition...
config I
bool
prompt "I" if <choice>
depends on <choice>
...depends on <choice> (defined at Kconfiglib/tests/Kdeploop10:38), with definition...
choice
bool
prompt "choice" if J
...depends on J (defined at Kconfiglib/tests/Kdeploop10:46), with definition...
config J
bool
depends on A
...depends again on A (defined at Kconfiglib/tests/Kdeploop10:1)
"""[:-1])
except:
fail("Loop detection message check raised wrong exception")
else:
fail("Loop detection message check did not raise exception")
print("Testing preprocessor")
os.environ["ENV_1"] = "env_1"
os.environ["ENV_2"] = "env_2"
os.environ["ENV_3"] = "env_3"
os.environ["ENV_4"] = "env_4"
os.environ["ENV_5"] = "n"
os.environ["ENV_6"] = "Kconfiglib/tests/empty"
os.environ["ENV_7"] = "env_7"
# We verify warnings manually
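# (warn_to_stderr=False keeps the warnings off stderr; they are still
# collected in c.warnings, which is compared against the expected list below)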
c = Kconfig("Kconfiglib/tests/Kpreprocess", warn_to_stderr=False)
def verify_variable(name, unexp_value, exp_value, recursive):
var = c.variables[name]
verify(var.value == unexp_value,
"expected variable '{}' to have the unexpanded value '{}', had "
"the value '{}'".format(name, unexp_value, var.value))
verify(var.expanded_value == exp_value,
"expected variable '{}' to have the expanded value '{}', had "
"the value '{}'".format(name, exp_value, var.expanded_value))
verify(var.is_recursive == recursive,
"{} was {}, shouldn't be"
.format(name, "recursive" if var.is_recursive else "simple"))
verify_variable("simple-recursive", "foo", "foo", True)
verify_variable("simple-immediate", "bar", "bar", False)
verify_variable("simple-recursive-2", "baz", "baz", True)
verify_variable("whitespaced", "foo", "foo", True)
verify_variable("preserve-recursive", "foo bar", "foo bar", True)
verify_variable("preserve-immediate", "foo bar", "foo bar", False)
verify_variable("recursive",
"$(foo) $(bar) $($(b-char)a$(z-char)) $(indir)",
"abc def ghi jkl mno",
True)
verify_variable("immediate", "foofoo", "foofoo", False)
verify_variable("messy-fn-res",
"$($(fn-indir)-unused-arg, a b , c d )",
'surround-rev-quote " c d " " a b " surround-rev-quote ',
True)
verify_variable("special-chars-fn-res",
"$(fn,$(comma)$(dollar)$(left-paren)foo$(right-paren))",
'",$(foo)"',
True)
verify_str(c.syms["PRINT_ME"], r"""
config PRINT_ME
string
prompt "env_1" if (FOO && BAR) || !BAZ || !QAZ
default "\"foo\"" if "foo \"bar\" baz" = ""
""")
def verify_recursive(name):
try:
c.variables[name].expanded_value
except KconfigError:
pass
else:
fail("Expected '{}' expansion to flag recursive expansion, didn't"
.format(name))
verify_recursive("rec-1")
# Indirectly verifies that it's not recursive
verify_variable("safe-fn-rec-res",
"$(safe-fn-rec,safe-fn-rec-2)",
"foo",
True)
verify_recursive("unsafe-fn-rec")
verify_variable("foo-bar-baz", "$(rhs)", "value", True)
verify_variable("space-var-res", "$(foo bar)", "value", True)
verify_variable("shell-res",
"$(shell,false && echo foo bar || echo baz qaz)",
"baz qaz",
True)
verify_variable("shell-stderr-res", "", "", False)
verify_variable("location-res",
"Kconfiglib/tests/Kpreprocess:119",
"Kconfiglib/tests/Kpreprocess:119",
False)
verify_variable("warning-res", "", "", False)
verify_variable("error-n-res", "", "", False)
try:
c.variables["error-y-res"].expanded_value
except KconfigError:
pass
else:
fail("expanding error-y-res didn't raise an exception")
# Check Kconfig.env_vars
verify_equal(c.env_vars,
set(("ENV_1", "ENV_2", "ENV_3", "ENV_4", "ENV_5", "ENV_6")))
# Check that the expected warnings were generated
verify_equal(c.warnings, [
"Kconfiglib/tests/Kpreprocess:116: warning: 'echo message on stderr >&2' wrote to stderr: message on stderr",
"Kconfiglib/tests/Kpreprocess:124: warning: a warning"
])
print("Testing KCONFIG_STRICT")
os.environ["KCONFIG_STRICT"] = "y"
c = Kconfig("Kconfiglib/tests/Kstrict", warn_to_stderr=False)
verify_equal("\n".join(c.warnings), """
warning: the int symbol INT (defined at Kconfiglib/tests/Kstrict:8) has a non-int range [UNDEF_2 (undefined), 8 (undefined)]
warning: undefined symbol UNDEF_1:
- Referenced at Kconfiglib/tests/Kstrict:4:
config BOOL
bool
prompt "foo" if DEF || !UNDEF_1
default UNDEF_2
- Referenced at Kconfiglib/tests/Kstrict:19:
menu "menu"
depends on UNDEF_1
visible if UNDEF_3
warning: undefined symbol UNDEF_2:
- Referenced at Kconfiglib/tests/Kstrict:4:
config BOOL
bool
prompt "foo" if DEF || !UNDEF_1
default UNDEF_2
- Referenced at Kconfiglib/tests/Kstrict:8:
config INT
int
range UNDEF_2 8
range 5 15
default 10
warning: undefined symbol UNDEF_3:
- Referenced at Kconfiglib/tests/Kstrict:19:
menu "menu"
depends on UNDEF_1
visible if UNDEF_3
"""[1:])
os.environ.pop("KCONFIG_STRICT")
print("\nAll selftests passed\n" if all_passed else
"\nSome selftests failed\n")
def run_compatibility_tests():
"""
Runs tests on configurations from the kernel. Tests compatibility with the
C implementation by comparing outputs.
"""
# Referenced inside the kernel Kconfig files.
#
# The str() makes the type of the value 'str' on both Python 2 and Python 3,
# which is nice for some later dictionary key sanity checks.
os.environ["KERNELVERSION"] = str(
subprocess.check_output("make kernelversion", shell=True)
.decode("utf-8").rstrip()
)
os.environ["CC_VERSION_TEXT"] = str(
subprocess.check_output("gcc --version | head -n1", shell=True)
.decode("utf-8").rstrip()
)
os.environ["srctree"] = "."
os.environ["CC"] = "gcc"
if not os.path.exists("scripts/kconfig/conf"):
print("\nscripts/kconfig/conf does not exist -- running "
"'make allnoconfig' to build it...")
shell("make allnoconfig")
print("Running compatibility tests...\n")
test_fns = (test_defconfig,
# Fails for a few defconfigs due to a bug in the C tools. Will
# be enabled once patches get in.
#test_min_config,
test_alldefconfig,
test_allnoconfig,
test_allnoconfig_walk,
test_allmodconfig,
test_allyesconfig,
test_sanity)
for test_fn in test_fns:
# The test description is taken from the docstring of the corresponding
# function
print(textwrap.dedent(test_fn.__doc__))
for arch, srcarch in all_arch_srcarch():
# Referenced inside the Kconfig files
os.environ["ARCH"] = arch
os.environ["SRCARCH"] = srcarch
rm_configs()
test_fn(arch, srcarch)
if all_passed:
print("All selftests and compatibility tests passed")
else:
sys.exit("Some tests failed")
def all_arch_srcarch():
for srcarch in os.listdir("arch"):
# arc and h8300 are currently broken with the C tools on linux-next as
# well. Perhaps they require cross-compilers to be installed.
#
# User-mode Linux has an unorthodox Kconfig setup that would require a
# different testing setup. Skip it too.
if srcarch in ("arc", "h8300", "um"):
continue
if os.path.exists(os.path.join("arch", srcarch, "Kconfig")):
yield (srcarch, srcarch)
# Some arches define additional ARCH settings with ARCH != SRCARCH
# (search for "Additional ARCH settings for" in the top-level Makefile)
yield ("i386", "x86")
yield ("x86_64", "x86")
yield ("sparc32", "sparc")
yield ("sparc64", "sparc")
yield ("sh64", "sh")
def test_allnoconfig(arch, srcarch):
"""
Verify that allnoconfig.py generates the same .config as
'make allnoconfig', for each architecture. Runs the script via
'make scriptconfig'.
"""
shell("make scriptconfig SCRIPT=Kconfiglib/allnoconfig.py "
"PYTHONCMD='{}'".format(sys.executable))
shell("mv .config ._config")
shell("scripts/kconfig/conf --allnoconfig Kconfig")
compare_configs(arch)
def test_allnoconfig_walk(arch, srcarch):
"""
Verify that examples/allnoconfig_walk.py generates the same .config as
'make allnoconfig', for each architecture. Runs the script via
'make scriptconfig'.
"""
shell("make scriptconfig SCRIPT=Kconfiglib/examples/allnoconfig_walk.py "
"PYTHONCMD='{}'".format(sys.executable))
shell("mv .config ._config")
shell("scripts/kconfig/conf --allnoconfig Kconfig")
compare_configs(arch)
def test_allmodconfig(arch, srcarch):
"""
Verify that allmodconfig.py generates the same .config as
'make allmodconfig', for each architecture. Runs the script via
'make scriptconfig'.
"""
shell("make scriptconfig SCRIPT=Kconfiglib/allmodconfig.py "
"PYTHONCMD='{}'".format(sys.executable))
shell("mv .config ._config")
shell("scripts/kconfig/conf --allmodconfig Kconfig")
compare_configs(arch)
def test_allyesconfig(arch, srcarch):
"""
Verify that allyesconfig.py generates the same .config as
'make allyesconfig', for each architecture. Runs the script via
'make scriptconfig'.
"""
shell("make scriptconfig SCRIPT=Kconfiglib/allyesconfig.py "
"PYTHONCMD='{}'".format(sys.executable))
shell("mv .config ._config")
shell("scripts/kconfig/conf --allyesconfig Kconfig")
compare_configs(arch)
def test_sanity(arch, srcarch):
"""
Do sanity checks on each configuration and call all public methods on all
symbols, choices, and menu nodes for all architectures to make sure we
never crash or hang.
"""
print("For {}...".format(arch))
kconf = Kconfig()
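# _visited is internal state left over from dependency loop detection; a
# value of 2 means the symbol's dependencies were fully explored without
# finding a loop, rather than being abandoned mid-visit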
for sym in kconf.defined_syms:
verify(sym._visited == 2,
"{} has broken dependency loop detection (_visited = {})"
.format(sym.name, sym._visited))
kconf.modules
kconf.defconfig_list
kconf.defconfig_filename
kconf.enable_redun_warnings()
kconf.disable_redun_warnings()
kconf.enable_undef_warnings()
kconf.disable_undef_warnings()
kconf.enable_warnings()
kconf.disable_warnings()
kconf.enable_stderr_warnings()
kconf.disable_stderr_warnings()
kconf.mainmenu_text
kconf.unset_values()
kconf.write_autoconf("/dev/null")
# No tempfile.TemporaryDirectory in Python 2
tmpdir = tempfile.mkdtemp()
kconf.sync_deps(os.path.join(tmpdir, "deps")) # Create
kconf.sync_deps(os.path.join(tmpdir, "deps")) # Update
shutil.rmtree(tmpdir)
# Python 2/3 compatible dict iteration
for key, sym in kconf.syms.items():
verify(isinstance(key, str), "weird key '{}' in syms dict".format(key))
verify(not sym.is_constant, sym.name + " in 'syms' and constant")
verify(sym not in kconf.const_syms,
sym.name + " in both 'syms' and 'const_syms'")
for dep in sym._dependents:
verify(not dep.is_constant,
"the constant symbol {} depends on {}"
.format(dep.name, sym.name))
sym.__repr__()
sym.__str__()
sym.assignable
kconf.disable_warnings()
sym.set_value(2)
sym.set_value("foo")
sym.unset_value()
kconf.enable_warnings()
sym.str_value
sym.tri_value
sym.type
sym.user_value
sym.visibility
for sym in kconf.defined_syms:
verify(sym.nodes, sym.name + " is defined but lacks menu nodes")
verify(not (sym.orig_type not in (BOOL, TRISTATE) and sym.choice),
sym.name + " is a choice symbol but not bool/tristate")
for key, sym in kconf.const_syms.items():
verify(isinstance(key, str),
"weird key '{}' in const_syms dict".format(key))
verify(sym.is_constant,
'"{}" is in const_syms but not marked constant'
.format(sym.name))
verify(not sym.nodes,
'"{}" is constant but has menu nodes'.format(sym.name))
verify(not sym._dependents,
'"{}" is constant but is a dependency of some symbol'
.format(sym.name))
verify(not sym.choice,
'"{}" is constant and a choice symbol'.format(sym.name))
sym.__repr__()
sym.__str__()
sym.assignable
kconf.disable_warnings()
sym.set_value(2)
sym.set_value("foo")
sym.unset_value()
kconf.enable_warnings()
sym.str_value
sym.tri_value
sym.type
sym.visibility
for choice in kconf.choices:
for sym in choice.syms:
verify(sym.choice is choice,
"{0} is in choice.syms but 'sym.choice' is not the choice"
.format(sym.name))
verify(sym.type in (BOOL, TRISTATE),
"{} is a choice symbol but is not a bool/tristate"
.format(sym.name))
choice.__str__()
choice.__repr__()
choice.str_value
choice.tri_value
choice.user_value
choice.assignable
choice.selection
choice.type
choice.visibility
# Menu nodes
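# Walk the entire menu tree iteratively: descend into a node's child list
# first, otherwise advance to the next sibling, and when neither exists climb
# back up through the parents until a next sibling is found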
node = kconf.top_node
while 1:
# Everything else should be well exercised elsewhere
node.__repr__()
node.__str__()
verify(isinstance(node.item, (Symbol, Choice)) or \
node.item in (MENU, COMMENT),
"'{}' appeared as a menu item".format(node.item))
if node.list is not None:
node = node.list
elif node.next is not None:
node = node.next
else:
while node.parent is not None:
node = node.parent
if node.next is not None:
node = node.next
break
else:
break
def test_alldefconfig(arch, srcarch):
"""
Verify that alldefconfig.py generates the same .config as
'make alldefconfig', for each architecture. Runs the script via
'make scriptconfig'.
"""
shell("make scriptconfig SCRIPT=Kconfiglib/alldefconfig.py "
"PYTHONCMD='{}'".format(sys.executable))
shell("mv .config ._config")
shell("scripts/kconfig/conf --alldefconfig Kconfig")
compare_configs(arch)
def test_defconfig(arch, srcarch):
"""
Verify that Kconfiglib generates the same .config as scripts/kconfig/conf,
for each architecture/defconfig pair. In obsessive mode, this test includes
nonsensical groupings of arches with defconfigs from other arches (every
arch/defconfig combination) and takes an order of magnitude longer to
run.
With logging enabled, this test appends any failures to a file
test_defconfig_fails in the root.
"""
kconf = Kconfig()
if obsessive:
defconfigs = []
# Collect all defconfigs. This could be done once instead, but it's
# a comparatively speedy operation.
for srcarch_ in os.listdir("arch"):
defconfigs.extend(defconfig_files(srcarch_))
else:
defconfigs = defconfig_files(srcarch)
# Test the architecture against each defconfig
for defconfig in defconfigs:
rm_configs()
kconf.load_config(defconfig)
kconf.write_config("._config")
shell("scripts/kconfig/conf --defconfig='{}' Kconfig".
format(defconfig))
arch_defconfig_str = " {:14}with {:60} ".format(arch, defconfig)
if equal_configs():
print(arch_defconfig_str + "OK")
else:
print(arch_defconfig_str + "FAIL")
fail()
if log:
with open("test_defconfig_fails", "a") as fail_log:
fail_log.write("{} with {} did not match\n"
.format(arch, defconfig))
def test_min_config(arch, srcarch):
"""
Verify that Kconfiglib generates the same .config as 'make savedefconfig'
for each architecture/defconfig pair.
"""
kconf = Kconfig()
if obsessive_min_config:
defconfigs = []
for srcarch_ in os.listdir("arch"):
defconfigs.extend(defconfig_files(srcarch_))
else:
defconfigs = defconfig_files(srcarch)
for defconfig in defconfigs:
rm_configs()
kconf.load_config(defconfig)
kconf.write_min_config("._config")
shell("cp {} .config".format(defconfig))
shell("scripts/kconfig/conf --savedefconfig=.config Kconfig")
arch_defconfig_str = " {:14}with {:60} ".format(arch, defconfig)
if equal_configs():
print(arch_defconfig_str + "OK")
else:
print(arch_defconfig_str + "FAIL")
#
# Helper functions
#
def defconfig_files(srcarch):
# Yields the defconfig filenames for a particular srcarch
# subdirectory (arch/<srcarch>/)
srcarch_dir = os.path.join("arch", srcarch)
# Some arches have a defconfig in the root of their arch/<arch>/ directory
root_defconfig = os.path.join(srcarch_dir, "defconfig")
if os.path.exists(root_defconfig):
yield root_defconfig
# Assume all files in the arch/<arch>/configs/ directory (if it exists) are
# configurations
defconfigs_dir = os.path.join(srcarch_dir, "configs")
if not os.path.isdir(defconfigs_dir):
return
for dirpath, _, filenames in os.walk(defconfigs_dir):
for filename in filenames:
yield os.path.join(dirpath, filename)
def rm_configs():
"""
Delete any old ".config" (generated by the C implementation) and
"._config" (generated by us), if present.
"""
def rm_if_exists(f):
if os.path.exists(f):
os.remove(f)
rm_if_exists(".config")
rm_if_exists("._config")
def compare_configs(arch):
if equal_configs():
print("{:14}OK".format(arch))
else:
print("{:14}FAIL".format(arch))
fail()
def equal_configs():
with open(".config") as f:
their = f.readlines()
# Strip the header generated by 'conf'
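# (the header is the leading run of comment lines; a "# CONFIG_FOO is not
# set" line is real configuration output and therefore ends the header)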
i = 0
for line in their:
if not line.startswith("#") or \
re.match(r"# CONFIG_(\w+) is not set", line):
break
i += 1
their = their[i:]
try:
f = open("._config")
except IOError as e:
if e.errno != errno.ENOENT:
raise
print("._config not found. Did you forget to apply the Makefile patch?")
return False
else:
with f:
# [1:] strips the default header
our = f.readlines()[1:]
if their == our:
return True
# Print a unified diff to help debugging
print("Mismatched .config's! Unified diff:")
sys.stdout.writelines(difflib.unified_diff(their, our, fromfile="their",
tofile="our"))
return False
if __name__ == "__main__":
run_tests()
|
[] |
[] |
[
"ENV_1",
"ENV_6",
"ENV_7",
"FOO",
"ENV_2",
"_GSOURCED",
"_GRSOURCED",
"KERNELVERSION",
"_RSOURCED",
"ENV_3",
"ENV_VAR",
"srctree",
"ARCH",
"SRCARCH",
"ENV_5",
"CC",
"CONFIG_",
"ENV_4",
"SUB_DIR_FROM_ENV",
"_SOURCED",
"KCONFIG_STRICT",
"CC_VERSION_TEXT",
"TESTS_DIR_FROM_ENV"
] |
[]
|
["ENV_1", "ENV_6", "ENV_7", "FOO", "ENV_2", "_GSOURCED", "_GRSOURCED", "KERNELVERSION", "_RSOURCED", "ENV_3", "ENV_VAR", "srctree", "ARCH", "SRCARCH", "ENV_5", "CC", "CONFIG_", "ENV_4", "SUB_DIR_FROM_ENV", "_SOURCED", "KCONFIG_STRICT", "CC_VERSION_TEXT", "TESTS_DIR_FROM_ENV"]
|
python
| 23 | 0 | |
examples/time/now/go/returnsCurrentTimeOptionallyWithLocation.go
|
package example
import (
"fmt"
"os"
"github.com/micro/services/clients/go/time"
)
// Get the current time
func ReturnsCurrentTimeOptionallyWithLocation() {
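// Assumes the MICRO_API_TOKEN environment variable holds a valid Micro API token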
timeService := time.NewTimeService(os.Getenv("MICRO_API_TOKEN"))
rsp, err := timeService.Now(&time.NowRequest{})
fmt.Println(rsp, err)
}
|
[
"\"MICRO_API_TOKEN\""
] |
[] |
[
"MICRO_API_TOKEN"
] |
[]
|
["MICRO_API_TOKEN"]
|
go
| 1 | 0 |