filename (stringlengths 4-198) | content (stringlengths 25-939k) | environment (list) | variablearg (list) | constarg (list) | variableargjson (stringclasses 1 value) | constargjson (stringlengths 2-3.9k) | lang (stringclasses 3 values) | constargcount (float64 0-129 ⌀) | variableargcount (float64 0-0 ⌀) | sentence (stringclasses 1 value) |
---|---|---|---|---|---|---|---|---|---|---|
tests/example/app/nested/config.py
|
import os
from pyspark_tooling.logger import log
def get_parallellism():
"""The EMR cluster should provide information
about how many partitions are available"""
parallellism = int(os.environ["PYSPARK_DEFAULT_PARALLELLISM"])
log.info("check pyspark parallellism", parallellism=parallellism)
assert isinstance(parallellism, int)
assert parallellism > 0
return parallellism
def get_target(step="main"):
s3_path = os.environ["S3_PATH"]
log.info("check s3 path", path=s3_path)
assert isinstance(s3_path, str)
assert len(s3_path) > 0
return f"{s3_path}/target/step={step}"
|
[] | [] | ["S3_PATH", "PYSPARK_DEFAULT_PARALLELLISM"] | [] | ["S3_PATH", "PYSPARK_DEFAULT_PARALLELLISM"] | python | 2 | 0 | |
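The row above extracts two constant environment-variable reads (`S3_PATH`, `PYSPARK_DEFAULT_PARALLELLISM`). A hedged usage sketch follows; the values, and the assumption that the module is importable under the path in the filename cell with `pyspark_tooling` installed, are illustrative rather than part of the source:

```python
import os

# Illustrative values only; in the source they come from the EMR cluster / deploy config.
os.environ["PYSPARK_DEFAULT_PARALLELLISM"] = "200"
os.environ["S3_PATH"] = "s3://example-bucket/jobs/demo"  # hypothetical bucket

# Assumes the package layout from the filename cell and that pyspark_tooling is installed.
from tests.example.app.nested.config import get_parallellism, get_target

print(get_parallellism())       # -> 200
print(get_target(step="main"))  # -> s3://example-bucket/jobs/demo/target/step=main
```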
src/spacebears/vendor/github.com/kelseyhightower/envconfig/envconfig.go
|
// Copyright (c) 2013 Kelsey Hightower. All rights reserved.
// Use of this source code is governed by the MIT License that can be found in
// the LICENSE file.
package envconfig
import (
"errors"
"fmt"
"reflect"
"strconv"
"strings"
"syscall"
"time"
)
// ErrInvalidSpecification indicates that a specification is of the wrong type.
var ErrInvalidSpecification = errors.New("specification must be a struct pointer")
// A ParseError occurs when an environment variable cannot be converted to
// the type required by a struct field during assignment.
type ParseError struct {
KeyName string
FieldName string
TypeName string
Value string
}
// A Decoder is a type that knows how to de-serialize environment variables
// into itself.
type Decoder interface {
Decode(value string) error
}
func (e *ParseError) Error() string {
return fmt.Sprintf("envconfig.Process: assigning %[1]s to %[2]s: converting '%[3]s' to type %[4]s", e.KeyName, e.FieldName, e.Value, e.TypeName)
}
// Process populates the specified struct based on environment variables
func Process(prefix string, spec interface{}) error {
s := reflect.ValueOf(spec)
if s.Kind() != reflect.Ptr {
return ErrInvalidSpecification
}
s = s.Elem()
if s.Kind() != reflect.Struct {
return ErrInvalidSpecification
}
typeOfSpec := s.Type()
for i := 0; i < s.NumField(); i++ {
f := s.Field(i)
if !f.CanSet() || typeOfSpec.Field(i).Tag.Get("ignored") == "true" {
continue
}
if typeOfSpec.Field(i).Anonymous && f.Kind() == reflect.Struct {
embeddedPtr := f.Addr().Interface()
if err := Process(prefix, embeddedPtr); err != nil {
return err
}
f.Set(reflect.ValueOf(embeddedPtr).Elem())
}
alt := typeOfSpec.Field(i).Tag.Get("envconfig")
fieldName := typeOfSpec.Field(i).Name
if alt != "" {
fieldName = alt
}
key := strings.ToUpper(fmt.Sprintf("%s_%s", prefix, fieldName))
// `os.Getenv` cannot differentiate between an explicitly set empty value
// and an unset value. `os.LookupEnv` is preferred to `syscall.Getenv`,
// but it is only available in go1.5 or newer.
value, ok := syscall.Getenv(key)
if !ok && alt != "" {
key := strings.ToUpper(fieldName)
value, ok = syscall.Getenv(key)
}
def := typeOfSpec.Field(i).Tag.Get("default")
if def != "" && !ok {
value = def
}
req := typeOfSpec.Field(i).Tag.Get("required")
if !ok && def == "" {
if req == "true" {
return fmt.Errorf("required key %s missing value", key)
}
continue
}
err := processField(value, f)
if err != nil {
return &ParseError{
KeyName: key,
FieldName: fieldName,
TypeName: f.Type().String(),
Value: value,
}
}
}
return nil
}
// MustProcess is the same as Process but panics if an error occurs
func MustProcess(prefix string, spec interface{}) {
if err := Process(prefix, spec); err != nil {
panic(err)
}
}
func processField(value string, field reflect.Value) error {
typ := field.Type()
decoder := decoderFrom(field)
if decoder != nil {
return decoder.Decode(value)
}
if typ.Kind() == reflect.Ptr {
typ = typ.Elem()
if field.IsNil() {
field.Set(reflect.New(typ))
}
field = field.Elem()
}
switch typ.Kind() {
case reflect.String:
field.SetString(value)
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
var (
val int64
err error
)
if field.Kind() == reflect.Int64 && typ.PkgPath() == "time" && typ.Name() == "Duration" {
var d time.Duration
d, err = time.ParseDuration(value)
val = int64(d)
} else {
val, err = strconv.ParseInt(value, 0, typ.Bits())
}
if err != nil {
return err
}
field.SetInt(val)
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
val, err := strconv.ParseUint(value, 0, typ.Bits())
if err != nil {
return err
}
field.SetUint(val)
case reflect.Bool:
val, err := strconv.ParseBool(value)
if err != nil {
return err
}
field.SetBool(val)
case reflect.Float32, reflect.Float64:
val, err := strconv.ParseFloat(value, typ.Bits())
if err != nil {
return err
}
field.SetFloat(val)
case reflect.Slice:
vals := strings.Split(value, ",")
sl := reflect.MakeSlice(typ, len(vals), len(vals))
for i, val := range vals {
err := processField(val, sl.Index(i))
if err != nil {
return err
}
}
field.Set(sl)
}
return nil
}
func decoderFrom(field reflect.Value) Decoder {
if field.CanInterface() {
dec, ok := field.Interface().(Decoder)
if ok {
return dec
}
}
// also check if pointer-to-type implements Decoder,
// and we can get a pointer to our field
if field.CanAddr() {
field = field.Addr()
dec, ok := field.Interface().(Decoder)
if ok {
return dec
}
}
return nil
}
|
[] | [] | [] | [] | [] | go | 0 | 0 | |
edb/tools/test/runner.py
|
#
# This source file is part of the EdgeDB open source project.
#
# Copyright 2017-present MagicStack Inc. and the EdgeDB authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import annotations
from typing import *
import asyncio
import binascii
import collections
import collections.abc
import csv
import dataclasses
import enum
import io
import itertools
import json
import multiprocessing
import multiprocessing.reduction
import multiprocessing.util
import os
import random
import re
import sys
import threading
import time
import types
import unittest.case
import unittest.result
import unittest.runner
import unittest.signals
import warnings
import click
import edgedb
from edb.common import devmode
from edb.testbase import lang as tb_lang
from edb.testbase import server as tb
from . import cpython_state
from . import mproc_fixes
from . import styles
result: Optional[unittest.result.TestResult] = None
coverage_run: Optional[Any] = None
py_hash_secret: bytes = cpython_state.get_py_hash_secret()
py_random_seed: bytes = random.SystemRandom().randbytes(8)
def teardown_suite() -> None:
# The TestSuite methods are mutating the *result* object,
# and the suite itself does not hold any state whatsoever,
# and, in our case specifically, it doesn't even hold
# references to tests being run, so we can think of
# its methods as static.
suite = StreamingTestSuite()
suite._tearDownPreviousClass(None, result) # type: ignore[attr-defined]
suite._handleModuleTearDown(result) # type: ignore[attr-defined]
def init_worker(status_queue: multiprocessing.SimpleQueue,
param_queue: multiprocessing.SimpleQueue,
result_queue: multiprocessing.SimpleQueue) -> None:
global result
global coverage_run
global py_hash_secret
global py_random_seed
# Make sure the generator is re-seeded, as we have inherited
# the seed from the parent process.
py_random_seed = random.SystemRandom().randbytes(8)
random.seed(py_random_seed)
result = ChannelingTestResult(result_queue)
if not param_queue.empty():
server_addr, backend_dsn = param_queue.get()
if server_addr is not None:
os.environ['EDGEDB_TEST_CLUSTER_ADDR'] = json.dumps(server_addr)
if backend_dsn:
os.environ['EDGEDB_TEST_BACKEND_DSN'] = backend_dsn
os.environ['EDGEDB_TEST_PARALLEL'] = '1'
coverage_run = devmode.CoverageConfig.start_coverage_if_requested()
py_hash_secret = cpython_state.get_py_hash_secret()
status_queue.put(True)
def shutdown_worker() -> None:
global coverage_run
teardown_suite()
if coverage_run is not None:
coverage_run.stop()
coverage_run.save()
class StreamingTestSuite(unittest.TestSuite):
_cleanup = False
def run(self, test, result):
with warnings.catch_warnings(record=True) as ww:
warnings.resetwarnings()
warnings.simplefilter('default')
# This is temporary, until we implement `subtransaction`
# functionality of RFC1004
warnings.filterwarnings(
'ignore',
message=r'The "transaction\(\)" method is deprecated'
r' and is scheduled to be removed',
category=DeprecationWarning)
self._run(test, result)
if ww:
for wmsg in ww:
if wmsg.source is not None:
wmsg.source = str(wmsg.source)
result.addWarning(test, wmsg)
def _run(self, test, result):
result._testRunEntered = True
self._tearDownPreviousClass(test, result)
self._handleModuleFixture(test, result)
self._handleClassSetUp(test, result)
result._previousTestClass = test.__class__
if (getattr(test.__class__, '_classSetupFailed', False) or
getattr(result, '_moduleSetUpFailed', False)):
return
start = time.monotonic()
test.run(result)
elapsed = time.monotonic() - start
result.record_test_stats(test, {'running-time': elapsed})
result.annotate_test(test, {
'py-hash-secret': py_hash_secret,
'py-random-seed': py_random_seed,
})
result._testRunEntered = False
return result
def _run_test(workload):
suite = StreamingTestSuite()
if isinstance(workload, collections.abc.Iterable):
# Got a test suite
for test in workload:
suite.run(test, result)
else:
suite.run(workload, result)
def _is_exc_info(args):
return (
isinstance(args, tuple) and
len(args) == 3 and
issubclass(args[0], BaseException)
)
@dataclasses.dataclass
class SerializedServerError:
test_error: str
server_error: str
class ChannelingTestResultMeta(type):
@staticmethod
def get_wrapper(meth):
def _wrapper(self, *args, **kwargs):
args = list(args)
if args and _is_exc_info(args[-1]):
exc_info = args[-1]
err = self._exc_info_to_string(exc_info, args[0])
if isinstance(exc_info[1], edgedb.EdgeDBError):
srv_tb = exc_info[1].get_server_context()
if srv_tb:
err = SerializedServerError(err, srv_tb)
args[-1] = err
try:
self._queue.put((meth, args, kwargs))
except Exception:
print(
f'!!! Test worker child process: '
f'failed to serialize arguments for {meth}: '
f'*args={args} **kwargs={kwargs} !!!')
raise
return _wrapper
def __new__(mcls, name, bases, dct):
for meth in {'startTest', 'addSuccess', 'addError', 'addFailure',
'addSkip', 'addExpectedFailure', 'addUnexpectedSuccess',
'addSubTest', 'addWarning', 'record_test_stats',
'annotate_test'}:
dct[meth] = mcls.get_wrapper(meth)
return super().__new__(mcls, name, bases, dct)
class ChannelingTestResult(unittest.result.TestResult,
metaclass=ChannelingTestResultMeta):
def __init__(self, queue):
super().__init__(io.StringIO(), False, 1)
self._queue = queue
def _setupStdout(self):
pass
def _restoreStdout(self):
pass
def printErrors(self):
pass
def printErrorList(self, flavour, errors):
pass
def __getstate__(self):
state = self.__dict__.copy()
state.pop('_queue')
state.pop('_original_stdout')
state.pop('_original_stderr')
return state
def monitor_thread(queue, result):
while True:
methname, args, kwargs = queue.get()
if methname is None and args is None and kwargs is None:
# This must be the last message in the queue, injected
# when all tests are completed and the pool is about
# to be closed.
break
method = result
for part in methname.split('.'):
method = getattr(method, part)
method(*args, **kwargs)
class ParallelTestSuite(unittest.TestSuite):
def __init__(self, tests, server_conn, num_workers, backend_dsn):
self.tests = tests
self.server_conn = server_conn
self.num_workers = num_workers
self.stop_requested = False
self.backend_dsn = backend_dsn
def run(self, result):
# We use SimpleQueues because they are more predictable.
# They do the necessary IO directly, without using a
# helper thread.
result_queue = multiprocessing.SimpleQueue()
status_queue = multiprocessing.SimpleQueue()
worker_param_queue = multiprocessing.SimpleQueue()
# Prepopulate the worker param queue with server connection
# information.
for _ in range(self.num_workers):
worker_param_queue.put((self.server_conn, self.backend_dsn))
result_thread = threading.Thread(
name='test-monitor', target=monitor_thread,
args=(result_queue, result), daemon=True)
result_thread.start()
initargs = (status_queue, worker_param_queue, result_queue)
pool = multiprocessing.Pool(
self.num_workers,
initializer=mproc_fixes.WorkerScope(init_worker, shutdown_worker),
initargs=initargs)
# Wait for all workers to initialize.
for _ in range(self.num_workers):
status_queue.get()
with pool:
ar = pool.map_async(_run_test, iter(self.tests), chunksize=1)
while True:
try:
ar.get(timeout=0.1)
except multiprocessing.TimeoutError:
if self.stop_requested:
break
else:
continue
else:
break
# Post the terminal message to the queue so that
# test-monitor can stop.
result_queue.put((None, None, None))
# Give the test-monitor thread some time to
# process the queue messages. If something
# goes wrong, the thread will be forcibly
# joined by a timeout.
result_thread.join(timeout=3)
# Wait for pool to shutdown, this includes test teardowns.
pool.join()
return result
class SequentialTestSuite(unittest.TestSuite):
def __init__(self, tests, server_conn, backend_dsn):
self.tests = tests
self.server_conn = server_conn
self.stop_requested = False
self.backend_dsn = backend_dsn
def run(self, result_):
global result
result = result_
if self.server_conn:
os.environ['EDGEDB_TEST_CLUSTER_ADDR'] = \
json.dumps(self.server_conn)
if self.backend_dsn:
os.environ['EDGEDB_TEST_BACKEND_DSN'] = self.backend_dsn
random.seed(py_random_seed)
for test in self.tests:
_run_test(test)
if self.stop_requested:
break
# Make sure the class and the module teardown methods are
# executed for the trailing test, _run_test() does not do
# this for us.
teardown_suite()
return result
class Markers(enum.Enum):
passed = '.'
errored = 'E'
skipped = 's'
failed = 'F'
xfailed = 'x' # expected fail
not_implemented = '-'
upassed = 'U' # unexpected success
class OutputFormat(enum.Enum):
auto = 'auto'
simple = 'simple'
stacked = 'stacked'
verbose = 'verbose'
class BaseRenderer:
def __init__(self, *, tests, stream):
self.stream = stream
self.styles_map = {
marker.value: getattr(styles, f'marker_{marker.name}')
for marker in Markers}
def format_test(self, test):
if isinstance(test, unittest.case._SubTest):
if test.params:
params = ', '.join(
f'{k}={v!r}' for k, v in test.params.items())
else:
params = '<subtest>'
return f'{test.test_case} {{{params}}}'
else:
if hasattr(test, 'fail_notes') and test.fail_notes:
fail_notes = ', '.join(
f'{k}={v!r}' for k, v in test.fail_notes.items())
return f'{test} {{{fail_notes}}}'
else:
return str(test)
def report(self, test, marker, description=None, *, currently_running):
raise NotImplementedError
class SimpleRenderer(BaseRenderer):
def report(self, test, marker, description=None, *, currently_running):
click.echo(self.styles_map[marker.value](marker.value),
nl=False, file=self.stream)
class VerboseRenderer(BaseRenderer):
fullnames = {
Markers.passed: 'OK',
Markers.errored: 'ERROR',
Markers.skipped: 'SKIPPED',
Markers.failed: 'FAILED',
Markers.xfailed: 'expected failure',
Markers.not_implemented: 'not implemented',
Markers.upassed: 'unexpected success',
}
def _render_test(self, test, marker, description):
test_title = self.format_test(test)
if description:
return f'{test_title}: {self.fullnames[marker]}: {description}'
else:
return f'{test_title}: {self.fullnames[marker]}'
def report(self, test, marker, description=None, *, currently_running):
style = self.styles_map[marker.value]
click.echo(style(self._render_test(test, marker, description)),
file=self.stream)
class MultiLineRenderer(BaseRenderer):
FT_LABEL = 'First few failed: '
FT_MAX_LINES = 3
R_LABEL = 'Running: '
R_MAX_LINES = 3
def __init__(self, *, tests, stream):
super().__init__(tests=tests, stream=stream)
self.total_tests = len(tests)
self.completed_tests = 0
test_modules = {test.__class__.__module__ for test in tests}
max_test_module_len = max((len(self._render_modname(name))
for name in test_modules), default=0)
self.first_col_width = max_test_module_len + 1 # 1 == len(' ')
self.failed_tests = set()
self.buffer = collections.defaultdict(str)
self.last_lines = -1
self.max_lines = 0
self.max_label_lines_rendered = collections.defaultdict(int)
def report(self, test, marker, description=None, *, currently_running):
if marker in {Markers.failed, Markers.errored}:
test_name = test.id().rpartition('.')[2]
if ' ' in test_name:
test_name = test_name.split(' ')[0]
self.failed_tests.add(test_name)
self.buffer[test.__class__.__module__] += marker.value
self.completed_tests += 1
self._render(currently_running)
def _render_modname(self, name):
return name.replace('.', '/') + '.py'
def _color_second_column(self, line, style):
return line[:self.first_col_width] + style(line[self.first_col_width:])
def _render(self, currently_running):
def print_line(line):
if len(line) < cols:
line += ' ' * (cols - len(line))
lines.append(line)
def print_empty_line():
print_line(' ')
last_render = self.completed_tests == self.total_tests
cols, rows = click.get_terminal_size()
second_col_width = cols - self.first_col_width
def _render_test_list(label, max_lines, tests, style):
if (
len(label) > self.first_col_width
or cols - self.first_col_width <= 40
):
return
print_empty_line()
line = f'{label}{" " * (self.first_col_width - len(label))}'
tests_lines = 1
for testi, test in enumerate(tests, 1):
last = testi == len(tests)
if not last:
test += ', '
test_name_len = len(test)
if len(line) + test_name_len < cols:
line += test
else:
if tests_lines == max_lines:
if len(line) + 3 < cols:
line += '...'
break
else:
line += (cols - len(line)) * ' '
line = self._color_second_column(line, style)
lines.append(line)
tests_lines += 1
line = self.first_col_width * ' '
if len(line) + test_name_len > cols:
continue
line += test
line += (cols - len(line)) * ' '
line = self._color_second_column(line, style)
lines.append(line)
# Prevent the rendered output from "jumping" up/down when we
# render 2 lines worth of running tests just after we rendered
# 3 lines.
for _ in range(self.max_label_lines_rendered[label] - tests_lines):
lines.append(' ' * cols)
self.max_label_lines_rendered[label] = max(
self.max_label_lines_rendered[label],
tests_lines
)
clear_cmd = ''
if self.last_lines > 0:
# Move cursor up `last_lines` times.
clear_cmd = f'\r\033[{self.last_lines}A'
lines = []
for mod, progress in self.buffer.items():
line = self._render_modname(mod).ljust(self.first_col_width, ' ')
while progress:
second_col = progress[:second_col_width]
second_col = second_col.ljust(second_col_width, ' ')
progress = progress[second_col_width:]
# Apply styles *after* slicing and padding the string
# (otherwise ANSI codes could be sliced in half).
second_col = re.sub(
r'\S',
lambda x: self.styles_map[x[0]](x[0]),
second_col)
lines.append(f'{line}{second_col}')
if line[0] != ' ':
line = ' ' * self.first_col_width
if not last_render:
if self.failed_tests:
_render_test_list(
self.FT_LABEL,
self.FT_MAX_LINES,
self.failed_tests,
styles.marker_errored,
)
running_tests = []
for test in currently_running:
test_name = test.id().rpartition('.')[2]
if ' ' in test_name:
test_name = test_name.split(' ')[0]
running_tests.append(test_name)
if not running_tests:
running_tests.append('...')
_render_test_list(
self.R_LABEL,
self.R_MAX_LINES,
running_tests,
styles.marker_passed
)
print_empty_line()
print_line(
f'Progress: {self.completed_tests}/{self.total_tests} tests.')
if last_render:
if self.max_lines > len(lines):
for _ in range(self.max_lines - len(lines)):
lines.insert(0, ' ' * cols)
else:
# If it's not the last test, check if our render buffer
# requires more rows than currently visible.
if len(lines) + 1 > rows:
# Scroll the render buffer to the bottom and
# cut the lines from the beginning, so that it
# will fit the screen.
#
# We need to do this because we can't move the
# cursor past the visible screen area, so if we
# render more data than the screen can fit, we
# will have lots of garbage output.
lines = lines[len(lines) + 1 - rows:]
lines[0] = '^' * cols
# Hide cursor.
print('\033[?25l', end='', flush=True, file=self.stream)
try:
# Use `print` (not `click.echo`) because we want to
# precisely control when the output is flushed.
print(clear_cmd + '\n'.join(lines), flush=False, file=self.stream)
finally:
# Show cursor.
print('\033[?25h', end='', flush=True, file=self.stream)
self.last_lines = len(lines)
self.max_lines = max(self.last_lines, self.max_lines)
class ParallelTextTestResult(unittest.result.TestResult):
def __init__(self, *, stream, verbosity, warnings, tests,
output_format=OutputFormat.auto, failfast=False, suite):
super().__init__(stream, False, verbosity)
self.verbosity = verbosity
self.catch_warnings = warnings
self.failfast = failfast
self.test_stats = []
self.test_annotations = collections.defaultdict(dict)
self.warnings = []
self.notImplemented = []
self.currently_running = {}
# An index of all seen warnings to keep track
# of repeated warnings.
self._warnings = {}
self.suite = suite
if (output_format is OutputFormat.verbose or
(output_format is OutputFormat.auto and self.verbosity > 1)):
self.ren = VerboseRenderer(tests=tests, stream=stream)
elif (output_format is OutputFormat.stacked or
(output_format is OutputFormat.auto and stream.isatty() and
click.get_terminal_size()[0] > 60 and
os.name != 'nt')):
self.ren = MultiLineRenderer(tests=tests, stream=stream)
else:
self.ren = SimpleRenderer(tests=tests, stream=stream)
def report_progress(self, test, marker, description=None):
self.currently_running.pop(test, None)
self.ren.report(
test,
marker,
description,
currently_running=list(self.currently_running),
)
def record_test_stats(self, test, stats):
self.test_stats.append((test, stats))
def annotate_test(self, test, annotations: Dict[str, Any]) -> None:
self.test_annotations[test].update(annotations)
def get_test_annotations(self, test) -> Optional[Dict[str, Any]]:
return self.test_annotations.get(test)
def _exc_info_to_string(self, err, test):
# Errors are serialized in the worker.
return err
def getDescription(self, test):
return self.ren.format_test(test)
def startTest(self, test):
super().startTest(test)
self.currently_running[test] = True
def addSuccess(self, test):
super().addSuccess(test)
self.report_progress(test, Markers.passed)
def addError(self, test, err):
super().addError(test, err)
self.report_progress(test, Markers.errored)
if self.failfast:
self.suite.stop_requested = True
def addFailure(self, test, err):
super().addFailure(test, err)
self.report_progress(test, Markers.failed)
if self.failfast:
self.suite.stop_requested = True
def addSubTest(self, test, subtest, err):
if err is not None:
self.errors.append((subtest, self._exc_info_to_string(err, test)))
self._mirrorOutput = True
self.ren.report(
subtest,
Markers.errored,
currently_running=list(self.currently_running))
if self.failfast:
self.suite.stop_requested = True
def addSkip(self, test, reason):
super().addSkip(test, reason)
self.report_progress(test, Markers.skipped)
def addExpectedFailure(self, test, err):
method = getattr(test, test._testMethodName)
try:
reason = method.__et_xfail_reason__
not_impl = getattr(method, '__et_xfail_not_implemented__', False)
except AttributeError:
# Maybe the whole test case class is decorated?
reason = getattr(test, '__et_xfail_reason__', None)
not_impl = getattr(test, '__et_xfail_not_implemented__', False)
marker = Markers.not_implemented if not_impl else Markers.xfailed
if not_impl:
self.notImplemented.append(
(test, self._exc_info_to_string(err, test)))
else:
super().addExpectedFailure(test, err)
self.report_progress(test, marker, reason)
def addUnexpectedSuccess(self, test):
super().addUnexpectedSuccess(test)
self.report_progress(test, Markers.upassed)
def addWarning(self, test, wmsg):
if not self.catch_warnings:
return
key = str(wmsg.message), wmsg.filename, wmsg.lineno
if key not in self._warnings:
self._warnings[key] = wmsg
self.warnings.append((test, warnings.formatwarning(
wmsg.message, wmsg.category, wmsg.filename, wmsg.lineno,
wmsg.line
)))
def wasSuccessful(self):
# Overload TestResult.wasSuccessful to ignore unexpected successes
return (len(self.failures) == len(self.errors) == 0)
class ParallelTextTestRunner:
def __init__(self, *, stream=None, num_workers=1, verbosity=1,
output_format=OutputFormat.auto, warnings=True,
failfast=False, shuffle=False, backend_dsn=None):
self.stream = stream if stream is not None else sys.stderr
self.num_workers = num_workers
self.verbosity = verbosity
self.warnings = warnings
self.failfast = failfast
self.shuffle = shuffle
self.output_format = output_format
self.backend_dsn = backend_dsn
def run(self, test, selected_shard, total_shards, running_times_log_file):
session_start = time.monotonic()
cases = tb.get_test_cases([test])
stats = {}
if running_times_log_file:
running_times_log_file.seek(0)
stats = {
k: (float(v), int(c))
for k, v, c in csv.reader(running_times_log_file)
}
cases = tb.get_cases_by_shard(
cases, selected_shard, total_shards, self.verbosity, stats,
)
setup = tb.get_test_cases_setup(cases)
lang_setup = tb_lang.get_test_cases_setup(cases)
bootstrap_time_taken = 0
tests_time_taken = 0
result = None
cluster = None
conn = None
setup_stats = []
if lang_setup:
tb_lang.run_test_cases_setup(lang_setup, jobs=self.num_workers)
try:
if setup:
if self.verbosity >= 1:
self._echo(
'Populating test databases... ',
fg='white',
nl=False,
)
if self.verbosity > 1:
self._echo(
'\n -> Bootstrapping EdgeDB instance...',
fg='white',
nl=False,
)
async def _setup():
nonlocal cluster
nonlocal conn
cluster = await tb.init_cluster(
backend_dsn=self.backend_dsn,
cleanup_atexit=False,
)
if self.verbosity > 1:
self._echo(' OK')
conn = cluster.get_connect_args()
if cluster.has_create_database():
return await tb.setup_test_cases(
cases,
conn,
self.num_workers,
verbose=self.verbosity > 1,
)
else:
return []
setup_stats = asyncio.run(_setup())
if cluster.has_create_database():
os.environ.update({
'EDGEDB_TEST_CASES_SET_UP': "skip"
})
else:
os.environ.update({
'EDGEDB_TEST_CASES_SET_UP': "inplace"
})
os.environ.update({
'EDGEDB_TEST_HAS_CREATE_ROLE': str(
cluster.has_create_role()
)
})
bootstrap_time_taken = time.monotonic() - session_start
if self.verbosity >= 1:
self._echo('OK')
start = time.monotonic()
all_tests = list(itertools.chain.from_iterable(
tests for tests in cases.values()))
if self.num_workers > 1:
suite = ParallelTestSuite(
self._sort_tests(cases),
conn,
self.num_workers,
self.backend_dsn,
)
else:
suite = SequentialTestSuite(
self._sort_tests(cases),
conn,
self.backend_dsn,
)
result = ParallelTextTestResult(
stream=self.stream, verbosity=self.verbosity,
warnings=self.warnings, failfast=self.failfast,
output_format=self.output_format,
tests=all_tests, suite=suite)
unittest.signals.registerResult(result)
self._echo()
suite.run(result)
if running_times_log_file:
for test, stat in result.test_stats + setup_stats:
name = str(test)
t = stat['running-time']
at, c = stats.get(name, (0, 0))
stats[name] = (at + (t - at) / (c + 1), c + 1)
running_times_log_file.seek(0)
running_times_log_file.truncate()
writer = csv.writer(running_times_log_file)
for k, v in stats.items():
writer.writerow((k, ) + v)
tests_time_taken = time.monotonic() - start
except KeyboardInterrupt:
raise
finally:
if self.verbosity == 1:
self._echo()
if setup:
self._echo()
self._echo('Shutting down test cluster... ', nl=False)
tb._shutdown_cluster(cluster, destroy=True)
self._echo('OK.')
if result is not None:
self._render_result(
result, bootstrap_time_taken, tests_time_taken)
return result
def _get_term_width(self):
return click.get_terminal_size()[0] or 70
def _echo(self, s='', **kwargs):
if self.verbosity > 0:
click.secho(s, file=self.stream, **kwargs)
def _fill(self, char, **kwargs):
self._echo(char * self._get_term_width(), **kwargs)
def _format_time(self, seconds):
hours = int(seconds // 3600)
seconds %= 3600
minutes = int(seconds // 60)
seconds %= 60
return f'{hours:02d}:{minutes:02d}:{seconds:04.1f}'
def _print_errors(self, result):
uxsuccesses = ((s, '') for s in result.unexpectedSuccesses)
data = zip(
('WARNING', 'ERROR', 'FAIL', 'UNEXPECTED SUCCESS'),
('yellow', 'red', 'red', 'red'),
(result.warnings, result.errors, result.failures, uxsuccesses)
)
for kind, fg, errors in data:
for test, err in errors:
self._fill('=', fg=fg)
self._echo(f'{kind}: {result.getDescription(test)}',
fg=fg, bold=True)
self._fill('-', fg=fg)
if annos := result.get_test_annotations(test):
if phs := annos.get('py-hash-secret'):
phs_hex = binascii.hexlify(phs).decode()
self._echo(f'Py_HashSecret: {phs_hex}')
if prs := annos.get('py-random-seed'):
prs_hex = binascii.hexlify(prs).decode()
self._echo(f'random.seed(): {prs_hex}')
self._fill('-', fg=fg)
srv_tb = None
if _is_exc_info(err):
if isinstance(err[1], edgedb.EdgeDBError):
srv_tb = err[1].get_server_context()
err = unittest.result.TestResult._exc_info_to_string(
result, err, test)
elif isinstance(err, SerializedServerError):
err, srv_tb = err.test_error, err.server_error
if srv_tb:
self._echo('Server Traceback:',
fg='red', bold=True)
self._echo(srv_tb)
self._echo('Test Traceback:',
fg='red', bold=True)
self._echo(err)
def _render_result(self, result, boot_time_taken, tests_time_taken):
self._echo()
if self.verbosity > 0:
self._print_errors(result)
if result.wasSuccessful():
fg = 'green'
outcome = 'SUCCESS'
else:
fg = 'red'
outcome = 'FAILURE'
if self.verbosity > 1:
self._fill('=', fg=fg)
self._echo(outcome, fg=fg, bold=True)
counts = [('tests ran', result.testsRun)]
display = {
'expectedFailures': 'expected failures',
'notImplemented': 'not implemented',
'unexpectedSuccesses': 'unexpected successes',
}
for bit in ['failures', 'errors', 'expectedFailures',
'notImplemented', 'unexpectedSuccesses', 'skipped']:
count = len(getattr(result, bit))
if count:
counts.append((display.get(bit, bit), count))
for bit, count in counts:
self._echo(f' {bit}: ', nl=False)
self._echo(f'{count}', bold=True)
self._echo()
self._echo(f'Running times: ')
if boot_time_taken:
self._echo(' bootstrap: ', nl=False)
self._echo(self._format_time(boot_time_taken), bold=True)
self._echo(' tests: ', nl=False)
self._echo(self._format_time(tests_time_taken), bold=True)
if boot_time_taken:
self._echo(' total: ', nl=False)
self._echo(self._format_time(boot_time_taken + tests_time_taken),
bold=True)
self._echo()
return result
def _sort_tests(self, cases):
serialized_suites = {}
exclusive_suites = set()
exclusive_tests = []
for casecls, tests in cases.items():
gg = getattr(casecls, 'get_parallelism_granularity', None)
granularity = gg() if gg is not None else 'default'
if granularity == 'suite':
serialized_suites[casecls] = unittest.TestSuite(tests)
elif granularity == 'system':
exclusive_tests.extend(tests)
exclusive_suites.add(casecls)
tests = itertools.chain(
serialized_suites.values(),
itertools.chain.from_iterable(
tests for casecls, tests in cases.items()
if (
casecls not in serialized_suites
and casecls not in exclusive_suites
)
),
[unittest.TestSuite(exclusive_tests)],
)
test_list = list(tests)
if self.shuffle:
random.shuffle(test_list)
return test_list
# Disable pickling of traceback objects in multiprocessing.
# Test errors' tracebacks are serialized manually by
# `TestResult._exc_info_to_string()`. Therefore we need
# to make sure that some random __traceback__ attribute
# doesn't crash the test results queue.
multiprocessing.reduction.ForkingPickler.register(
types.TracebackType,
lambda o: (_restore_Traceback, ()))
def _restore_Traceback():
return None
|
[] | [] | ["EDGEDB_TEST_PARALLEL", "EDGEDB_TEST_CLUSTER_ADDR", "EDGEDB_TEST_BACKEND_DSN"] | [] | ["EDGEDB_TEST_PARALLEL", "EDGEDB_TEST_CLUSTER_ADDR", "EDGEDB_TEST_BACKEND_DSN"] | python | 3 | 0 | |
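The three variables extracted from the row above form a parent-to-worker hand-off: the runner JSON-encodes the cluster address into `EDGEDB_TEST_CLUSTER_ADDR`, optionally passes a backend DSN, and parallel workers mark themselves with `EDGEDB_TEST_PARALLEL=1`. A minimal stdlib-only sketch of that pattern; the address value and the reader helper are illustrative, and the exact nesting inside `init_worker` is simplified:

```python
import json
import os

def publish_worker_env(server_addr, backend_dsn=None):
    """Rough mirror of the env hand-off in init_worker() above (nesting simplified)."""
    if server_addr is not None:
        os.environ["EDGEDB_TEST_CLUSTER_ADDR"] = json.dumps(server_addr)
    if backend_dsn:
        os.environ["EDGEDB_TEST_BACKEND_DSN"] = backend_dsn
    os.environ["EDGEDB_TEST_PARALLEL"] = "1"  # parallel workers flag themselves

def read_cluster_addr():
    """Hypothetical consumer: decode the JSON-encoded address, if present."""
    raw = os.environ.get("EDGEDB_TEST_CLUSTER_ADDR")
    return json.loads(raw) if raw else None

publish_worker_env({"host": "localhost", "port": 5656})  # illustrative address
print(read_cluster_addr())  # -> {'host': 'localhost', 'port': 5656}
```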
psana/setup.py
|
import os
import sys
import numpy as np
from setuptools import setup, Extension, find_packages
# HIDE WARNING:
# cc1plus: warning: command line option "-Wstrict-prototypes" is valid for C/ObjC but not for C++
from distutils.sysconfig import get_config_vars
cfg_vars = get_config_vars()
for k, v in cfg_vars.items():
if type(v) == str:
cfg_vars[k] = v.replace("-Wstrict-prototypes", "")
print('Begin: %s' % ' '.join(sys.argv))
instdir_env = os.environ.get('INSTDIR')
if not instdir_env:
raise Exception('Parameter --instdir is missing')
instdir = instdir_env
# A shorter BUILD_LIST can be used to speed up the development loop.
# Command example: ./build_all.sh -b PEAKFINDER:HEXANODE:CFD -md
BUILD_LIST = ('PSANA','SHMEM','PEAKFINDER','HEXANODE','DGRAM','HSD','CFD','NDARRAY')# ,'XTCAV')
build_list_env = os.environ.get('BUILD_LIST')
if build_list_env:
BUILD_LIST = build_list_env.split(':')
#print('Build c++ python-extensions: %s' % s_exts)
# allows a version number to be passed to the setup
VERSION = '0.0.0'
version_env = os.environ.get('VERSION')
if version_env:
VERSION = version_env
print('-- psana.setup.py build extensions : %s' % ' '.join(BUILD_LIST))
print('-- psana.setup.py install directory : %s' % instdir)
print('-- psana.setup.py include sys.prefix: %s' % sys.prefix)
print('-- psana.setup.py np.get_include() : %s' % np.get_include())
if sys.platform == 'darwin':
# Flag -Wno-cpp hides warning:
#warning "Using deprecated NumPy API, disable it with " "#define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION" [-W#warnings]
macos_sdk_version_arg = '-mmacosx-version-min=10.9'
extra_c_compile_args = ['-Wno-#warnings', macos_sdk_version_arg]
extra_cxx_compile_args = ['-std=c++11', '-Wno-#warnings', macos_sdk_version_arg]
extra_link_args = [macos_sdk_version_arg]
# Use libgomp instead of the version provided by the compiler. Passing plain -fopenmp uses the llvm version of OpenMP
# which appears to have a conflict with the numpy we use from conda. numpy uses Intel MKL which itself uses OpenMP,
# but this seems to cause crashes if you use the llvm OpenMP in the same process.
openmp_compile_args = ['-fopenmp=libgomp']
openmp_link_args = ['-fopenmp=libgomp']
else:
# Flag -Wno-cpp hides warning:
#warning "Using deprecated NumPy API, disable it with " "#define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION" [-Wcpp]
extra_c_compile_args=['-Wno-cpp']
extra_cxx_compile_args=['-std=c++11', '-Wno-cpp']
extra_link_args = []
# Use the version of openmp provided by the compiler
openmp_compile_args = ['-fopenmp']
openmp_link_args = ['-fopenmp']
extra_link_args_rpath = extra_link_args + ['-Wl,-rpath,'+ os.path.abspath(os.path.join(instdir, 'lib'))]
CYT_BLD_DIR = 'build'
from Cython.Build import cythonize
# defaults if the build list is empty
PACKAGES = []
EXTS = []
CYTHON_EXTS = []
INSTALL_REQS = []
PACKAGE_DATA = {}
ENTRY_POINTS = {}
if 'PSANA' in BUILD_LIST :
dgram_module = Extension('psana.dgram',
sources = ['src/dgram.cc'],
libraries = ['xtc'],
include_dirs = ['src', np.get_include(), os.path.join(instdir, 'include')],
library_dirs = [os.path.join(instdir, 'lib')],
extra_link_args = extra_link_args_rpath,
extra_compile_args = extra_cxx_compile_args)
container_module = Extension('psana.container',
sources = ['src/container.cc'],
libraries = ['xtc'],
include_dirs = [np.get_include(), os.path.join(instdir, 'include')],
library_dirs = [os.path.join(instdir, 'lib')],
extra_link_args = extra_link_args_rpath,
extra_compile_args = extra_cxx_compile_args)
PACKAGES = find_packages()
PACKAGE_DATA = {'psana.graphqt': ['data/icons/*.png','data/icons/*.gif']}
EXTS = [dgram_module, container_module]
INSTALL_REQS = [
'numpy',
]
ENTRY_POINTS = {
'console_scripts': [
'convert_npy_to_txt = psana.pyalgos.app.convert_npy_to_txt:do_main',
'convert_txt_to_npy = psana.pyalgos.app.convert_txt_to_npy:do_main',
'merge_mask_ndarrays = psana.pyalgos.app.merge_mask_ndarrays:do_main',
'merge_max_ndarrays = psana.pyalgos.app.merge_max_ndarrays:do_main',
'cdb = psana.pscalib.app.cdb:cdb_cli',
'proc_info = psana.pscalib.app.proc_info:do_main',
'proc_control = psana.pscalib.app.proc_control:do_main',
'proc_new_datasets = psana.pscalib.app.proc_new_datasets:do_main',
'timeconverter = psana.graphqt.app.timeconverter:timeconverter',
'calibman = psana.graphqt.app.calibman:calibman_gui',
'hdf5explorer = psana.graphqt.app.hdf5explorer:hdf5explorer_gui',
'screengrabber = psana.graphqt.ScreenGrabberQt5:run_GUIScreenGrabber',
'detnames = psana.app.detnames:detnames',
'config_dump = psana.app.config_dump:config_dump',
'xtcavDark = psana.xtcav.app.xtcavDark:__main__',
'xtcavLasingOff = psana.xtcav.app.xtcavLasingOff:__main__',
'xtcavLasingOn = psana.xtcav.app.xtcavLasingOn:__main__',
'xtcavDisplay = psana.xtcav.app.xtcavDisplay:__main__',
'shmemClientSimple = psana.app.shmemClientSimple:main',
'epix10ka_pedestals_calibration = psana.app.epix10ka_pedestals_calibration:do_main',
'epix10ka_deploy_constants = psana.app.epix10ka_deploy_constants:do_main',
'epix10ka_raw_calib_image = psana.app.epix10ka_raw_calib_image:do_main',
'epix10ka_calib_components = psana.app.epix10ka_calib_components:__main__',
'datinfo = psana.app.datinfo:do_main',
'det_dark_proc = psana.app.det_dark_proc:do_main',
'parallel_proc = psana.app.parallel_proc:do_main',
'iv = psana.graphqt.app.iv:image_viewer',
]
}
if 'SHMEM' in BUILD_LIST and sys.platform != 'darwin':
ext = Extension('shmem',
sources=["psana/shmem/shmem.pyx"],
libraries = ['xtc','shmemcli'],
include_dirs = [np.get_include(), os.path.join(instdir, 'include')],
library_dirs = [os.path.join(instdir, 'lib')],
language="c++",
extra_compile_args = extra_cxx_compile_args,
extra_link_args = extra_link_args_rpath,
)
CYTHON_EXTS.append(ext)
if 'PEAKFINDER' in BUILD_LIST :
ext = Extension("peakFinder",
sources=["psana/peakFinder/peakFinder.pyx",
"psana/peakFinder/src/PeakFinderAlgos.cc",
"psana/peakFinder/src/LocalExtrema.cc"],
libraries = ['utils'], # for SysLog
language="c++",
extra_compile_args = extra_cxx_compile_args,
extra_link_args = extra_link_args_rpath,
include_dirs=[np.get_include(), os.path.join(instdir, 'include')],
library_dirs = [os.path.join(instdir, 'lib')],
)
CYTHON_EXTS.append(ext)
# direct LCLS1 version of peak-finders
ext = Extension("psalg_ext",
sources=["psana/peakFinder/psalg_ext.pyx",
"psana/peakFinder/src/PeakFinderAlgosLCLS1.cc",
"psana/peakFinder/src/LocalExtrema.cc"],
libraries = ['utils'], # for SysLog
language="c++",
extra_compile_args = extra_cxx_compile_args,
extra_link_args = extra_link_args_rpath,
include_dirs=[np.get_include(), os.path.join(instdir, 'include')],
library_dirs = [os.path.join(instdir, 'lib')],
)
CYTHON_EXTS.append(ext)
ext = Extension("peakfinder8",
sources=["psana/peakFinder/peakfinder8.pyx",
"psana/peakFinder/peakfinder8.cc"],
libraries = ['utils'], # for SysLog
language="c++",
extra_compile_args = extra_cxx_compile_args,
extra_link_args = extra_link_args_rpath,
include_dirs=[np.get_include(), os.path.join(instdir, 'include')],
library_dirs = [os.path.join(instdir, 'lib')],
)
CYTHON_EXTS.append(ext)
if 'HEXANODE' in BUILD_LIST :
# ugly: only build hexanode apps if the roentdek software exists.
# this is a rough python equivalent of the way cmake finds out whether
# packages exist. - cpo
if(os.path.isfile(os.path.join(sys.prefix, 'lib', 'libResort64c_x64.a'))):
ext = Extension("hexanode",
sources=["psana/hexanode/hexanode_ext.pyx",
"psana/hexanode/src/cfib.cc",
"psana/hexanode/src/wrap_resort64c.cc",
"psana/hexanode/src/SortUtils.cc",
"psana/hexanode/src/LMF_IO.cc"],
language="c++",
extra_compile_args = extra_cxx_compile_args,
include_dirs=[os.path.join(sys.prefix,'include'), np.get_include(), os.path.join(instdir, 'include')],
library_dirs = [os.path.join(instdir, 'lib'), os.path.join(sys.prefix, 'lib')],
libraries=['Resort64c_x64'],
extra_link_args = extra_link_args,
)
CYTHON_EXTS.append(ext)
if 'HEXANODE_TEST' in BUILD_LIST :
if(os.path.isfile(os.path.join(sys.prefix, 'lib', 'libResort64c_x64.a'))):
ext = Extension("hexanode",
sources=["psana/hexanode/test_ext.pyx",
"psana/hexanode/src/LMF_IO.cc",
"psana/hexanode/src/cfib.cc"],
language="c++",
extra_compile_args = extra_cxx_compile_args,
include_dirs=[np.get_include(), os.path.join(instdir, 'include')],
library_dirs = [os.path.join(instdir, 'lib')],
extra_link_args = extra_link_args,
)
CYTHON_EXTS.append(ext)
if 'CFD' in BUILD_LIST :
ext = Extension("constFracDiscrim",
sources=["psana/constFracDiscrim/constFracDiscrim.pyx",
"psana/constFracDiscrim/src/ConstFracDiscrim.cc"],
language="c++",
extra_compile_args = extra_cxx_compile_args,
extra_link_args = extra_link_args,
include_dirs=[os.path.join(sys.prefix,'include'), np.get_include(), os.path.join(instdir, 'include')],
)
CYTHON_EXTS.append(ext)
if 'DGRAM' in BUILD_LIST :
ext = Extension('dgramCreate',
#packages=['psana.peakfinder',],
sources=["psana/peakFinder/dgramCreate.pyx"],
libraries = ['xtc'],
include_dirs = [np.get_include(), os.path.join(instdir, 'include')],
library_dirs = [os.path.join(instdir, 'lib')],
language="c++",
extra_compile_args = extra_cxx_compile_args,
extra_link_args = extra_link_args_rpath,
# include_dirs=[np.get_include(), "../install/include"]
)
CYTHON_EXTS.append(ext)
ext = Extension("psana.dgramchunk",
sources=["src/dgramchunk.pyx"],
extra_compile_args=extra_c_compile_args,
extra_link_args=extra_link_args,
)
CYTHON_EXTS.append(ext)
ext = Extension("psana.smdreader",
sources=["psana/smdreader.pyx"],
include_dirs=["psana"],
#extra_compile_args=extra_c_compile_args,
extra_compile_args=extra_c_compile_args + openmp_compile_args,
#extra_link_args=extra_link_args,
extra_link_args=extra_link_args + openmp_link_args,
)
CYTHON_EXTS.append(ext)
ext = Extension("psana.eventbuilder",
sources=["psana/eventbuilder.pyx"],
include_dirs=["psana"],
extra_compile_args=extra_c_compile_args,
extra_link_args=extra_link_args,
)
CYTHON_EXTS.append(ext)
ext = Extension("psana.parallelreader",
sources=["psana/parallelreader.pyx"],
include_dirs=["psana"],
extra_compile_args=extra_c_compile_args + openmp_compile_args,
extra_link_args=extra_link_args + openmp_link_args,
)
CYTHON_EXTS.append(ext)
if 'HSD' in BUILD_LIST :
ext = Extension("hsd",
sources=["psana/hsd/hsd.pyx"],
libraries=[],
language="c++",
extra_compile_args=extra_cxx_compile_args,
include_dirs=[np.get_include(),
"../install/include",
os.path.join(instdir, 'include')],
library_dirs = [os.path.join(instdir, 'lib')],
extra_link_args = extra_link_args_rpath,
)
CYTHON_EXTS.append(ext)
#if 'NDARRAY' in BUILD_LIST :
# ext = Extension("ndarray",
# sources=["psana/pycalgos/NDArray_ext.pyx",
# "psana/peakFinder/src/WFAlgos.cc"],
# language="c++",
# extra_compile_args = extra_cxx_compile_args,
# include_dirs=[os.path.join(sys.prefix,'include'), np.get_include(), os.path.join(instdir, 'include')],
# library_dirs = [os.path.join(instdir, 'lib')],
# libraries=[],
# extra_link_args = extra_link_args,
# )
# CYTHON_EXTS.append(ext)
setup(
name = 'psana',
version = VERSION,
license = 'LCLS II',
description = 'LCLS II analysis package',
install_requires = INSTALL_REQS,
packages = PACKAGES,
package_data = PACKAGE_DATA,
#cmdclass={'build_ext': my_build_ext},
ext_modules = EXTS + cythonize(CYTHON_EXTS, build_dir=CYT_BLD_DIR, language_level=2, annotate=True),
entry_points = ENTRY_POINTS,
)
|
[] | [] | ["VERSION", "BUILD_LIST", "INSTDIR"] | [] | ["VERSION", "BUILD_LIST", "INSTDIR"] | python | 3 | 0 | |
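The setup script above is configured entirely through environment variables rather than command-line options. A hedged sketch of a small driver that sets the three extracted variables before delegating to `setup.py`; the install prefix, the reduced `BUILD_LIST`, and the working directory are hypothetical:

```python
import os
import subprocess
import sys

# Hypothetical install prefix and a reduced extension list, mirroring the
# colon-separated BUILD_LIST format parsed by setup.py above.
env = dict(os.environ,
           INSTDIR="/tmp/psana-install",
           BUILD_LIST="PSANA:DGRAM",
           VERSION="0.1.0")

subprocess.run(
    [sys.executable, "setup.py", "build_ext", "--inplace"],
    cwd="psana",  # directory assumed to contain the setup.py shown above
    env=env,
    check=True,
)
```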
test/src/main/java/io/strimzi/test/TestUtils.java
|
/*
* Copyright 2018, Strimzi authors.
* License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html).
*/
package io.strimzi.test;
import com.fasterxml.jackson.annotation.JsonInclude;
import com.fasterxml.jackson.core.JsonProcessingException;
import com.fasterxml.jackson.databind.DeserializationFeature;
import com.fasterxml.jackson.databind.JsonMappingException;
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.exc.InvalidFormatException;
import com.fasterxml.jackson.databind.node.ArrayNode;
import com.fasterxml.jackson.databind.node.ObjectNode;
import com.fasterxml.jackson.dataformat.yaml.YAMLGenerator;
import com.fasterxml.jackson.dataformat.yaml.YAMLMapper;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import java.io.BufferedReader;
import java.io.BufferedWriter;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.io.OutputStreamWriter;
import java.io.Reader;
import java.io.Writer;
import java.net.URISyntaxException;
import java.net.URL;
import java.nio.charset.Charset;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Paths;
import java.util.AbstractMap;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import java.util.function.BooleanSupplier;
import java.util.function.Function;
import java.util.stream.Collector;
import java.util.stream.Collectors;
import static java.util.Arrays.asList;
import static org.junit.Assert.assertEquals;
import static org.junit.Assume.assumeTrue;
public final class TestUtils {
private static final Logger LOGGER = LogManager.getLogger(TestUtils.class);
public static final String LINE_SEPARATOR = System.getProperty("line.separator");
public static final String CRD_TOPIC = "../install/cluster-operator/043-Crd-kafkatopic.yaml";
public static final String CRD_KAFKA = "../install/cluster-operator/040-Crd-kafka.yaml";
public static final String CRD_KAFKA_CONNECT = "../install/cluster-operator/041-Crd-kafkaconnect.yaml";
public static final String CRD_KAFKA_CONNECT_S2I = "../install/cluster-operator/042-Crd-kafkaconnects2i.yaml";
public static final String CRD_KAFKA_USER = "../install/cluster-operator/044-Crd-kafkauser.yaml";
public static final String CRD_KAFKA_MIRROR_MAKER = "../install/cluster-operator/045-Crd-kafkamirrormaker.yaml";
private TestUtils() {
// All static methods
}
/** Returns a Map of the given sequence of key, value pairs. */
public static <T> Map<T, T> map(T... pairs) {
if (pairs.length % 2 != 0) {
throw new IllegalArgumentException();
}
Map<T, T> result = new HashMap<>(pairs.length / 2);
for (int i = 0; i < pairs.length; i += 2) {
result.put(pairs[i], pairs[i + 1]);
}
return result;
}
/**
* Poll the given {@code ready} function every {@code pollIntervalMs} milliseconds until it returns true,
or throw a TimeoutException if it doesn't return true within {@code timeoutMs} milliseconds.
* @return The remaining time left until timeout occurs
* (helpful if you have several calls which need to share a common timeout),
* */
public static long waitFor(String description, long pollIntervalMs, long timeoutMs, BooleanSupplier ready) {
return waitFor(description, pollIntervalMs, timeoutMs, ready, () -> { });
}
public static long waitFor(String description, long pollIntervalMs, long timeoutMs, BooleanSupplier ready, Runnable onTimeout) {
LOGGER.debug("Waiting for {}", description);
long deadline = System.currentTimeMillis() + timeoutMs;
while (true) {
boolean result = ready.getAsBoolean();
long timeLeft = deadline - System.currentTimeMillis();
if (result) {
return timeLeft;
}
if (timeLeft <= 0) {
onTimeout.run();
throw new TimeoutException("Timeout after " + timeoutMs + " ms waiting for " + description + " to be ready");
}
long sleepTime = Math.min(pollIntervalMs, timeLeft);
if (LOGGER.isTraceEnabled()) {
LOGGER.trace("{} not ready, will try again in {} ms ({}ms till timeout)", description, sleepTime, timeLeft);
}
try {
Thread.sleep(sleepTime);
} catch (InterruptedException e) {
return deadline - System.currentTimeMillis();
}
}
}
public static String indent(String s) {
StringBuilder sb = new StringBuilder();
String[] lines = s.split("[\n\r]");
for (String line : lines) {
sb.append(" ").append(line).append(System.lineSeparator());
}
return sb.toString();
}
public static String getFileAsString(String filePath) {
try {
return new String(Files.readAllBytes(Paths.get(filePath)), "UTF-8");
} catch (IOException e) {
LOGGER.info("File with path {} not found", filePath);
}
return "";
}
public static String changeOrgAndTag(String image, String newOrg, String newTag) {
return image.replaceFirst("^strimzi/", newOrg + "/").replaceFirst(":[^:]+$", ":" + newTag);
}
public static String changeOrgAndTag(String image) {
String strimziOrg = "strimzi";
String strimziTag = "latest";
String dockerOrg = System.getenv().getOrDefault("DOCKER_ORG", strimziOrg);
String dockerTag = System.getenv().getOrDefault("DOCKER_TAG", strimziTag);
return changeOrgAndTag(image, dockerOrg, dockerTag);
}
/**
* Read the classpath resource with the given resourceName and return the content as a String
* @param cls The class relative to which the resource will be loaded.
* @param resourceName The name of the resource
* @return The resource content
*/
public static String readResource(Class<?> cls, String resourceName) {
try {
URL url = cls.getResource(resourceName);
if (url == null) {
return null;
} else {
return new String(
Files.readAllBytes(Paths.get(
url.toURI())),
StandardCharsets.UTF_8);
}
} catch (IOException | URISyntaxException e) {
throw new RuntimeException(e);
}
}
/**
* Read loaded resource as an InputStream and return the content as a String
* @param stream Loaded resource
* @return The resource content
*/
public static String readResource(InputStream stream) {
StringBuilder textBuilder = new StringBuilder();
try (Reader reader = new BufferedReader(new InputStreamReader(
stream, Charset.forName(StandardCharsets.UTF_8.name()))
)) {
int character = 0;
while ((character = reader.read()) != -1) {
textBuilder.append((char) character);
}
} catch (IOException e) {
e.printStackTrace();
}
return textBuilder.toString();
}
public static String readFile(File file) {
try {
if (file == null) {
return null;
} else {
return new String(
Files.readAllBytes(file.toPath()),
StandardCharsets.UTF_8);
}
} catch (IOException e) {
throw new RuntimeException(e);
}
}
/**
* Assert that the given actual string is the same as content of the
* the classpath resource resourceName.
* @param cls The class relative to which the resource will be loaded.
* @param resourceName The name of the resource
* @param actual The actual
* @throws IOException
*/
public static void assertResourceMatch(Class<?> cls, String resourceName, String actual) throws IOException {
String r = readResource(cls, resourceName);
assertEquals(r, actual);
}
public static <T> Set<T> set(T... elements) {
return new HashSet(asList(elements));
}
public static <T> T fromYaml(String resource, Class<T> c) {
return fromYaml(resource, c, false);
}
public static <T> T fromYaml(String resource, Class<T> c, boolean ignoreUnknownProperties) {
URL url = c.getResource(resource);
if (url == null) {
return null;
}
ObjectMapper mapper = new YAMLMapper().configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, !ignoreUnknownProperties);
try {
return mapper.readValue(url, c);
} catch (InvalidFormatException e) {
throw new IllegalArgumentException(e);
} catch (IOException e) {
throw new RuntimeException(e);
}
}
public static <T> T fromYamlString(String yamlContent, Class<T> c) {
return fromYamlString(yamlContent, c, false);
}
public static <T> T fromYamlString(String yamlContent, Class<T> c, boolean ignoreUnknownProperties) {
ObjectMapper mapper = new YAMLMapper().configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, !ignoreUnknownProperties);
try {
return mapper.readValue(yamlContent, c);
} catch (InvalidFormatException e) {
throw new IllegalArgumentException(e);
} catch (IOException e) {
throw new RuntimeException(e);
}
}
public static <T> String toYamlString(T instance) {
ObjectMapper mapper = new YAMLMapper()
.disable(YAMLGenerator.Feature.USE_NATIVE_TYPE_ID)
.setSerializationInclusion(JsonInclude.Include.NON_EMPTY);
try {
return mapper.writeValueAsString(instance);
} catch (JsonProcessingException e) {
throw new RuntimeException(e);
}
}
/** @deprecated you should be using yaml, not json */
@Deprecated
public static <T> T fromJson(String json, Class<T> c) {
if (json == null) {
return null;
}
ObjectMapper mapper = new ObjectMapper().configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false);
try {
return mapper.readValue(json, c);
} catch (JsonMappingException e) {
throw new IllegalArgumentException(e);
} catch (IOException e) {
throw new RuntimeException(e);
}
}
public static String toJsonString(Object instance) {
ObjectMapper mapper = new ObjectMapper()
.setSerializationInclusion(JsonInclude.Include.NON_NULL);
try {
return mapper.writeValueAsString(instance);
} catch (JsonProcessingException e) {
throw new RuntimeException(e);
}
}
public static void assumeLinux() {
assumeTrue(System.getProperty("os.name").contains("nux"));
}
/** Map Streams utility methods */
public static <K, V> Map.Entry<K, V> entry(K key, V value) {
return new AbstractMap.SimpleEntry<>(key, value);
}
public static <K, U> Collector<Map.Entry<K, U>, ?, Map<K, U>> entriesToMap() {
return Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue);
}
/** Method to create and write file */
public static void writeFile(String filePath, String text) {
Writer writer = null;
try {
writer = new BufferedWriter(new OutputStreamWriter(
new FileOutputStream(filePath), StandardCharsets.UTF_8));
writer.write(text);
} catch (IOException e) {
LOGGER.info("Exception during writing text in file");
e.printStackTrace();
} finally {
try {
if (writer != null) {
writer.close();
}
} catch (Exception ex) {
ex.printStackTrace();
}
}
}
/**
* Changes the {@code subject} of the RoleBinding in the given YAML resource to be the
* {@code strimzi-cluster-operator} {@code ServiceAccount} in the given namespace.
* @param roleBindingFile
* @param namespace
* @return
*/
public static String changeRoleBindingSubject(File roleBindingFile, String namespace) {
YAMLMapper mapper = new YAMLMapper();
try {
JsonNode node = mapper.readTree(roleBindingFile);
ArrayNode subjects = (ArrayNode) node.get("subjects");
ObjectNode subject = (ObjectNode) subjects.get(0);
subject.put("kind", "ServiceAccount")
.put("name", "strimzi-cluster-operator")
.put("namespace", namespace);
return mapper.writeValueAsString(node);
} catch (IOException e) {
throw new RuntimeException(e);
}
}
public static String getContent(File file, Function<JsonNode, String> edit) {
YAMLMapper mapper = new YAMLMapper();
try {
JsonNode node = mapper.readTree(file);
return edit.apply(node);
} catch (IOException e) {
throw new RuntimeException(e);
}
}
}
|
[] | [] | [] | [] | [] | java | 0 | 0 | |
main.py
|
"""
Set your env like the example below:
token=
mysql=
default=
dev=
"""
def temporary_fix():
from shutil import copyfile
copyfile("./utils/post.py","/opt/virtualenvs/python3/lib/python3.8/site-packages/instascrape/scrapers/post.py")
import os
import sys
import subprocess
sys.path.insert(1,f"{os.getcwd()}/utils/")
sys.path.insert(1,f"{os.getcwd()}/src")
sys.path.insert(1,f"{os.getcwd()}/cogs")
print("Booting up")
temporary_fix()
from keep_alive import keep_alive
import string
import nextcord
from utils import helping_hand
from random import choice
from nextcord import Interaction, SlashOption, ChannelType
from nextcord.ext import commands, tasks
from nextcord.abc import GuildChannel
from GoogleNews import GoogleNews
from dotenv import load_dotenv
from math import *
from statistics import *
from utils.Storage_facility import Variables
from io import StringIO
from contextlib import redirect_stdout
from utils.External_functions import *
import traceback
import youtube_dl
import re as regex
import urllib.request
import ffmpeg
import time
import emoji
import psutil
import asyncio
import requests
import aiohttp
from io import BytesIO
import src.error as ror
from utils.spotify_client import *
import assets
location_of_file = os.getcwd()
start_time = time.time()
try:
load_dotenv()
except:
pass
import speedtest
try:
st_speed = speedtest.Speedtest()
except:
print("failed")
googlenews = GoogleNews()
global sent
global past_respose, generated
observer=[]
mspace={}
past_respose = []
generated = []
deathrate = {}
sent = None
instagram_posts = []
intents = nextcord.Intents().default()
intents.members = True
old_youtube_vid = {}
youtube_cache = {}
deleted_message = {}
config = {
'snipe': [841026124174983188, 822445271019421746,830050310181486672, 912569937116147772],
'respond': [],
'youtube': {},
'welcome': {},
'ticket' : {},
'security':{},
'commands':{},
'reactions':{}
}
da = {}
errors = ["```arm"]
da1 = {}
queue_song = {}
temporary_list = []
dev_channel = int(os.getenv("dev"))
re = [0, "OK", {}, {}, -1, "", "205", {}, 5360, "48515587275%3A0AvceDiA27u1vT%3A26",1]
a_channels = [822500785765875749, 822446957288357888]
cat = {}
youtube = []
pages = {}
autor = {}
SESSIONID = None
color_message = None
color_temp = ()
vc_channel = {}
wolfram = os.getenv("wolfram")
prefix_dict = {}
# developer user IDs; replace the ID below with your own
dev_users = ["432801163126243328"]
ydl_op = {
"format": "bestaudio/best",
"postprocessors": [
{
"key": "FFmpegExtractAudio",
"preferredcodec": "mp3",
"preferredquality": "384",
}
],
}
FFMPEG_OPTIONS = {
"before_options": "-reconnect 1 -reconnect_streamed 1 -reconnect_delay_max 5",
"options": "-vn",
}
print("Starting")
async def search_vid(name):
pass
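# Per-guild command prefix: falls back to "'" when a guild has not configured
# one, and the bot's @mention is always accepted as an alternative prefix.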
def prefix_check(client, message):
return prefix_dict.get(message.guild.id if message.guild is not None else None, "'"),f"<@{client.user.id}> "
client = nextcord.ext.commands.Bot(
command_prefix=prefix_check,
intents=intents,
case_insensitive=True,
)
def save_to_file():
global dev_users
print("save")
v = Variables("storage")
v.pass_all(
da = client.da,
mspace = client.mspace,
da1 = client.da1,
queue_song = client.queue_song,
a_channels = a_channels,
re = re,
dev_users = dev_users,
prefix_dict = prefix_dict,
observer = observer,
old_youtube_vid = old_youtube_vid,
config = config,
autor = autor
)
v.save()
def load_from_file():
global da
global da1
global queue_song
global re
global dev_users
global prefix_dict
global observer
global old_youtube_vid
global config
global mspace
global autor
v = Variables("storage").show_data()
da = v.get("da",{})
da1 = v.get("da1", {})
queue_song = v.get("queue_song",{})
a_channels = v.get("a_channels",[])
re = v.get("re",re)
dev_users = v.get("dev_users",dev_users)
prefix_dict = v.get("prefix_dict",{})
observer = v.get("observer",[])
old_youtube_vid = v.get("old_youtube_vid",{})
config = v.get("config",config)
mspace = v.get("mspace",{})
autor = v.get("autor",{})
    # attach shared state to the client so cogs and other modules can use it
client.re = re
client.dev_users = dev_users
client.config = config
client.prefix_dict = prefix_dict
client.da = da
client.da1 = da1
client.queue_song = queue_song
client.mspace = mspace
client.observer = observer
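    # A cog can then read this shared state from the bot instance it is attached
    # to, e.g. self.client.queue_song (assuming the usual nextcord cog pattern
    # used by the modules under ./cogs).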
load_from_file()
report = f"""Started at: {timestamp(int(start_time))}
Current location: {location_of_file}
Requests: {re[0]:,}
Color: {nextcord.Color(re[8]).to_rgb()}
```yml
[ OK ] Loaded all modules
[ OK ] Setup SpeedTest and GoogleNews
[ OK ] Variables initialised
[ OK ] Load From File Completed
[ OK ] Switching Root ...
"""
for i in os.listdir(location_of_file + "/src"):
if i.endswith(".py"):
a = ""
try:
print(i, end="")
requi = __import__(i[0 : len(i) - 3]).requirements()
if type(requi) is str:
a = f"__import__('{i[0:len(i)-3]}').main(client,{requi})"
eval(a)
if type(requi) is list:
a = f"__import__('{i[0:len(i)-3]}').main(client,{','.join(requi)})"
eval(a)
print(": Done")
report+=f"[ OK ] Imported {i} successfully\n"
except Exception as e:
print(": Error")
report+=f"[ {int(time.time()-start_time)} ] Error in {i}: {e}\n{a} \n"
errors.append(f"[ {int(time.time()-start_time)} ] Error in {i}: {str(e)[:10]}...\n")
@client.event
async def on_ready():
print(client.user)
global report
await client.change_presence(activity=nextcord.Activity(type=nextcord.ActivityType.watching, name="Booting in progress"))
report+=f"[ OK ] Starting On Ready\n[ OK ] Bot named as {client.user.name}\n"
channel = client.get_channel(dev_channel)
if channel:
report+="[ OK ] Devop found, let's go\n"
try:
print("Starting Load from file")
load_from_file()
print("Finished loading\n")
print("\nStarting devop display")
await devop_mtext(client, channel, re[8])
report+="[ OK ] Sending Devop Message\n"
print("Finished devop display")
await client.rollout_application_commands()
with open("commands.txt","w") as f:
for i in client.commands:
f.write(i.name+"\n")
report+="[ OK ] Updated commands txt file"
except Exception as e:
mess = await channel.send(
embed=nextcord.Embed(
title="Error in the function on_ready",
description=str(e),
color=nextcord.Color(value=re[8]),
)
)
await mess.add_reaction("❌")
dev_loop.start()
print("Prepared")
youtube_loop.start()
send_file_loop.start()
report+="```"
await channel.send(
embed=cembed(
title="Report",
description=report,
color=re[8],
thumbnail=client.user.avatar.url
)
)
@tasks.loop(hours=4)
async def send_file_loop():
save_to_file()
await client.get_channel(941601738815860756).send(file=nextcord.File("storage.dat",filename="storage.dat"))
@tasks.loop(minutes=30)
async def youtube_loop():
await client.change_presence(activity=nextcord.Activity(type=nextcord.ActivityType.listening, name=str(len(client.guilds))+" servers"))
print("Youtube_loop")
for i,l in config['youtube'].items():
await asyncio.sleep(2)
for j in l:
try:
a = get_youtube_url(j[0])
if a[0]=="https://www.youtube.com/" or a[0]=="https://www.youtube.com":
return
if not old_youtube_vid.get(i, None):
old_youtube_vid[i] = {}
if not old_youtube_vid[i].get(j[0], None):
old_youtube_vid[i][j[0]] = ""
if old_youtube_vid[i][j[0]] == a[0]:
continue
old_youtube_vid[i][j[0]] = a[0]
try:
message=j[1]
await client.get_channel(i).send(embed=cembed(title="New Video out", description=f"New Video from {j[0]}",url=a[0],color=re[8],thumbnail=client.get_channel(i).guild.icon.url))
await client.get_channel(i).send(a[0]+"\n"+message)
except Exception as e:
await client.get_channel(dev_channel).send(embed=cembed(title="Error in youtube_loop",description=f"{str(e)}\nSomething is wrong with channel no. {i}",color=re[8]))
except: pass
@tasks.loop(seconds = 30)
async def dev_loop():
save_to_file()
try:
await get_async("https://tessarect.prakarsh17.senarc.org")
await get_async("https://suicide-detector-api-1.yashvardhan13.repl.co/")
await get_async("https://Ellisa-Bot.arghyathegod.repl.co")
except:
pass
@client.slash_command(name = "embed", description = "Create a quick embed using slash commands")
async def quickembed(ctx, text):
await ctx.send(
embed=cembed(
description = text,
color=re[8]
)
)
@client.command()
@commands.check(check_command)
async def svg(ctx, *, url):
img = svg2png(url)
await ctx.send(file=nextcord.File(BytesIO(img), "svg.png"))
@dev_loop.before_loop
async def wait_for_ready():
await client.wait_until_ready()
@send_file_loop.before_loop
async def wait_for_ready():
await client.wait_until_ready()
@youtube_loop.before_loop
async def wait_for_ready():
await client.wait_until_ready()
@client.slash_command(name = "giveaway", description = "You can use this for giveaway")
async def giveaway(ctx, donor:nextcord.User = None, required_role:nextcord.Role = " ", heading = "Giveaway", description = "Giveaway", emoji = emoji.emojize(":party_popper:"), image = "https://media.discordapp.net/attachments/960070023563603968/963041700996063282/standard_6.gif"):
await ctx.response.defer()
if not ctx.user.guild_permissions.manage_channels:
await ctx.send(
embed=cembed(
title="Permissions Denied",
description="You need admin permission to access this function",
color=re[8]
)
)
return
if heading is None: heading = "Giveaway"
if donor is None:donor = ctx.user
embed=cembed(
title=heading,
description=description,
color=re[8],
thumbnail=client.user.avatar.url,
image=image
)
embed.set_author(name=donor.name,icon_url=safe_pfp(donor))
m = await ctx.send(f"Giveaway!! Requirement: {required_role.mention if required_role !=' ' else required_role}",embed=embed)
await m.add_reaction(emoji)
@client.command()
@commands.check(check_command)
@commands.cooldown(1,10,commands.BucketType.guild)
async def roll(ctx):
if not ctx.author.guild_permissions.manage_channels:
await ctx.send(
embed=cembed(
title="Permissions Denied",
description="You need admin permission to access this function"
)
)
return
if not ctx.message.reference:
await ctx.send("You need to reply to a giveaway message by Alfred")
return
message = await ctx.channel.fetch_message(ctx.message.reference.message_id)
if not message.author == client.user:
await ctx.reply("Heyyyyy, wait a minute, that's not my giveaway mesage")
return
if not message.clean_content.startswith("Giveaway"):
await ctx.reply("Ok that's my messsage, but is that a giveaway message?????")
return
reaction = message.reactions[0]
users = await reaction.users().flatten()
users.remove(client.user)
roles = message.raw_role_mentions
print(roles)
if len(roles) > 0: roles = roles[0]
if type(roles) == int: roles = ctx.guild.get_role(roles)
for i in users.copy():
if roles != [] and roles not in i.roles:
users.remove(i)
await reaction.remove(i)
await message.edit(
embed=cembed(
title="Time up",
description="The giveaway has ended, hope you get it the next time",
color=re[8],
thumbnail=client.user.avatar.url
)
)
    lu = choice(users)
await reaction.remove(lu)
lu = lu.mention
await ctx.send(f"Congratulations, {lu} has won the giveaway")
@client.command(aliases=['autoreaction'])
@commands.check(check_command)
async def autoreact(ctx, channel: nextcord.TextChannel = None,*, Emojis: str = ""):
if not getattr(ctx, 'author', getattr(ctx, 'user', None)).guild_permissions.administrator:
await ctx.send(
embed=cembed(
title="Permissions Denied",
description="You cannot set autoreact, you do not have admin privilege",
color=re[8]
)
)
return
if not channel:
await ctx.send(
embed=cembed(
title="Hmm",
description=emoji.emojize("You need to mention a channel\n'autoreact #channel :one:|:two:|:three:"),
color=re[8]
)
)
return
if Emojis == "":
await ctx.send(
embed = cembed(
title="Hmm",
description="You need one or more emojis separated by |",
color=re[8]
)
)
return
if channel.id not in autor:
autor[channel.id]=[i.strip() for i in emoji.demojize(Emojis).split("|")]
else:
autor[channel.id]+=[i.strip() for i in emoji.demojize(Emojis).split("|")]
await ctx.send(
embed=cembed(
title="Done",
description=f"For every message in {channel.mention} Alfred will add {Emojis} reaction",
color=re[8]
)
)
@client.command()
@commands.check(check_command)
async def remove_autoreact(ctx, channel: nextcord.TextChannel = None):
if not getattr(ctx, 'author', getattr(ctx, 'user', None)).guild_permissions.administrator:
await ctx.send(
embed=cembed(
title="Permissions Denied",
description="You cannot remove autoreact, you do not have admin privilege",
color=re[8]
)
)
return
if not channel.id in autor:
await ctx.send(
embed=cembed(
title="Hmm",
description="This channel does not have any reactions",
color=re[8]
)
)
return
confirmation = await wait_for_confirm(ctx,client,"Do you want to remove every automatic reaction in this channel?",color=re[8],usr=getattr(ctx, 'author', getattr(ctx, 'user', None)))
if not confirmation:
return
autor.pop(channel.id)
await ctx.send(
embed=cembed(
title="Done",
description="Removed every reaction in ",
color=re[8]
)
)
@client.slash_command(name="emoji", description="Get Emojis from other servers")
async def emoji_slash(ctx, emoji_name, number=1):
req()
number=int(number) - 1
if nextcord.utils.get(client.emojis, name=emoji_name) != None:
emoji_list = [names.name for names in client.emojis if names.name == emoji_name]
le = len(emoji_list)
if le >= 2:
if number > le - 1:
number = le - 1
emoji = [names for names in client.emojis if names.name == emoji_name][
number
].id
await ctx.send(str(nextcord.utils.get(client.emojis, id=emoji)))
else:
await ctx.send(
embed=nextcord.Embed(
description="The emoji is not available",
color=nextcord.Color(value=re[8]),
)
)
@client.command(aliases=["e", "emoji"])
@commands.check(check_command)
@commands.cooldown(1,5,commands.BucketType.guild)
async def uemoji(ctx, emoji_name, number=1):
req()
number-=1
try:
await ctx.message.delete()
except:
pass
if emoji_name.startswith(":"):
emoji_name = emoji_name[1:]
if emoji_name.endswith(":"):
emoji_name = emoji_name[:-1]
if nextcord.utils.get(client.emojis, name=emoji_name) != None:
emoji_list = [names.name for names in client.emojis if names.name == emoji_name]
le = len(emoji_list)
if le >= 2:
if number > le - 1:
number = le - 1
user = getattr(ctx, 'author', getattr(ctx, 'user', None))
emoji = [names for names in client.emojis if names.name == emoji_name][number]
webhook = await ctx.channel.create_webhook(name=user.name)
await webhook.send(emoji, username=user.name, avatar_url=safe_pfp(user))
await webhook.delete()
else:
await ctx.send(
embed=nextcord.Embed(
description="The emoji is not available",
color=nextcord.Color(value=re[8]),
)
)
@client.slash_command(name="svg2png", description="Convert SVG image to png format")
async def svg2png_slash(ctx, url):
req()
await ctx.response.defer()
img = svg2png(url)
await ctx.send(file=nextcord.File(BytesIO(img), "svg.png"))
@client.command(aliases=["cw"])
@commands.check(check_command)
async def clear_webhooks(ctx):
webhooks = await ctx.channel.webhooks()
print(webhooks)
for webhook in webhooks:
try:
if webhook.user is client.user:
await webhook.delete()
except Exception as e:
print(e)
await ctx.send(
embed=cembed(
title="Done",
description="Deleted all the webhooks by alfred",
color=re[8],
thumbnail=client.user.avatar.url
)
)
@client.slash_command(name="color",description="Change color theme", guild_ids= [822445271019421746])
async def color_slash(ctx, rgb_color=defa(default="")):
rgb_color = rgb_color.replace("(","").replace(")","").split(",")
if str(ctx.user.id) not in dev_users:
await ctx.send(
embed=cembed(
title="Woopsies",
description="This is a `developer-only` function",
color=discord.Color.red(),
thumbnail=client.user.avatar.url
)
)
return
if len(rgb_color)!=3:
await ctx.send(
embed=cembed(
title="Error",
description="You need RGB values, 3 values seperated with commas\nExample: `(128,128,128)`",
color=re[8],
footer="Give it another try",
thumbnail=client.user.avatar.url
)
)
return
re[8] = discord.Color.from_rgb(*[int(i) for i in rgb_color]).value
if re[8]>16777215: re[8] = 16777215
embed=cembed(
title="Done",
description=f"Color set as {nextcord.Color(re[8]).to_rgb()}\n`{re[8]}`",
color=re[8],
thumbnail = client.user.avatar.url,
footer=f"Executed by {ctx.user.name} in {ctx.channel.name}"
)
await ctx.send(embed=embed)
await client.get_channel(dev_channel).send(embed=embed)
@client.command()
@commands.check(check_command)
async def load(ctx):
print("Load", str(getattr(ctx, 'author', getattr(ctx, 'user', None))))
req()
try:
cpu_per = str(int(psutil.cpu_percent()))
cpu_freq = str(int(psutil.cpu_freq().current))
ram = str(psutil.virtual_memory().percent)
swap = str(psutil.swap_memory().percent)
usage = f"""
CPU Percentage: {cpu_per}%
CPU Frequency : {cpu_freq}Mhz
RAM usage: {ram}%
Swap usage: {swap}%
Nextcord: {nextcord.__version__}
"""
embed = nextcord.Embed(
title="Current load",
description='\n'.join([i.strip() for i in usage.split('\n')]),
color=nextcord.Color(value=re[8]),
)
embed.set_thumbnail(url=client.user.avatar.url)
except Exception as e:
channel = client.get_channel(dev_channel)
embed = nextcord.Embed(
title="Load failed",
description=str(e),
color=nextcord.Color(value=re[8]),
)
embed.set_thumbnail(url=client.user.avatar.url)
await ctx.channel.send(embed=embed)
@client.slash_command(name="pr", description="Prints what you ask it to print")
async def pr_slash(ctx, text):
req()
await ctx.send(text)
@client.command(aliases=["c"])
@commands.check(check_command)
async def cover_up(ctx):
await ctx.message.delete()
await asyncio.sleep(0.5)
mess = await ctx.send(nextcord.utils.get(client.emojis, name="enrique"))
await mess.delete()
@client.command()
@commands.check(check_command)
async def remove_dev(ctx, member: nextcord.Member):
print(member)
global dev_users
if str(getattr(ctx, 'author', getattr(ctx, 'user', None)).id) in ["432801163126243328","803855283821871154","723539849969270894"]:
dev_users.remove(str(member.id))
await ctx.send(member.mention + " is no longer a dev")
else:
await ctx.send(
embed=nextcord.Embed(
title="Permission Denied",
description="Dude! You are not Alvin",
color=nextcord.Color(value=re[8]),
)
)
@client.command()
@commands.check(check_command)
async def add_dev(ctx, member: nextcord.Member):
print(member)
print("Add dev", str(getattr(ctx, 'author', getattr(ctx, 'user', None))))
global dev_users
if str(getattr(ctx, 'author', getattr(ctx, 'user', None)).id) in dev_users:
dev_users.add(str(member.id))
await ctx.send(member.mention + " is a dev now")
else:
await ctx.send(
embed=nextcord.Embed(
title="Permission Denied",
description="Dude! you are not a dev",
color=nextcord.Color(value=re[8]),
)
)
@client.command()
@commands.check(check_command)
async def dev_op(ctx):
if str(getattr(ctx, 'author', getattr(ctx, 'user', None)).id) in list(dev_users):
print("devop", str(getattr(ctx, 'author', getattr(ctx, 'user', None))))
channel = client.get_channel(dev_channel)
await devop_mtext(client, channel, re[8])
else:
await ctx.send(embed=cembed(title="Permission Denied",description="You cannot use the devop function, only a developer can",color=re[8]))
@client.command()
@commands.check(check_command)
async def docs(ctx, name):
try:
if name.find("(") == -1:
await ctx.send(
embed=nextcord.Embed(
title="Docs",
description=str(eval(name + ".__doc__")),
color=nextcord.Color(value=re[8]),
)
)
else:
await ctx.send(
embed=nextcord.Embed(
title="Permissions Denied",
description="Functions are not allowed. Try without the brackets to get the information",
color=nextcord.Color(value=re[8]),
)
)
except Exception as e:
await ctx.send(
embed=nextcord.Embed(
title="Error", description=str(e), color=nextcord.Color(value=re[8])
)
)
@client.slash_command(name="snipe", description="Get the last few deleted messages")
async def snipe_slash(inter, number = 50):
req()
await snipe(inter, number)
@client.command()
@commands.check(check_command)
async def snipe(ctx, number=50):
number = int(number)
if (
getattr(ctx, 'author', getattr(ctx, 'user', None)).guild_permissions.administrator
or ctx.guild.id not in config['snipe']
):
message = deleted_message.get(ctx.channel.id,[("Empty","Nothing to snipe here")])[::-1]
count=0
embeds = []
s = ""
for i in message[:number]:
count+=1
if len(i) < 3:
s+="**" + i[0] + ":**\n" + i[1]+"\n\n"
if count%5==0 or count == len(message) or count == number:
embed=cembed(
title="Snipe",
description=s,
color=re[8],
thumbnail=safe_pfp(ctx.guild)
)
embeds.append(embed)
s=""
else:
await ctx.send("**" + i[0] + ":**",embed=i[1])
if len(embeds)>0:
await assets.pa(ctx, embeds, start_from=0, restricted=True)
else:
await ctx.send(
embed=cembed(
title="Permissions Denied",
description="Sorry guys, only admins can snipe now",
color=re[8],
thumbnail=getattr(client.user.avatar,'url'),
)
)
@client.event
async def on_bulk_message_delete(messages):
for i in messages:
await on_message_delete(i)
@client.event
async def on_message_delete(message):
if not message.channel.id in list(deleted_message.keys()):
deleted_message[message.channel.id] = []
if len(message.embeds) <= 0:
if not message.author.bot:
deleted_message[message.channel.id].append(
(str(message.author), message.content)
)
else:
if not message.author.bot:
deleted_message[message.channel.id].append(
(str(message.author), message.embeds[0], True)
)
@client.event
async def on_member_join(member):
print(member.guild)
print("Join")
if member.guild.id in config['welcome']:
channel = client.get_channel(config['welcome'][member.guild.id])
else: return
await channel.send(member.mention + " is here")
embed = nextcord.Embed(
title="Welcome!!!",
description="Welcome to the server, " + member.name,
color=nextcord.Color(value=re[8]),
)
embed.set_thumbnail(
url="https://image.shutterstock.com/image-vector/welcome-poster-spectrum-brush-strokes-260nw-1146069941.jpg"
)
await channel.send(embed=embed)
if member.guild.id in config['security']:
audit_log = await member.guild.audit_logs(limit=10).flatten()
latest=audit_log[0]
if member.bot:
channel = client.get_channel(config['security'][member.guild.id])
if channel:
await channel.send(
embed=cembed(
title="Bot added",
description=f"{latest.target.mention} was added by {latest.user.mention}, please be careful while handling bots and try not to provide it with all the permissions as it can be dangerous",
color=re[8],
footer="Security alert by Alfred"
)
)
@client.event
async def on_member_remove(member):
print(member.guild)
if member.guild.id in config.get('welcome',[]):
channel = client.get_channel(config['welcome'][member.guild.id])
else: return
embed = cembed(
title="Bye!!!",
description="Hope you enjoyed your stay " + member.name,
color=nextcord.Color(value=re[8]),
thumbnail="https://thumbs.dreamstime.com/b/bye-bye-man-says-45256525.jpg"
)
await channel.send(member.mention + " is no longer here", embed=embed)
if member.guild.id in config['security']:
a = client.get_guild(member.guild.id)
audit_log = await a.audit_logs(limit=10).flatten()
latest = audit_log[0]
if latest.target == member:
channel = client.get_channel(config['security'][member.guild.id])
if latest.action == nextcord.AuditLogAction.ban:
await channel.send(
embed=cembed(
title=f"Banned",
description=f"{latest.user.mention} banned {latest.target.name}",
color=re[8],
footer="Security alert by Alfred",
thumbnail=member.guild.icon.url
)
)
elif latest.action == nextcord.AuditLogAction.kick:
await channel.send(
embed=cembed(
title=f"Kicked",
description=f"{latest.user.mention} kicked {latest.target.name}",
color=re[8],
footer="Security alert by Alfred",
thumbnail=member.guild.icon.url
)
)
@client.command()
@commands.check(check_command)
async def remove(ctx, n):
req()
mem = [names.id for names in ctx.guild.voice_client.channel.members] if ctx.guild.voice_client else []
if mem.count(getattr(ctx, 'author', getattr(ctx, 'user', None)).id) > 0:
if int(n) < len(queue_song[str(ctx.guild.id)]):
await ctx.send(
embed=nextcord.Embed(
title="Removed",
description=da1[queue_song[str(ctx.guild.id)][int(n)]],
color=nextcord.Color(value=re[8]),
)
)
if re[3][str(ctx.guild.id)]>int(n):re[3][str(ctx.guild.id)]-=1
del da1[queue_song[str(ctx.guild.id)][int(n)]]
queue_song[str(ctx.guild.id)].pop(int(n))
else:
await ctx.send(
embed=nextcord.Embed(
title="Not removed",
description="Only "
                    + str(len(queue_song[str(ctx.guild.id)]))
+ " song(s) in your queue",
color=nextcord.Color(value=re[8]),
)
)
else:
await ctx.send(
embed=nextcord.Embed(
title="Permission denied",
description="Join the voice channel to modify queue",
color=nextcord.Color(value=re[8]),
)
)
def repeat(ctx, voice):
req()
songs = queue_song.get(str(ctx.guild.id),[])
if len(songs) == 0: return
index = re[3].get(str(ctx.guild.id),0)
if len(songs)<index:
index = 0
re[3][str(ctx.guild.id)]=index
song = songs[index]
if not song in da1.keys():
aa = str(urllib.request.urlopen(song).read().decode())
starting = aa.find("<title>") + len("<title>")
ending = aa.find("</title>")
da1[song] = (
aa[starting:ending]
.replace("'", "'")
.replace(" - YouTube", "")
.replace("&", "&")
)
time.sleep(1)
if re[7].get(ctx.guild.id,-1) == 1 and not voice.is_playing():
re[3][str(ctx.guild.id)] += 1
if re[3][str(ctx.guild.id)] >= len(queue_song[str(ctx.guild.id)]):
re[3][str(ctx.guild.id)] = 0
if re[2].get(ctx.guild.id,-1) == 1 or re[7].get(ctx.guild.id,-1) == 1:
if not voice.is_playing():
URL = youtube_download(ctx, song)
voice.play(
nextcord.FFmpegPCMAudio(URL, **FFMPEG_OPTIONS),
after=lambda e: repeat(ctx, voice),
)
@client.command(aliases=["q"])
@commands.check(check_command)
@commands.cooldown(1,5,commands.BucketType.guild)
async def queue(ctx, *, name=""):
req()
st = ""
num = 0
try:
mem = [str(names) for names in ctx.guild.voice_client.channel.members]
except:
mem = []
if mem.count(str(getattr(ctx, 'author', getattr(ctx, 'user', None)))) > 0 and name != "":
if 'spotify' in name:
if 'playlist' in name:
await ctx.send('Enqueued the given Spotify playlist.')
try:
songs = await fetch_spotify_playlist(name, 500)
for song in songs:
try:
name = convert_to_url(song)
sear = "https://www.youtube.com/results?search_query=" + name
htm = await get_async(sear)
video = regex.findall(r"watch\?v=(\S{11})", htm)
url = "https://www.youtube.com/watch?v=" + video[0]
st = ""
num = 0
name_of_the_song = await get_name(url)
da1[url] = name_of_the_song
queue_song[str(ctx.guild.id)].append(url)
except Exception as e:
print(e)
break
except Exception as e:
print(e)
elif 'track' in name:
name = convert_to_url(name)
sear = "https://www.youtube.com/results?search_query=" + name
htm = await get_async(sear)
video = regex.findall(r"watch\?v=(\S{11})", htm)
url = "https://www.youtube.com/watch?v=" + video[0]
st = ""
num = 0
name_of_the_song = await get_name(url)
print(name_of_the_song, ":", url)
da1[url] = name_of_the_song
queue_song[str(ctx.guild.id)].append(url)
else:
name = convert_to_url(name)
sear = "https://www.youtube.com/results?search_query=" + name
htm = await get_async(sear)
video = regex.findall(r"watch\?v=(\S{11})", htm)
url = "https://www.youtube.com/watch?v=" + video[0]
st = ""
await ctx.send("Added to queue")
num = 0
name_of_the_song = await get_name(url)
print(name_of_the_song, ":", url)
da1[url] = name_of_the_song
queue_song[str(ctx.guild.id)].append(url)
for i in queue_song[str(ctx.guild.id)]:
if num >= len(queue_song[str(ctx.guild.id)]) - 10:
if not i in da1.keys():
da1[i] = await get_name(i)
st = st + str(num) + ". " + da1[i].replace(""", "'") + "\n"
num += 1
# st=st+str(num)+". "+da1[i]+"\n"
if st == "":
st = "_Empty_"
em = nextcord.Embed(
title="Queue", description=st, color=nextcord.Color(value=re[8])
)
mess = await ctx.send(embed=em)
if type(ctx) == nextcord.Interaction:
mess = await ctx.original_message()
await player_pages(mess)
elif name == "":
num = 0
st = ""
if len(queue_song[str(ctx.guild.id)]) < 30:
for i in queue_song[str(ctx.guild.id)]:
if not i in da1.keys():
da1[i] = youtube_info(i)["title"]
st = st + str(num) + ". " + da1[i] + "\n"
num += 1
else:
num = re[3].get(str(ctx.guild.id),10)
if num<10: num = 10
for i in range(num-10, num+10):
try:
st += f"{i}. {da1.get(queue_song[str(ctx.guild.id)][i],'Unavailable')}\n"
except: pass
embed = cembed(
title="Queue",
description=st if st != "" else "Empty",
color=re[8],
thumbnail=client.user.avatar.url
)
mess = await ctx.send(embed=embed)
if type(ctx) == nextcord.Interaction:
mess = await ctx.original_message()
await player_pages(mess)
else:
await ctx.send(
embed=nextcord.Embed(
title="Permission denied",
description="Join the voice channel to modify queue",
color=nextcord.Color(value=re[8]),
)
)
async def player_pages(mess):
await player_reaction(mess)
emojis = emoji.emojize(":upwards_button:"),emoji.emojize(":downwards_button:")
def check(reaction, user):
return (
user.id != client.user.id
and str(reaction.emoji) in emojis
and reaction.message.id == mess.id
)
page=re[3][str(mess.guild.id)]//10
while True:
songs = queue_song[str(mess.guild.id)]
try:
reaction, user = await client.wait_for("reaction_add",check=check, timeout=None)
if reaction.emoji == emojis[0] and page>0:
page-=1
elif reaction.emoji == emojis[1] and page<=len(songs):
page+=1
cu = page * 10
st = '\n'.join([f"{i}. {da1[songs[i]]}" for i in range(cu,cu+10) if len(songs)>i])
await mess.edit(
embed=cembed(
title="Queue",
description=st,
color=re[8],
footer='Amazing songs btw, keep going' if len(songs)!=0 else 'Use queue to add some songs'
)
)
await reaction.remove(user)
except asyncio.TimeoutError:
await mess.clear_reactions()
@client.command(aliases=[">", "skip"])
@commands.check(check_command)
async def next(ctx):
req()
try:
try:
mem = [str(names) for names in ctx.guild.voice_client.channel.members]
except:
mem = []
if mem.count(str(getattr(ctx, 'author', getattr(ctx, 'user', None)))) > 0:
re[3][str(ctx.guild.id)] += 1
if re[3][str(ctx.guild.id)] >= len(queue_song[str(ctx.guild.id)]):
re[3][str(ctx.guild.id)] = len(queue_song[str(ctx.guild.id)]) - 1
await ctx.send(
embed=nextcord.Embed(
title="Last song",
description="Only "
+ str(len(queue_song[str(ctx.guild.id)]))
+ " songs in your queue",
color=nextcord.Color(value=re[8]),
)
)
song = queue_song[str(ctx.guild.id)][re[3][str(ctx.guild.id)]]
voice = nextcord.utils.get(client.voice_clients, guild=ctx.guild)
URL = youtube_download(ctx, song)
embed=nextcord.Embed(
title="Playing",
description=da1.get(song,"Unavailable"),
color=nextcord.Color(value=re[8]),
)
await isReaction(ctx,embed)
voice.stop()
voice.play(
nextcord.FFmpegPCMAudio(URL, **FFMPEG_OPTIONS),
after=lambda e: repeat(ctx, voice),
)
else:
embed=nextcord.Embed(
title="Permission denied",
description="Join the voice channel to move to the next song",
color=nextcord.Color(value=re[8]),
)
await isReaction(ctx,embed)
except Exception as e:
channel = client.get_channel(dev_channel)
await channel.send(
embed=cembed(
title="Error in next function",
description=str(e),
footer=f"{ctx.channel.name}:{ctx.guild.name}",
color=nextcord.Color(value=re[8]),
)
)
@client.command(aliases=["<"])
@commands.check(check_command)
async def previous(ctx):
req()
try:
try:
mem = [str(names) for names in ctx.guild.voice_client.channel.members]
except:
mem = []
if mem.count(str(getattr(ctx, 'author', getattr(ctx, 'user', None)))) > 0:
re[3][str(ctx.guild.id)] -= 1
if re[3][str(ctx.guild.id)] == -1:
re[3][str(ctx.guild.id)] = len(queue_song.get(str(ctx.guild.id),[]))-1
song = queue_song[str(ctx.guild.id)][re[3][str(ctx.guild.id)]]
if not song in da1.keys():
da1[song] = youtube_info(song)["title"]
voice = nextcord.utils.get(client.voice_clients, guild=ctx.guild)
URL = youtube_download(ctx, song)
embed=nextcord.Embed(
title="Playing",
description=da1[song],
color=nextcord.Color(value=re[8]),
)
voice.stop()
voice.play(
nextcord.FFmpegPCMAudio(URL, **FFMPEG_OPTIONS),
after=lambda e: repeat(ctx, voice),
)
await isReaction(ctx,embed)
else:
embed=nextcord.Embed(
title="Permission denied",
description="Join the voice channel to move to the previous song",
color=nextcord.Color(value=re[8]),
)
await isReaction(ctx,embed)
except Exception as e:
channel = client.get_channel(dev_channel)
await channel.send(
embed=cembed(
title="Error in previous function",
description=str(e),
color=nextcord.Color(value=re[8]),
footer=f"{ctx.author.name}: {ctx.guild.name}"
)
)
@client.slash_command(name="dictionary", description="Use the dictionary for meaning")
async def dic(ctx, word):
await ctx.response.defer()
try:
mean = Meaning(word = word, color = re[8])
await mean.setup()
await assets.pa(ctx, mean.create_texts(), start_from=0, restricted=False)
except Exception as e:
await ctx.send(
embed=ef.cembed(
title="Something is wrong",
description="Oops something went wrong, I gotta check this out real quick, sorry for the inconvenience",
color=discord.Color.red(),
thumbnail=client.user.avatar.url
)
)
print(traceback.format_exc())
@client.slash_command(name = "play", description = "play a song, you can also put a song name in that")
async def play_slash(inter, index):
await inter.response.defer()
await play(inter, index = index)
@client.slash_command(name = "queue", description = "play a song")
async def queue_slash(inter, song = "-"):
if song == "-": song = ""
await queue(inter, name = song)
@client.command(aliases=["p"])
@commands.check(check_command)
@commands.cooldown(1,10,commands.BucketType.guild)
async def play(ctx, *, index):
ind = index
req()
if (
ctx.guild.voice_client == None
and getattr(ctx, 'author', getattr(ctx, 'user', None)).voice
and getattr(ctx, 'author', getattr(ctx, 'user', None)).voice.channel
):
if not str(ctx.guild.id) in queue_song:
queue_song[str(ctx.guild.id)] = []
if not str(ctx.guild.id) in re[3]:
re[3][str(ctx.guild.id)] = 0
channel = getattr(ctx, 'author', getattr(ctx, 'user', None)).voice.channel.id
vc_channel[str(ctx.guild.id)] = channel
voiceChannel = nextcord.utils.get(ctx.guild.voice_channels, id=channel)
await voiceChannel.connect()
try:
if check_voice(ctx):
if ind.isnumeric():
if int(ind) <= len(queue_song[str(ctx.guild.id)]):
client.re[3][str(ctx.guild.id)] = int(ind)
voice = nextcord.utils.get(client.voice_clients, guild=ctx.guild)
index = client.re[3][str(ctx.guild.id)]
songs = client.queue_song[str(ctx.guild.id)]
song = songs[index]
URL = youtube_download(ctx, song)
if song not in client.da1:
client.da1[song] = await get_name(song)
mess = await ctx.send(
embed=nextcord.Embed(
title="Playing",
description=da1[song],
color=nextcord.Color(value=re[8]),
)
)
voice.stop()
voice.play(
nextcord.FFmpegPCMAudio(URL, **FFMPEG_OPTIONS),
after=lambda e: repeat(ctx, voice),
)
await player_pages(mess)
else:
embed = nextcord.Embed(
title="Hmm",
description=f"There are only {len(songs)} songs",
color=nextcord.Color(value=client.re[8]),
)
await ctx.send(embed=embed)
else:
name = ind
voice = nextcord.utils.get(client.voice_clients, guild=ctx.guild)
name = convert_to_url(name)
htm = await get_async("https://www.youtube.com/results?search_query=" + name)
video = regex.findall(r"watch\?v=(\S{11})", htm)
if len(video) == 0:
await ctx.send(
embed=cembed(
description="We couldnt find the song, please try it with a different name, shorter name is prefered",
color=client.re[8]
)
)
return
url = "https://www.youtube.com/watch?v=" + video[0]
URL, name_of_the_song = youtube_download1(ctx, url)
client.re[3][str(ctx.guild.id)] = len(queue_song[str(ctx.guild.id)])
                queue_song[str(ctx.guild.id)].append(url)
da1[url] = name_of_the_song
voice.stop()
voice.play(
nextcord.FFmpegPCMAudio(URL, **FFMPEG_OPTIONS),
after=lambda e: repeat(ctx, voice),
)
await ctx.send(
embed=nextcord.Embed(
title="Playing",
description=name_of_the_song,
color=nextcord.Color(value=re[8]),
)
)
else:
await ctx.send(
embed=nextcord.Embed(
title="Permission denied",
description="Join the voice channel to play the song",
color=nextcord.Color(value=re[8]),
)
)
except Exception as e:
channel = client.get_channel(dev_channel)
await ctx.send(
embed=nextcord.Embed(
title="Error in play function",
description=f"{e}",
color=nextcord.Color(value=re[8]),
)
)
await channel.send(
embed=nextcord.Embed(
title="Error in play function",
description=f"{e}\n{ctx.guild.name}: {ctx.channel.name}",
color=nextcord.Color(value=re[8]),
)
)
@client.command()
async def feedback(ctx, *, text):
embed=cembed(
title=f"Message from {getattr(ctx, 'author', getattr(ctx, 'user', None)).name}: {ctx.guild.name}",
description=text,
color=re[8],
thumbnail=client.user.avatar.url
)
await ctx.send(embed=embed)
confirmation = await wait_for_confirm(ctx,client,"Do you want to send this to the developers?",color=re[8])
if not confirmation:
return
auth = getattr(ctx,'author',getattr(ctx,'user', None)).id
await client.get_channel(932890298013614110).send(
content=str(ctx.channel.id)+" "+str(auth),
embed=embed
)
await ctx.send(
embed=cembed(
title="Done",
description="I've given this info to the developers, they will try fixing it asap :smiley:",
color=re[8]
)
)
@client.slash_command(name = "feedback",description="Send a feedback to the developers")
async def f_slash(inter, text):
await feedback(inter, text=text)
async def poll(ctx, Options = "", Question = "", image=""):
channel = ctx.channel
text = Question+"\n\n"
Options = Options.split("|")
if len(Options)>=20:
reply = "Use this if you want to redo\n\n"
reply+= f"Question: `{Question}` \n"
reply+= f"Options: `{'|'.join(Options)}`"
await ctx.send(
embed=cembed(
title="Sorry you can only give 20 options",
description=reply,
color=discord.Color.red(),
thumbnail=client.user.avatar.url
)
        )
        return
for i in range(len(Options)):
text+=f"{emoji.emojize(f':keycap_{i+1}:') if i<10 else Emoji_alphabets[i-10]} | {Options[i].strip()}\n"
embed=cembed(
title="Poll",
description=text,
color=re[8],
footer=f"from {getattr(ctx, 'author', getattr(ctx, 'user', None)).name} | {ctx.guild.name}",
picture = image
)
embed.set_author(name = getattr(ctx, 'author', getattr(ctx, 'user', None)).name, icon_url = getattr(ctx, 'author', getattr(ctx, 'user', None)).avatar.url if getattr(ctx, 'author', getattr(ctx, 'user', None)).avatar else client.user.avatar.url)
message = await ctx.send(
embed = embed
)
for i in range(len(Options)): await message.add_reaction(emoji.emojize(f":keycap_{i+1}:") if i<10 else Emoji_alphabets[i-10])
@client.slash_command(name="polling", description="Seperate options with |")
async def polling_slash(ctx, question = None, options="yes|no",image="https://upload.wikimedia.org/wikipedia/commons/archive/c/ca/20200404084254%211x1.png"):
await ctx.response.defer()
await poll(ctx, Options = options, Question = question if question else "", image = image)
@client.slash_command(name="eval",description="This is only for developers",guild_ids= [822445271019421746])
async def eval_slash(ctx,text):
await python_shell(ctx, text = text)
@client.command(aliases=["!"])
@commands.check(check_command)
async def restart_program(ctx, text):
if str(getattr(ctx, 'author', getattr(ctx, 'user', None)).id) in list(dev_users):
save_to_file()
if len(client.voice_clients)>0:
confirmation = await wait_for_confirm(
ctx, client, f"There are {len(client.voice_clients)} servers listening to music through Alfred, Do you wanna exit?", color=re[8]
)
if not confirmation:
return
try:
for voice in client.voice_clients:
voice.stop()
await voice.disconnect()
except:
pass
await client.change_presence(activity = nextcord.Activity(type = nextcord.ActivityType.listening, name= "Restart"))
print("Restart")
await ctx.channel.send(
embed=cembed(
title="Restarted",
description="The program is beginning it's restarting process",
color=re[8],
thumbnail=client.user.avatar.url
)
)
await client.get_channel(dev_channel).send(
embed=cembed(
title="Restart",
description=f"Requested by {getattr(ctx, 'author', getattr(ctx, 'user', None)).name}",
thumbnail=client.user.avatar.url,
color=re[8]
)
)
os.system("busybox reboot")
else:
await ctx.channel.send(embed=cembed(title="Permission Denied",description="Only developers can access this function",color=re[8],thumbnail=client.user.avatar.url))
await client.get_channel(dev_channel).send(embed=cembed(description=f"{getattr(ctx, 'author', getattr(ctx, 'user', None)).name} from {ctx.guild.name} tried to use restart_program command",color=re[8]))
@client.command(aliases=["*"])
@commands.check(check_command)
async def change_nickname(ctx, member: nextcord.Member, *, nickname):
if (getattr(ctx, 'author', getattr(ctx, 'user', None)).guild_permissions.change_nickname or getattr(ctx, 'author', getattr(ctx, 'user', None)).id == 432801163126243328):
if (getattr(ctx, 'author', getattr(ctx, 'user', None)).top_role.position > member.top_role.position):
await member.edit(nick=nickname)
await ctx.send(
embed=nextcord.Embed(
title="Nickname Changed",
description=(
"Nickname changed to "
+ member.mention
+ " by "
+ getattr(ctx, 'author', getattr(ctx, 'user', None)).mention
),
color=nextcord.Color(value=re[8]),
)
)
else:
await ctx.send(
embed=nextcord.Embed(
title="Permission Denied",
description=(
"You do not have the required permissions."
),
color=nextcord.Color(value=re[8]),
)
)
else:
await ctx.send(
embed=nextcord.Embed(
title="Permissions Denied",
description="You dont have permission to change others nickname",
color=nextcord.Color(value=re[8]),
)
)
@client.command()
@commands.check(check_command)
async def dev_test(ctx, id:nextcord.Member=None):
if id:
if str(id.id) in dev_users:
await ctx.send(f"{id} is a dev!")
else:
await ctx.send(f"{id} is not a dev!")
else:
await ctx.send("You need to mention somebody")
@client.event
async def on_message_edit(message_before, message_after):
await client.process_commands(message_after)
@client.command()
@commands.check(check_command)
async def clear(ctx, text, num=10):
req()
await ctx.message.delete()
if str(text) == re[1]:
user = getattr(ctx, 'author', getattr(ctx, 'user', None))
if user.guild_permissions.manage_messages or user.id == 432801163126243328:
confirmation = True
if int(num) > 10:
confirmation = await wait_for_confirm(
ctx, client, f"Do you want to delete {num} messages", color=re[8]
)
if confirmation:
await ctx.channel.delete_messages(
[i async for i in ctx.channel.history(limit=num) if not i.pinned][:100]
)
else:
await ctx.send(
embed=nextcord.Embed(
title="Permission Denied",
description="You cant delete messages",
color=nextcord.Color(value=re[8]),
)
)
else:
await ctx.send("Wrong password")
@client.event
async def on_reaction_add(reaction, user):
req()
ctx = reaction.message
try:
if not user.bot:
global Emoji_list
if reaction.emoji == "⏮":
if (
str(user) != str(client.user)
and reaction.message.author == client.user
):
try:
await reaction.remove(user)
except:
pass
req()
reaction.message.author = user
await previous(reaction.message)
if reaction.emoji == "⏭":
if (
str(user) != str(client.user)
and reaction.message.author == client.user
):
try:await reaction.remove(user)
except:pass
req()
reaction.message.author = user
await next(reaction.message)
if str(user.id) in list(dev_users):
global dev_channel
channel = client.get_channel(dev_channel)
if (
reaction.emoji == emoji.emojize(":laptop:")
and str(reaction.message.channel.id) == str(channel.id)
and reaction.message.author == client.user
):
string = ""
await reaction.remove(user)
for i in list(dev_users):
string = string + str(client.get_user(int(i)).name) + "\n"
await channel.send(
embed=nextcord.Embed(
title="Developers",
description=string + "\n\nThank you for supporting",
color=nextcord.Color(value=re[8]),
)
)
if reaction.emoji == emoji.emojize(":bar_chart:") and str(
reaction.message.channel.id
) == str(channel.id):
await reaction.remove(user)
                    reaction.message.author = user
await load(reaction.message)
if reaction.emoji == "⭕" and ctx.channel.id == channel.id:
await reaction.remove(user)
await channel.send(
embed=nextcord.Embed(
title="Servers",
                            description='\n'.join(i.name for i in client.guilds),
color=nextcord.Color(value=re[8]),
)
)
if reaction.emoji == emoji.emojize(":fire:") and str(
reaction.message.channel.id
) == str(channel.id):
reaction.message.author = user
await restart_program(reaction.message,re[1])
if reaction.emoji == '💾' and reaction.message.channel.id == channel.id:
save_to_file()
await reaction.remove(user)
if reaction.emoji == emoji.emojize(":cross_mark:") and str(
reaction.message.channel.id
) == str(channel.id):
await reaction.remove(user)
if len(client.voice_clients)>0:
confirmation = await wait_for_confirm(
reaction.message, client, f"There are {len(client.voice_clients)} servers listening to music through Alfred, Do you wanna exit?", color=re[8], usr=user
)
if not confirmation:
return
try:
for voice in client.voice_clients:
voice.stop()
await voice.disconnect()
except:
pass
await channel.purge(limit=10000000000)
await channel.send(
embed=nextcord.Embed(
title="Exit",
description=("Requested by " + str(user)),
color=nextcord.Color(value=re[8]),
)
)
sys.exit()
if reaction.emoji == emoji.emojize(":satellite:") and str(
reaction.message.channel.id
) == str(channel.id):
string = ""
await reaction.remove(user)
await channel.send("Starting speedtest")
download_speed = int(st_speed.download()) // 1024 // 1024
upload_speed = int(st_speed.upload()) // 1024 // 1024
servers = st_speed.get_servers([])
ping = st_speed.results.ping
await channel.send(
embed=nextcord.Embed(
title="Speedtest Results:",
description=str(download_speed)
+ "Mbps\n"
+ str(upload_speed)
+ "Mbps\n"
+ str(ping)
+ "ms",
color=nextcord.Color(value=re[8]),
)
)
if reaction.emoji == emoji.emojize(":black_circle:") and str(
reaction.message.channel.id
) == str(channel.id):
await devop_mtext(client, channel, re[8])
except PermissionError:
await reaction.message.channel.send(embed=cembed(
title="Missing Permissions",
description="Alfred is missing permissions, please try to fix this, best recommended is to add Admin to the bot",
color=re[8],
thumbnail=client.user.avatar.url)
)
except Exception as e:
channel = client.get_channel(dev_channel)
await channel.send(
embed=cembed(
title="Error in on_reaction_add",
description=f"{traceback.format_exc()}",
footer=f"{reaction.message.guild.name}:{reaction.message.channel.name}",
color=nextcord.Color(value=re[8]),
)
)
@client.command()
async def learn(ctx):
embeds = []
with open("Learn.md") as f:
l = f.read().replace("- ",":diamond_shape_with_a_dot_inside: ").split("\n\n")
j = l[:8]
j.append("\n\n".join(l[8:]))
a=0
for i in j:
a+=1
embed = cembed(title="Learn",color=re[8],description=i,footer=str(a)+" of "+str(len(j)))
embeds.append(embed)
await assets.pa(ctx,embeds)
@client.command()
async def howdoi(ctx, *, question):
await ctx.send(embed=cembed(description=subprocess.getoutput(f"howdoi {question}"),color=re[8],title="How Do I"))
@client.event
async def on_command_error(ctx, error):
    if isinstance(error, nextcord.errors.HTTPException):
os.system("busybox reboot")
if type(error) == nextcord.ext.commands.errors.CheckFailure:
await ctx.send(
embed=cembed(
title="Disabled command",
description="This command has been disabled by your admin, please ask them to enable it to use this\n\nIf you're an admin and you want to enable this command, use `/commands <enable> <command_name>`",
color=client.re[8],
thumbnail=safe_pfp(ctx.author)
)
)
return
channel = client.get_channel(dev_channel)
    if isinstance(error, nextcord.HTTPException): os.system("busybox reboot")
print(error.with_traceback(error.__traceback__))
if type(error) != nextcord.ext.commands.errors.CommandNotFound:
await ctx.send(embed=ror.error(str(error)))
await channel.send(embed=cembed(title="Error",description=f"\n{str(error)}", color=re[8], thumbnail=client.user.avatar.url, footer = f"{getattr(ctx, 'author', getattr(ctx, 'user', None)).name}:{ctx.guild.name}"))
@client.event
async def on_message(msg):
await client.process_commands(msg)
    # this block is disabled via the trailing `and False`; the author/guild
    # checks come first so DMs (where msg.guild is None) don't raise
    if (not msg.author.bot) and msg.guild and (msg.guild.id not in observer) and False:
try:
s = msg.clean_content
whitelist = string.ascii_letters + ' '
global new_s
new_s = ''.join(c for c in s if c in whitelist)
req()
new_s = regex.sub(' +', ' ', new_s)
            if new_s != '' and new_s is not None:
json = {"text" : new_s}
if msg.author.id not in deathrate.keys():
deathrate[msg.author.id]=0
preds = await post_async("https://suicide-detector-api-1.yashvardhan13.repl.co/classify", json=json)
if preds["result"] == "Sucide":
await msg.add_reaction("🫂")
deathrate[msg.author.id]+=1
except Exception as e:
print(e)
auth = os.getenv("transformers_auth")
headeras = {"Authorization": f"Bearer {auth}"}
BASE_URL = "https://api-inference.huggingface.co/models"
if re[10] == 1:API_URL = f"{BASE_URL}/facebook/blenderbot-400M-distill"
else:API_URL = f"{BASE_URL}/microsoft/DialoGPT-large"
try:
if msg.content.lower().startswith("alfred ") and msg.guild.id not in config['respond'] and not msg.author.bot:
input_text = msg.content.lower().replace("alfred", "")
payload = {
"inputs": {
"past_user_inputs": past_respose,
"generated_responses": generated,
"text": input_text,
},
"parameters": {"repetition_penalty": 1.33},
}
if re[10]!=1:
payload = {
"inputs": input_text
}
output = await post_async(API_URL, header=headeras, json=payload)
if len(past_respose) < 50:
past_respose.append(input_text)
generated.append(output["generated_text"])
else:
past_respose.pop(0)
generated.pop(0)
past_respose.append(input_text)
generated.append(output["generated_text"])
await msg.reply(output["generated_text"])
if msg.channel.id in autor:
for emo in autor[msg.channel.id]:
await msg.add_reaction(emoji.emojize(emo.strip()))
await asyncio.sleep(1)
except Exception as e:
channel = client.get_channel(dev_channel)
await channel.send(
embed=nextcord.Embed(
title="Error", description=str(e), color=nextcord.Color(value=re[8])
)
)
@client.command(aliases=["m"])
@commands.check(check_command)
async def python_shell(ctx, *, text):
req()
print("Python Shell", text, str(getattr(ctx, 'author', getattr(ctx, 'user', None))))
global dev_users
if str(getattr(ctx, 'author', getattr(ctx, 'user', None)).id) in dev_users:
try:
text = text.replace("```py", "").replace("```", "")
a = eval(text)
print(text)
em = cembed(
title=text,
description=str(a),
color=nextcord.Color(value=re[8]),
thumbnail="https://engineering.fb.com/wp-content/uploads/2016/05/2000px-Python-logo-notext.svg_.png"
)
await ctx.send(embed=em)
except Exception as e:
await ctx.send(
embed=nextcord.Embed(
title="Error_message",
description=str(e),
color=nextcord.Color(value=re[8]),
)
)
else:
try:
await ctx.message.delete()
except:
pass
await ctx.send(
embed=nextcord.Embed(
description="Permissions Denied",
color=nextcord.Color(value=re[8]),
)
)
@client.command()
@commands.check(check_command)
async def exe(ctx, *, text):
req()
if (
str(getattr(ctx, 'author', getattr(ctx, 'user', None)).id) in dev_users
):
if str(getattr(ctx, 'author', getattr(ctx, 'user', None)).id) in dev_users and ctx.guild.id != 822445271019421746:
await ctx.send(
embed=cembed(
title="Permissions Denied",
description = "You can only use this command in Batcave",
color=re[8]
)
)
return
text = text.replace("```py", "```")
text = text[3:-3].strip()
f = StringIO()
with redirect_stdout(f):
try:
exec(text)
except Exception as e:
traceback.print_tb(e.__traceback__)
error_mssg = "Following Error Occured:\n\n"+traceback.format_exc()
await ctx.send(embed = ror.error(error_mssg))
output = f.getvalue()
embeds=[]
if output == "":
output = "_"
for i in range(len(output)//2000+1):
em = cembed(title="Python",description=output[i*2000:i*2000+2000],color=re[8])
em.set_thumbnail(
url="https://engineering.fb.com/wp-content/uploads/2016/05/2000px-Python-logo-notext.svg_.png"
)
embeds.append(em)
await assets.pa(ctx, embeds, start_from=0, restricted=False)
else:
await ctx.send(
embed=nextcord.Embed(
title="Denied",
description="Ask Devs to give access for scripts",
color=nextcord.Color(value=re[8]),
)
)
@client.command()
async def cute_cat(ctx, res="1920x1080"):
query = "kitten"
resp = requests.get(f"https://source.unsplash.com/{res}?{query}")
file = open("cat.png", "wb")
file.write(resp.content)
file.close()
with open("cat.png","rb") as f:
file = discord.File(f)
em = discord.Embed(title=ctx.author, color=re[8])
em.set_image(url="attachment://cat.png")
await ctx.send(file=file, embed=em)
def addt(p1, p2):
da[p1] = p2
return "Done"
def get_elem(k):
return da.get(k, "Not assigned yet")
def de(k):
del da[k]
return "Done"
def req():
re[0] = re[0] + 1
def g_req():
return re[0]
def reload_extension(name):
client.unload_extension(f'cogs.{name}')
return load_extension(name)
def load_extension(name):
    '''
    Safely load a cog for Alfred, passing along all of the globals that its
    requirements() function asks for.
    '''
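    # Illustrative only: a hypothetical cogs/music.py that defines
    #     def requirements(): return ["client", "re", "queue_song"]
    # would be loaded below with those three globals handed over via `extras`.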
try:
l = __import__(name).requirements()
d = {}
for i in l:
d[i] = globals()[i]
client.load_extension(f'cogs.{name}',extras=d)
return f"[ OK ] Added {name}\n"
except:
return f"Error in cog {name}:\n"+traceback.format_exc()+"\n"
def load_all():
for i in os.listdir(os.getcwd()+"/cogs"):
if i.endswith(".py"):
global report
report+=load_extension(i[:-3])
client.remove_command("help")
load_all()
keep_alive()
try:
client.run(os.getenv("token"))
except:
    print(traceback.format_exc())
    time.sleep(20)
    os.system("busybox reboot")
|
[] |
[] |
[
"transformers_auth",
"wolfram",
"dev",
"token"
] |
[]
|
["transformers_auth", "wolfram", "dev", "token"]
|
python
| 4 | 0 | |
pref/pref.go
|
// +build !android !darwin !windows
package pref
import (
"flag"
"fmt"
"log"
"os"
"os/user"
"path/filepath"
"strconv"
"strings"
)
// Verbose output
var (
Verbose = false
)
const (
maxEntries = 10000
	minMaxEntries = 200 // minimum allowed value for the MaxEntries setting
)
// Pref object.
type Pref struct {
DataDir, HomeDir, Add string
List, Search Query
Version, Init bool
MaxEntries int
}
// Parse flags.
func Parse() Pref {
homeDir := userHome()
// env is only set when run as snap
dataDir := os.Getenv("SNAP_USER_COMMON")
if dataDir == "" {
dataDir = filepath.Join(homeDir, ".local/share/maybe")
}
var p Pref
p.HomeDir = homeDir
flagDatadirVar(&p.DataDir, "datadir", dataDir, "")
flag.StringVar(&p.Add, "add", "", "add path to index")
q := flag.String("search", "", "search for keyword")
l := flag.String("list", "", "list results for keyword")
flag.BoolVar(&p.Init, "init", false, "scan $HOME and add folders (six folder-level deep)")
flag.BoolVar(&p.Version, "version", false, "print maybe version")
flagMaxentriesVar(&p.MaxEntries, "max-entries", maxEntries, "maximum unique path-entries")
verb := flag.Bool("v", false, "verbose")
flag.Parse()
p.Search = queryFrom(*q)
p.List = queryFrom(*l)
Verbose = *verb
return p
}
// Query object
type Query struct {
Start, Last string
}
// IsNotEmpty returns true if Query.Last contains data.
func (q *Query) IsNotEmpty() bool { return len(q.Last) > 0 }
func (q Query) String() string {
return fmt.Sprintf("{start: %s last: %s}", q.Start, q.Last)
}
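// queryFrom builds a Query from a flag value plus an optional trailing
// positional argument. Illustrative only: `-search do` yields Query{Last: "do"},
// while `-search do down` yields Query{Start: "do", Last: "down"}.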
func queryFrom(s string) Query {
s = strings.TrimSpace(s)
if s == "" {
return Query{}
}
if arg := flag.Args(); len(arg) > 0 {
return Query{Start: s, Last: arg[0]}
}
return Query{Last: s}
}
func userHome() string {
user, err := user.Current()
if err != nil {
log.Fatalf("current user unknown: %v\n", err)
}
if strings.TrimSpace(user.HomeDir) == "" {
log.Fatal("user home directory path is empty")
}
return user.HomeDir
}
type maxentries int
func flagMaxentriesVar(p *int, name string, value int, usage string) {
*p = value
flag.CommandLine.Var((*maxentries)(p), name, usage)
}
func (m *maxentries) String() string { return strconv.Itoa(int(*m)) }
func (m *maxentries) Set(s string) error {
n, err := strconv.ParseInt(s, 0, 64)
if n < minMaxEntries {
n = minMaxEntries
}
*m = maxentries(n)
return err
}
type datadir string
func flagDatadirVar(p *string, name string, value string, usage string) {
*p = value
flag.CommandLine.Var((*datadir)(p), name, usage)
}
func (m *datadir) String() string { return string(*m) }
func (m *datadir) Set(s string) error {
if strings.TrimSpace(s) == "" {
return fmt.Errorf("datadir empty or consists only of whitespace")
}
*m = datadir(s)
return nil
}
|
[
"\"SNAP_USER_COMMON\""
] |
[] |
[
"SNAP_USER_COMMON"
] |
[]
|
["SNAP_USER_COMMON"]
|
go
| 1 | 0 | |
pkg/stripemock/stripe.go
|
// stripemock is a copy from https://github.com/stripe/stripe-go/blob/master/testing.
package stripemock
import (
"crypto/tls"
"fmt"
"net/http"
"os"
"strconv"
"strings"
stripe "github.com/stripe/stripe-go/v72"
"github.com/stripe/stripe-go/v72/form"
"golang.org/x/net/http2"
"gopkg.in/h2non/gock.v1"
)
// This file should contain any testing helpers that should be commonly
// available across all tests in the Stripe package.
//
// There's not much in here because it's a relatively recent addition to the
// package, but should be used as appropriate for any new changes.
const (
// MockMinimumVersion is the minimum acceptable version for stripe-mock.
// It's here so that if the library depends on new endpoints or features
// added in a more recent version of stripe-mock, we can show people a
// better error message instead of the test suite crashing with a bunch of
// confusing 404 errors or the like.
MockMinimumVersion = "0.109.0"
// TestMerchantID is a token that can be used to represent a merchant ID in
// simple tests.
TestMerchantID = "acct_123"
)
func Init() {
// Enable strict mode on form encoding so that we'll panic if any kind of
// malformed param struct is detected
form.Strict = true
port := os.Getenv("STRIPE_MOCK_PORT")
if port == "" {
port = "12112"
}
// stripe-mock's certificate for localhost is self-signed so configure a
// specialized client that skips the certificate authority check.
transport := &http.Transport{
TLSClientConfig: &tls.Config{
InsecureSkipVerify: true, //nolint
},
}
// Go can often enable HTTP/2 automatically if it's supported, but
// confusingly, if you set `TLSClientConfig`, it disables it and you have
// to explicitly invoke http2's `ConfigureTransport` to get it back.
//
// See the incorrectly closed bug report here:
//
	//   https://github.com/golang/go/issues/20645
//
err := http2.ConfigureTransport(transport)
if err != nil {
fmt.Fprintf(os.Stderr, "Failed to initialize HTTP/2 transport: %v\n", err)
os.Exit(1)
}
httpClient := &http.Client{
Transport: transport,
}
check := os.Getenv("STRIPE_MOCK_INIT_CHECK")
if check != "false" {
resp, err := httpClient.Get("https://localhost:" + port)
if err != nil {
fmt.Fprintf(os.Stderr, "Couldn't reach stripe-mock at `localhost:%s` (%v). Is "+
"it running? Please see README for setup instructions.\n", port, err)
os.Exit(1)
}
version := resp.Header.Get("Stripe-Mock-Version")
if version != "master" && compareVersions(version, MockMinimumVersion) > 0 {
fmt.Fprintf(os.Stderr, "Your version of stripe-mock (%s) is too old. The "+
"minimum version to run this test suite is %s. Please see its "+
"repository for upgrade instructions.\n", version, MockMinimumVersion)
os.Exit(1)
}
}
stripe.Key = "sk_test_myTestKey"
confBackend("https://localhost:"+port, httpClient)
}
func InitForError() {
stripeURL := "http://stripeapi"
gock.New(stripeURL).
Post("/v1/charges").
Reply(402).
BodyString(`{
"error": {
"type":"card_error",
"code": "balance_insufficient",
"message":"card is suspicious"
}
}`)
confBackend(stripeURL, &http.Client{})
}
func confBackend(addr string, httpClient *http.Client) {
// Configure a backend for stripe-mock and set it for both the API and
// Uploads (unlike the real Stripe API, stripe-mock supports both these
// backends).
stripeMockBackend := stripe.GetBackendWithConfig(
stripe.APIBackend,
&stripe.BackendConfig{
URL: stripe.String(addr),
HTTPClient: httpClient,
LeveledLogger: stripe.DefaultLeveledLogger,
},
)
stripe.SetBackend(stripe.APIBackend, stripeMockBackend)
stripe.SetBackend(stripe.UploadsBackend, stripeMockBackend)
}
// compareVersions compares two semantic version strings. We need this because
// with more complex double-digit numbers, lexical comparison breaks down.
func compareVersions(a, b string) (ret int) {
as := strings.Split(a, ".")
bs := strings.Split(b, ".")
loopMax := len(bs)
if len(as) > len(bs) {
loopMax = len(as)
}
for i := 0; i < loopMax; i++ {
var x, y string
if len(as) > i {
x = as[i]
}
if len(bs) > i {
y = bs[i]
}
xi, _ := strconv.Atoi(x)
yi, _ := strconv.Atoi(y)
if xi > yi {
ret = -1
} else if xi < yi {
ret = 1
}
if ret != 0 {
break
}
}
return ret
}
|
[
"\"STRIPE_MOCK_PORT\"",
"\"STRIPE_MOCK_INIT_CHECK\""
] |
[] |
[
"STRIPE_MOCK_INIT_CHECK",
"STRIPE_MOCK_PORT"
] |
[]
|
["STRIPE_MOCK_INIT_CHECK", "STRIPE_MOCK_PORT"]
|
go
| 2 | 0 | |
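The compareVersions helper in pkg/stripemock/stripe.go above exists because plain string comparison mis-orders versions once a component reaches double or triple digits. A minimal standalone Go sketch of the same idea; the olderThan helper below is illustrative and not part of the package:

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// olderThan reports whether version a is older than b by comparing each
// dot-separated component as a number instead of as text.
func olderThan(a, b string) bool {
	as, bs := strings.Split(a, "."), strings.Split(b, ".")
	for i := 0; i < len(as) && i < len(bs); i++ {
		x, _ := strconv.Atoi(as[i])
		y, _ := strconv.Atoi(bs[i])
		if x != y {
			return x < y
		}
	}
	return len(as) < len(bs)
}

func main() {
	// Lexical comparison thinks "0.9.0" is the newer string because '9' > '1'.
	fmt.Println("0.9.0" > "0.109.0") // true, which is wrong for versions
	// Component-wise numeric comparison gets it right: 9 < 109.
	fmt.Println(olderThan("0.9.0", "0.109.0")) // true
}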
dormant/core/src/test/java/org/apache/tamaya/core/properties/PropertySourceBuilderTest.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.tamaya.core.properties;
import org.apache.tamaya.PropertySource;
import org.junit.Test;
import java.util.Map;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
/**
* Created by Anatole on 30.09.2014.
*/
public class PropertySourceBuilderTest {
@Test
public void testFromEnvironmentProperties(){
PropertySource prov = PropertySourceBuilder.of("testdata").addEnvironmentProperties().build();
assertNotNull(prov);
for(Map.Entry<String,String> en:System.getenv().entrySet()){
assertEquals(en.getValue(), prov.get(en.getKey()).get());
}
}
@Test
public void testFromSystemProperties(){
PropertySource prov = PropertySourceBuilder.of("testdata").addSystemProperties().build();
assertNotNull(prov);
for(Map.Entry<Object,Object> en:System.getProperties().entrySet()){
assertEquals(en.getValue(), prov.get(en.getKey().toString()).get());
}
}
}
|
[] |
[] |
[] |
[]
|
[]
|
java
| 0 | 0 | |
booksite/booksite/settings.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import datetime
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'kr71xy'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.sites',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.sitemaps',
'corsheaders',
'rest_framework',
'rest_framework_jwt',
'djoser',
'threadedcomments',
'django_comments',
'robots',
'raven.contrib.django.raven_compat',
'django_assets',
'captcha',
'graphene_django',
'booksite.book',
'booksite.usercenter',
'booksite.background',
'booksite.h5',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'booksite.usercenter.middleware.JWTAuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
'corsheaders.middleware.CorsMiddleware',
)
ROOT_URLCONF = 'booksite.urls'
WSGI_APPLICATION = 'booksite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
GRAPHENE = {
'SCHEMA': 'booksite.schema.schema'
}
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
SITE_ID = 1
COMMENTS_APP = 'threadedcomments'
LANGUAGE_CODE = 'zh-Hans'
TIME_ZONE = 'Asia/Shanghai'
USE_I18N = True
USE_L10N = True
USE_TZ = True
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'APP_DIRS': True,
'DIRS': [
os.path.join(BASE_DIR, 'booksite/templates'),
],
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'django.core.context_processors.request',
"booksite.context_processors.analystics",
"booksite.context_processors.categorys",
"booksite.context_processors.bookmark_update_count",
],
},
},
]
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = (
os.path.join(BASE_DIR, 'booksite/static'),
)
STATICFILES_FINDERS = (
"django.contrib.staticfiles.finders.FileSystemFinder",
"django.contrib.staticfiles.finders.AppDirectoriesFinder",
"django_assets.finders.AssetsFinder"
)
MEDIA_ROOT = os.path.join(BASE_DIR, 'bookstore')
MEDIA_URL = '/media/'
AUTH_USER_MODEL = 'usercenter.User'
LOGIN_URL = '/login/'
LOGIN_REDIRECT_URL = '/'
CACHES = {
'default': {
'BACKEND': 'django_redis.cache.RedisCache',
'LOCATION': '127.0.0.1:6379:1',
'KEY_PREFIX': 'booksite',
'OPTIONS': {
'CLIENT_CLASS': 'django_redis.client.DefaultClient',
'PASSWORD': '',
}
}
}
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_PORT = 587
EMAIL_HOST_USER = '***********@gmail.com'
EMAIL_HOST_PASSWORD = '******'
EMAIL_SUBJECT_PREFIX = u'[kanxiaoshuo.me]'
EMAIL_USE_TLS = True
BROKER_URL = 'amqp://guest:guest@localhost:5672//'
DJOSER = {
'DOMAIN': os.environ.get('DJANGO_DJOSER_DOMAIN', 'bobdylan.local'),
'SITE_NAME': os.environ.get('DJANGO_DJOSER_SITE_NAME', 'my site'),
'PASSWORD_RESET_CONFIRM_URL': '?action=set-new-password&uid={uid}&token={token}',
'ACTIVATION_URL': '?action=activate&uid={uid}&token={token}',
'SEND_ACTIVATION_EMAIL': False,
}
JWT_AUTH = {
'JWT_ALLOW_REFRESH': True,
'JWT_EXPIRATION_DELTA': datetime.timedelta(seconds=300),
}
try:
from .local_settings import *
except:
    raise ImportError('A local_settings file in the same directory as settings must be used')
|
[] |
[] |
[
"DJANGO_DJOSER_SITE_NAME",
"DJANGO_DJOSER_DOMAIN"
] |
[]
|
["DJANGO_DJOSER_SITE_NAME", "DJANGO_DJOSER_DOMAIN"]
|
python
| 2 | 0 | |
templates/templates.go
|
package templates
import (
"database/sql"
"errors"
"fmt"
"github.com/go-martini/martini"
"github.com/martini-contrib/render"
"os"
structs "region-api/structs"
utils "region-api/utils"
)
func GetURLTemplates(db *sql.DB, params martini.Params, r render.Render) {
urltemplates, err := getURLTemplates()
if err != nil {
utils.ReportError(err, r)
return
}
r.JSON(200, urltemplates)
}
func getURLTemplates() (u structs.URLTemplates, e error) {
var urltemplates structs.URLTemplates
urltemplates.Internal = os.Getenv("ALAMO_INTERNAL_URL_TEMPLATE")
urltemplates.External = os.Getenv("ALAMO_URL_TEMPLATE")
if urltemplates.Internal == "" {
fmt.Println("No internal url template")
return urltemplates, errors.New("No Inernal URL Template")
}
if urltemplates.External == "" {
fmt.Println("No external url template")
return urltemplates, errors.New("No External URL Template")
}
return urltemplates, nil
}
|
[
"\"ALAMO_INTERNAL_URL_TEMPLATE\"",
"\"ALAMO_URL_TEMPLATE\""
] |
[] |
[
"ALAMO_INTERNAL_URL_TEMPLATE",
"ALAMO_URL_TEMPLATE"
] |
[]
|
["ALAMO_INTERNAL_URL_TEMPLATE", "ALAMO_URL_TEMPLATE"]
|
go
| 2 | 0 | |
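templates/templates.go above treats ALAMO_INTERNAL_URL_TEMPLATE and ALAMO_URL_TEMPLATE as required configuration and refuses to return templates when either is empty. A rough test-style sketch of that behaviour, assuming Go 1.17+ for t.Setenv; the template values are placeholders, not the real ones used by the service:

package templates

import "testing"

// Sketch only: getURLTemplates is the unexported helper defined above, so this
// test would live in the same package.
func TestGetURLTemplatesRequiresBothVars(t *testing.T) {
	t.Setenv("ALAMO_INTERNAL_URL_TEMPLATE", "https://{name}-{space}.svc.example.internal")
	t.Setenv("ALAMO_URL_TEMPLATE", "https://{name}-{space}.example.com")
	if _, err := getURLTemplates(); err != nil {
		t.Fatalf("expected templates to load, got: %v", err)
	}

	// Clearing the external template should make the loader return an error.
	t.Setenv("ALAMO_URL_TEMPLATE", "")
	if _, err := getURLTemplates(); err == nil {
		t.Fatal("expected an error when ALAMO_URL_TEMPLATE is empty")
	}
}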
api/smtp_owner_reset_hex.go
|
package main
import (
"bytes"
"net/smtp"
"os"
)
type ownerResetHexPlugs struct {
Origin string
ResetHex string
}
func smtpOwnerResetHex(to string, toName string, resetHex string) error {
var header bytes.Buffer
headerTemplate.Execute(&header, &headerPlugs{FromAddress: os.Getenv("SMTP_FROM_ADDRESS"), ToAddress: to, ToName: toName, Subject: "Reset your password"})
var body bytes.Buffer
templates["reset-hex"].Execute(&body, &ownerResetHexPlugs{Origin: os.Getenv("ORIGIN"), ResetHex: resetHex})
err := smtp.SendMail(os.Getenv("SMTP_HOST")+":"+os.Getenv("SMTP_PORT"), smtpAuth, os.Getenv("SMTP_FROM_ADDRESS"), []string{to}, concat(header, body))
if err != nil {
logger.Errorf("cannot send reset email: %v", err)
return errorCannotSendEmail
}
return nil
}
|
[
"\"SMTP_FROM_ADDRESS\"",
"\"ORIGIN\"",
"\"SMTP_HOST\"",
"\"SMTP_PORT\"",
"\"SMTP_FROM_ADDRESS\""
] |
[] |
[
"SMTP_HOST",
"SMTP_PORT",
"SMTP_FROM_ADDRESS",
"ORIGIN"
] |
[]
|
["SMTP_HOST", "SMTP_PORT", "SMTP_FROM_ADDRESS", "ORIGIN"]
|
go
| 4 | 0 | |
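In api/smtp_owner_reset_hex.go above the server address is built by concatenating SMTP_HOST, ":" and SMTP_PORT. The standard library's net.JoinHostPort does the same join and also brackets IPv6 literals correctly; a small sketch, with a made-up host and port set only for the demonstration:

package main

import (
	"fmt"
	"net"
	"os"
)

func main() {
	// Placeholder values; in the real handler these come from the environment.
	os.Setenv("SMTP_HOST", "2001:db8::25")
	os.Setenv("SMTP_PORT", "587")

	plain := os.Getenv("SMTP_HOST") + ":" + os.Getenv("SMTP_PORT")
	joined := net.JoinHostPort(os.Getenv("SMTP_HOST"), os.Getenv("SMTP_PORT"))

	fmt.Println(plain)  // 2001:db8::25:587 (ambiguous for an IPv6 host)
	fmt.Println(joined) // [2001:db8::25]:587 (the form smtp.SendMail can dial)
}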
evaluation/compare_final2_compoelem.py
|
# call this script with `python -m evaluation.evaluate_poselines_globalaction`
import os
import numpy as np
import datetime
from tqdm import tqdm
from . import eval_utils
import pickle
import copyreg
import cv2
from .compare_deepfeatures import negative_cosine_dist_flatten, eucl_dist_flatten
from .compare_sift import compare_siftBFMatcher1
from .compare_orb import compare_orbBFMatcher1
from .compare_brief import compare_briefBFMatcher1
from compoelem.config import config
from compoelem.generate import global_action, pose_abstraction
from compoelem.compare.pose_line import compare_pose_lines_3, compare_pose_lines_3, filter_pose_line_ga_result
from compoelem.compare.normalize import minmax_norm_by_imgrect, minmax_norm_by_bbox, norm_by_global_action
# fix cv2 keypoint pickling error
def _pickle_keypoint(keypoint): # : cv2.KeyPoint
return cv2.KeyPoint, (
keypoint.pt[0],
keypoint.pt[1],
keypoint.size,
keypoint.angle,
keypoint.response,
keypoint.octave,
keypoint.class_id,
)
# Apply the bundling to pickle
copyreg.pickle(cv2.KeyPoint().__class__, _pickle_keypoint)
def compare_setupA(data, sort_method, norm_method, glac_fallback, compare_other, additional_feature_weight):
if norm_method != 'norm_by_global_action':
raise NotImplementedError("only norm_by_global_action is implemented")
res_metrics = {}
precision_curves = {}
all_retrieval_res = []
for query_data in tqdm(data, total=len(data)):
compare_results = []
#query_pose_lines = minmax_norm_by_imgrect(query_data["compoelem"][pose_lines], query_data["width"], query_data["height"])
query_pose_lines_seq = norm_by_global_action(query_data["compoelem"]["pose_lines"], query_data["compoelem"]["global_action_lines"], fallback=glac_fallback)
for target_data in data:
if query_data["className"] == target_data["className"] and query_data["imgName"] == target_data["imgName"]:
continue
if compare_other == 'vgg19_ncos':
r_addition = negative_cosine_dist_flatten(query_data["imageNet_vgg19_bn_features"], target_data["imageNet_vgg19_bn_features"])
elif compare_other == 'resnet50_cos':
r_addition = negative_cosine_dist_flatten(query_data["places365_resnet50_feature_noFC"], target_data["places365_resnet50_feature_noFC"])
elif compare_other == 'resnet50_eucl':
r_addition = eucl_dist_flatten(query_data["places365_resnet50_feature_noFC"], target_data["places365_resnet50_feature_noFC"])
elif compare_other == 'sift_bfm1':
r_addition = compare_siftBFMatcher1(query_data["sift"], target_data["sift"])
elif compare_other == 'orb_bfm1':
r_addition = compare_orbBFMatcher1(query_data["orb"], target_data["orb"])
elif compare_other == 'brief_bfm1':
r_addition = compare_briefBFMatcher1(query_data["brief"], target_data["brief"])
elif compare_other is None:
r_addition = 0
else:
raise NotImplementedError("not implemented compare_other", compare_other)
#combined_ratio, hit_ratio, neg_mean_distance_hits = compare_pose_lines_3(query_pose_lines, minmax_norm_by_imgrect(target_data["compoelem"][pose_lines], target_data["width"], target_data["height"]))
target_pose_lines_seq = norm_by_global_action(target_data["compoelem"]["pose_lines"], target_data["compoelem"]["global_action_lines"], fallback=glac_fallback)
pair_compare_results = []
for query_pose_lines in query_pose_lines_seq:
for target_pose_lines in target_pose_lines_seq:
combined_ratio, hit_ratio, neg_mean_distance_hits = compare_pose_lines_3(query_pose_lines, target_pose_lines)
pair_compare_results.append((combined_ratio, hit_ratio, neg_mean_distance_hits, target_data))
combined_ratio, hit_ratio, neg_mean_distance_hits, target_data = filter_pose_line_ga_result(pair_compare_results)
a = additional_feature_weight
wra = r_addition * (1-a)
r_combi1 = wra * (1 - combined_ratio * a)
r_combi2 = wra + (1 - combined_ratio * a)
r_combi3 = wra * (1 - neg_mean_distance_hits * a)
r_combi4 = wra + (1 - neg_mean_distance_hits * a)
compare_results.append((combined_ratio, hit_ratio, neg_mean_distance_hits, r_combi1, r_combi2, r_combi3, r_combi4, r_addition, target_data))
compare_results = np.array(compare_results)
sorted_compare_results = sort_method(compare_results)
query_label = query_data["className"]
res_labels = list(map(lambda x: x["className"], sorted_compare_results[:,-1]))
res_keys = list(map(lambda x: x["className"]+'_'+x["imgName"], sorted_compare_results[:,-1]))
all_retrieval_res.append(np.array([
query_data["className"]+'_'+query_data["imgName"],
query_label,
res_keys,
res_labels
]))
metrics = eval_utils.score_retrievals(query_label, res_labels)
label = metrics["label"]
if label in precision_curves:
precision_curves[label].append(metrics["precision_at_rank"])
else:
precision_curves[label] = [metrics["precision_at_rank"]]
for key in metrics.keys():
if key != "label":
if key not in res_metrics:
res_metrics[key] = {}
if label not in res_metrics[key]:
res_metrics[key][label] = []
res_metrics[key][label].append(metrics[key])
return (eval_utils.get_eval_dataframe(res_metrics), precision_curves, np.array(all_retrieval_res))
def compare_setupB(data, sort_method, norm_method, glac_fallback, compare_other, additional_feature_weight):
if compare_other is not None:
raise NotImplementedError("compare other not implemented")
res_metrics = {}
precision_curves = {}
all_retrieval_res = []
for query_data in tqdm(data, total=len(data)):
compare_results = []
if norm_method == 'none':
query_pose_lines = query_data["compoelem"]["pose_lines"]
elif norm_method == 'minmax_norm_by_imgrect':
query_pose_lines = minmax_norm_by_imgrect(query_data["compoelem"]["pose_lines"], query_data["compoelem"]["width"], query_data["compoelem"]["height"])
elif norm_method == 'minmax_norm_by_bbox':
query_pose_lines = minmax_norm_by_bbox(query_data["compoelem"]["pose_lines"])
else:
raise NotImplementedError("norm_method: {} not implemented".format(norm_method))
for target_data in data:
if query_data["className"] == target_data["className"] and query_data["imgName"] == target_data["imgName"]:
continue
if norm_method == 'none':
target_pose_lines = target_data["compoelem"]["pose_lines"]
elif norm_method == 'minmax_norm_by_imgrect':
target_pose_lines = minmax_norm_by_imgrect(target_data["compoelem"]["pose_lines"], target_data["compoelem"]["width"], target_data["compoelem"]["height"])
elif norm_method == 'minmax_norm_by_bbox':
target_pose_lines = minmax_norm_by_bbox(target_data["compoelem"]["pose_lines"])
else:
raise NotImplementedError("norm_method: {} not implemented".format(norm_method))
combined_ratio, hit_ratio, neg_mean_distance_hits = compare_pose_lines_3(query_pose_lines, target_pose_lines)
compare_results.append((combined_ratio, hit_ratio, neg_mean_distance_hits, target_data))
compare_results = np.array(compare_results)
sorted_compare_results = sort_method(compare_results)
query_label = query_data["className"]
res_labels = list(map(lambda x: x["className"], sorted_compare_results[:,-1]))
res_keys = list(map(lambda x: x["className"]+'_'+x["imgName"], sorted_compare_results[:,-1]))
all_retrieval_res.append(np.array([
query_data["className"]+'_'+query_data["imgName"],
query_label,
res_keys,
res_labels
]))
metrics = eval_utils.score_retrievals(query_label, res_labels)
label = metrics["label"]
if label in precision_curves:
precision_curves[label].append(metrics["precision_at_rank"])
else:
precision_curves[label] = [metrics["precision_at_rank"]]
for key in metrics.keys():
if key != "label":
if key not in res_metrics:
res_metrics[key] = {}
if label not in res_metrics[key]:
res_metrics[key][label] = []
res_metrics[key][label].append(metrics[key])
return (eval_utils.get_eval_dataframe(res_metrics), precision_curves, np.array(all_retrieval_res))
# indices for sorting functions
# 0: combined_ratio
# 1: hit_ratio
# 2: neg_mean_distance_hits
# 3: r_combi1
# 4: r_combi2
# 5: r_combi3
# 6: r_combi4
# 7: r_addition
# 8: target_data
def cr_desc(compare_results):
sorted_compare_results = compare_results[np.argsort(compare_results[:,0])][::-1]
return sorted_compare_results
def nmd_desc(compare_results):
sorted_compare_results = compare_results[np.argsort(compare_results[:,2])][::-1]
return sorted_compare_results
def hr_nmd_desc(compare_results):
    # hr is the primary key, so it is passed to lexsort last
    # nmd is the secondary (tie-breaking) key, so it is passed first
sorted_compare_results = compare_results[np.lexsort((compare_results[:,2], compare_results[:,1]))][::-1]
return sorted_compare_results
# additional methods:
def hr_additional_desc(compare_results):
    # hr is the primary key, so it is passed to lexsort last
    # r_addition is the secondary (tie-breaking) key, so it is passed first
sorted_compare_results = compare_results[np.lexsort((-compare_results[:,7], compare_results[:,1]))][::-1]
return sorted_compare_results
def hr_combi3_desc(compare_results):
    # hr is the primary key, so it is passed to lexsort last
    # r_combi3 is the secondary (tie-breaking) key, so it is passed first
sorted_compare_results = compare_results[np.lexsort((-compare_results[:,5], compare_results[:,1]))][::-1]
return sorted_compare_results
def hr_combi4_desc(compare_results):
    # hr is the primary key, so it is passed to lexsort last
    # r_combi4 is the secondary (tie-breaking) key, so it is passed first
sorted_compare_results = compare_results[np.lexsort((-compare_results[:,6], compare_results[:,1]))][::-1]
return sorted_compare_results
def combi1_asc(compare_results):
sorted_compare_results = compare_results[np.argsort(compare_results[:,3])]
return sorted_compare_results
def combi2_asc(compare_results):
sorted_compare_results = compare_results[np.argsort(compare_results[:,4])]
return sorted_compare_results
osuname = os.uname().nodename
print("osuname", osuname)
if osuname == 'MBP-von-Tilman' or osuname == 'MacBook-Pro-von-Tilman.local':
COMPOELEM_ROOT = "/Users/tilman/Documents/Programme/Python/new_bachelor_thesis/compoelem"
elif osuname == 'lme117':
COMPOELEM_ROOT = "/home/zi14teho/compositional_elements"
else:
COMPOELEM_ROOT = os.getenv('COMPOELEM_ROOT')
DATASTORE_NAME = "combined_datastore_ceb_dataset"
DATASTORE_FILE = COMPOELEM_ROOT+"/final_evaluation/"+DATASTORE_NAME+".pkl"
EVAL_RESULTS_FILE_DIR = COMPOELEM_ROOT+"/final_evaluation/final2pkl/"
DATASTORE_NAME = "combined_datastore_ceb_dataset"
datastore = pickle.load(open(DATASTORE_FILE, "rb"))
datastore_name = DATASTORE_NAME
# def eval_single_combination(
# norm_method,
# sort_method_name,
# correction_angle,
# cone_opening_angle,
# cone_scale_factor,
# cone_base_scale_factor,
# filter_threshold,
# poseline_fallback,
# bisection_fallback,
# glac_fallback,
# ):
# print({
# "norm_method":norm_method,
# "sort_method_name":sort_method_name,
# "correction_angle":correction_angle,
# "cone_opening_angle":cone_opening_angle,
# "cone_scale_factor":cone_scale_factor,
# "cone_base_scale_factor":cone_base_scale_factor,
# "filter_threshold":filter_threshold,
# "poseline_fallback":poseline_fallback,
# "bisection_fallback":bisection_fallback,
# "glac_fallback":glac_fallback,
# })
def eval_single_combination(arg_obj):
print(arg_obj)
experiment_name = arg_obj["experiment_name"]
norm_method = arg_obj["norm_method"]
sort_method_name = arg_obj["sort_method_name"]
correction_angle = arg_obj["correction_angle"]
cone_opening_angle = arg_obj["cone_opening_angle"]
cone_scale_factor = arg_obj["cone_scale_factor"]
cone_base_scale_factor = arg_obj["cone_base_scale_factor"]
filter_threshold = arg_obj["filter_threshold"]
poseline_fallback = arg_obj["poseline_fallback"]
bisection_fallback = arg_obj["bisection_fallback"]
glac_fallback = arg_obj["glac_fallback"]
additional_feature_weight = arg_obj["additional_feature_weight"] if "additional_feature_weight" in arg_obj else 0.5
compare_other = arg_obj["compare_other"] if "compare_other" in arg_obj else None
setup = compare_setupA if norm_method == 'norm_by_global_action' else compare_setupB
if sort_method_name == 'cr_desc':
sort_method = cr_desc
elif sort_method_name == 'nmd_desc':
sort_method = nmd_desc
elif sort_method_name == 'hr_nmd_desc':
sort_method = hr_nmd_desc
elif sort_method_name == 'hr_additional_desc':
sort_method = hr_additional_desc
elif sort_method_name == 'hr_combi3_desc':
sort_method = hr_combi3_desc
elif sort_method_name == 'hr_combi4_desc':
sort_method = hr_combi4_desc
elif sort_method_name == 'combi1_asc':
sort_method = combi1_asc
elif sort_method_name == 'combi2_asc':
sort_method = combi2_asc
else:
raise NotImplementedError("sort_method: {} not implemented".format(sort_method_name))
config["bisection"]["correction_angle"] = correction_angle
config["bisection"]["cone_opening_angle"] = cone_opening_angle
config["bisection"]["cone_scale_factor"] = cone_scale_factor
config["bisection"]["cone_base_scale_factor"] = cone_base_scale_factor
config["compare"]["filter_threshold"] = filter_threshold
new_datastore_values = []
for key in datastore.keys():
poses = datastore[key]["compoelem"]["poses"]
datastore[key]["compoelem"]["global_action_lines"] = global_action.get_global_action_lines(poses, bisection_fallback)
datastore[key]["compoelem"]["pose_lines"] = pose_abstraction.get_pose_lines(poses, poseline_fallback)
new_datastore_values.append(datastore[key])
start_time = datetime.datetime.now()
eval_dataframe, precision_curves, all_retrieval_res = setup(new_datastore_values, sort_method, norm_method, glac_fallback, compare_other, additional_feature_weight)
norm_alias = {
"minmax_norm_by_imgrect":"Size",
"minmax_norm_by_bbox":"Bbox",
"norm_by_global_action":"Glac",
"none":"None",
}
filename = "final2_time{}_norm{}_{}_ca{}_co{}_cs{}_cbs{}_th{}_fbPl{}_fbBis{}_fbGa{}_other{}_aw{}.pkl".format(
start_time.strftime("%d%m%y%H%M%S"),
norm_alias[norm_method],
sort_method.__name__,
correction_angle,
cone_opening_angle,
cone_scale_factor,
cone_base_scale_factor,
filter_threshold,
poseline_fallback,
bisection_fallback,
glac_fallback,
compare_other,
additional_feature_weight,
)
print("filename", filename, "p@1", eval_dataframe["p@1"]["total (mean)"])
res_summary = {
"experiment_name": experiment_name,
"experiment_id": filename,
"filename": filename,
"datetime": start_time,
"setup": setup.__name__,
"eval_time_s": (datetime.datetime.now() - start_time).seconds,
"datastore_name": datastore_name,
"eval_dataframe": eval_dataframe,
"precision_curves": precision_curves,
"all_retrieval_res": all_retrieval_res,
"config": config,
"norm_method": norm_method,
"compare_method": "compare_pose_lines_3",
"sort_method": sort_method.__name__,
"compare_other": compare_other,
"correction_angle": correction_angle,
"cone_opening_angle": cone_opening_angle,
"cone_scale_factor": cone_scale_factor,
"filter_threshold": filter_threshold,
"poseline_fallback": poseline_fallback,
"bisection_fallback": bisection_fallback,
"glac_fallback": glac_fallback,
}
pickle.dump(res_summary, open(EVAL_RESULTS_FILE_DIR+filename, "wb"))
|
[] |
[] |
[
"COMPOELEM_ROOT"
] |
[]
|
["COMPOELEM_ROOT"]
|
python
| 1 | 0 | |
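The hr_*_desc helpers in evaluation/compare_final2_compoelem.py above rely on np.lexsort, which treats the last key passed as the primary sort key. The same two-level ordering written out explicitly may make the intent easier to see; this is a Go sketch with made-up field names, sorting by hit ratio first and breaking ties with the negated mean distance, both descending:

package main

import (
	"fmt"
	"sort"
)

type result struct {
	hitRatio    float64 // primary key
	negMeanDist float64 // secondary key, used only to break ties
}

func main() {
	results := []result{
		{hitRatio: 0.5, negMeanDist: -0.2},
		{hitRatio: 0.8, negMeanDist: -0.9},
		{hitRatio: 0.5, negMeanDist: -0.1},
	}
	sort.SliceStable(results, func(i, j int) bool {
		if results[i].hitRatio != results[j].hitRatio {
			return results[i].hitRatio > results[j].hitRatio // descending primary key
		}
		return results[i].negMeanDist > results[j].negMeanDist // descending secondary key
	})
	fmt.Println(results) // [{0.8 -0.9} {0.5 -0.1} {0.5 -0.2}]
}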
layer/unzip_requirements.py
|
import os
import io
import shutil
import sys
import zipfile
pkgdir = '/tmp/sls-py-req'
tempdir = '/tmp/_temp-sls-py-req'
requirements_zipname = '.requirements.zip'
sys.path.append(pkgdir)
if not os.path.exists(pkgdir):
if os.path.exists(tempdir):
shutil.rmtree(tempdir)
default_layer_root = '/opt'
lambda_root = os.getcwd() if os.environ.get('IS_LOCAL') == 'true' else default_layer_root
zip_requirements = os.path.join(lambda_root, requirements_zipname)
# extract zipfile in memory to /tmp dir
zipfile.ZipFile(zip_requirements).extractall(tempdir)
os.rename(tempdir, pkgdir) # Atomic
|
[] |
[] |
[
"IS_LOCAL"
] |
[]
|
["IS_LOCAL"]
|
python
| 1 | 0 | |
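layer/unzip_requirements.py above uses a common trick: extract into a temporary directory, then rename it into place so that concurrent invocations never see a half-extracted package. A sketch of the same pattern in Go; the paths mirror the ones used above and the extraction step itself is left out:

package main

import (
	"log"
	"os"
)

func main() {
	const pkgDir = "/tmp/sls-py-req"
	const tempDir = "/tmp/_temp-sls-py-req"

	if _, err := os.Stat(pkgDir); err == nil {
		return // already unpacked by an earlier invocation
	}

	if err := os.MkdirAll(tempDir, 0o755); err != nil {
		log.Fatal(err)
	}
	// ... extract the archive into tempDir here ...

	// Rename is atomic on the same filesystem, so readers see either nothing
	// at pkgDir or the fully extracted tree, never a partial one.
	if err := os.Rename(tempDir, pkgDir); err != nil {
		log.Fatal(err)
	}
}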
test/runtests.py
|
#!/usr/bin/env python
#-------------------------------------------------------------------------------------------------------
# Copyright (C) Microsoft. All rights reserved.
# Licensed under the MIT license. See LICENSE.txt file in the project root for full license information.
#-------------------------------------------------------------------------------------------------------
from __future__ import print_function
from datetime import datetime
from multiprocessing import Pool, Manager, cpu_count
from threading import Timer
import sys
import os
import glob
import subprocess as SP
import traceback
import argparse
import xml.etree.ElementTree as ET
import re
import time
# handle command line args
parser = argparse.ArgumentParser(
description='ChakraCore *nix Test Script',
formatter_class=argparse.RawDescriptionHelpFormatter,
epilog='''\
Samples:
test all folders:
runtests.py
test only Array:
runtests.py Array
test a single file:
runtests.py Basics/hello.js
''')
DEFAULT_TIMEOUT = 60
SLOW_TIMEOUT = 180
parser.add_argument('folders', metavar='folder', nargs='*',
help='folder subset to run tests')
parser.add_argument('-b', '--binary', metavar='bin',
help='ch full path')
parser.add_argument('-v', '--verbose', action='store_true',
help='increase verbosity of output')
parser.add_argument('--sanitize', metavar='sanitizers',
help='ignore tests known to be broken with these sanitizers')
parser.add_argument('-d', '--debug', action='store_true',
help='use debug build');
parser.add_argument('-t', '--test', '--test-build', action='store_true',
help='use test build')
parser.add_argument('--static', action='store_true',
help='mark that we are testing a static build')
parser.add_argument('--variants', metavar='variant', nargs='+',
help='run specified test variants')
parser.add_argument('--include-slow', action='store_true',
help='include slow tests (timeout ' + str(SLOW_TIMEOUT) + ' seconds)')
parser.add_argument('--only-slow', action='store_true',
help='run only slow tests')
parser.add_argument('--nightly', action='store_true',
help='run as nightly tests')
parser.add_argument('--tag', nargs='*',
help='select tests with given tags')
parser.add_argument('--not-tag', nargs='*',
help='exclude tests with given tags')
parser.add_argument('--flags', default='',
help='global test flags to ch')
parser.add_argument('--timeout', type=int, default=DEFAULT_TIMEOUT,
help='test timeout (default ' + str(DEFAULT_TIMEOUT) + ' seconds)')
parser.add_argument('--swb', action='store_true',
help='use binary from VcBuild.SWB to run the test')
parser.add_argument('-l', '--logfile', metavar='logfile',
help='file to log results to', default=None)
parser.add_argument('--x86', action='store_true',
help='use x86 build')
parser.add_argument('--x64', action='store_true',
help='use x64 build')
parser.add_argument('-j', '--processcount', metavar='processcount', type=int,
help='number of parallel threads to use')
parser.add_argument('--warn-on-timeout', action='store_true',
help='warn when a test times out instead of labelling it as an error immediately')
parser.add_argument('--override-test-root', type=str,
help='change the base directory for the tests (where rlexedirs will be sought)')
args = parser.parse_args()
test_root = os.path.dirname(os.path.realpath(__file__))
repo_root = os.path.dirname(test_root)
# new test root
if args.override_test_root:
test_root = os.path.realpath(args.override_test_root)
# arch: x86, x64
arch = 'x86' if args.x86 else ('x64' if args.x64 else None)
if arch == None:
arch = os.environ.get('_BuildArch', 'x86')
if sys.platform != 'win32':
arch = 'x64' # xplat: hard code arch == x64
arch_alias = 'amd64' if arch == 'x64' else None
# flavor: debug, test, release
type_flavor = {'chk':'Debug', 'test':'Test', 'fre':'Release'}
flavor = 'Debug' if args.debug else ('Test' if args.test else None)
if flavor == None:
print("ERROR: Test build target wasn't defined.")
print("Try '-t' (test build) or '-d' (debug build).")
sys.exit(1)
flavor_alias = 'chk' if flavor == 'Debug' else 'fre'
# test variants
if not args.variants:
args.variants = ['interpreted', 'dynapogo']
# binary: full ch path
binary = args.binary
if binary == None:
if sys.platform == 'win32':
build = "VcBuild.SWB" if args.swb else "VcBuild"
binary = 'Build\\' + build + '\\bin\\{}_{}\\ch.exe'.format(arch, flavor)
else:
binary = 'out/{0}/ch'.format(flavor)
binary = os.path.join(repo_root, binary)
if not os.path.isfile(binary):
print('{} not found. Did you run ./build.sh already?'.format(binary))
sys.exit(1)
# global tags/not_tags
tags = set(args.tag or [])
not_tags = set(args.not_tag or []).union(['fail', 'exclude_' + arch])
if arch_alias:
not_tags.add('exclude_' + arch_alias)
if flavor_alias:
not_tags.add('exclude_' + flavor_alias)
if args.only_slow:
tags.add('Slow')
elif not args.include_slow:
not_tags.add('Slow')
elif args.include_slow and args.timeout == DEFAULT_TIMEOUT:
args.timeout = SLOW_TIMEOUT
not_tags.add('exclude_nightly' if args.nightly else 'nightly')
# verbosity
verbose = False
if args.verbose:
verbose = True
print("Emitting verbose output...")
# xplat: temp hard coded to exclude unsupported tests
if sys.platform != 'win32':
not_tags.add('exclude_xplat')
not_tags.add('Intl')
not_tags.add('require_simd')
if args.sanitize != None:
not_tags.add('exclude_sanitize_'+args.sanitize)
if args.static != None:
not_tags.add('exclude_static')
if sys.platform == 'darwin':
not_tags.add('exclude_mac')
not_compile_flags = None
# use -j flag to specify number of parallel processes
processcount = cpu_count()
if args.processcount != None:
processcount = int(args.processcount)
# handle warn on timeout
warn_on_timeout = False
if args.warn_on_timeout == True:
warn_on_timeout = True
# use tags/not_tags/not_compile_flags as case-insensitive
def lower_set(s):
return set([x.lower() for x in s] if s else [])
tags = lower_set(tags)
not_tags = lower_set(not_tags)
not_compile_flags = lower_set(not_compile_flags)
# split tags text into tags set
_empty_set = set()
def split_tags(text):
return set(x.strip() for x in text.lower().split(',')) if text \
else _empty_set
class LogFile(object):
def __init__(self, log_file_path = None):
self.file = None
if log_file_path is None:
# Set up the log file paths
# Make sure the right directory exists and the log file doesn't
log_file_name = "testrun.{0}{1}.log".format(arch, flavor)
log_file_directory = os.path.join(repo_root, "test", "logs")
if not os.path.exists(log_file_directory):
os.mkdir(log_file_directory)
self.log_file_path = os.path.join(log_file_directory, log_file_name)
if os.path.exists(self.log_file_path):
os.remove(self.log_file_path)
else:
self.log_file_path = log_file_path
self.file = open(self.log_file_path, "w")
def log(self, args):
self.file.write(args)
def __del__(self):
if not (self.file is None):
self.file.close()
if __name__ == '__main__':
log_file = LogFile(args.logfile)
def log_message(msg = ""):
log_file.log(msg + "\n")
def print_and_log(msg = ""):
print(msg)
log_message(msg)
# remove carriage returns at end of line to avoid platform difference
def normalize_new_line(text):
return re.sub(b'[\r]+\n', b'\n', text)
# A test simply contains a collection of test attributes.
# Misc attributes added by test run:
# id unique counter to identify a test
# filename full path of test file
# elapsed_time elapsed time when running the test
#
class Test(dict):
__setattr__ = dict.__setitem__
__delattr__ = dict.__delitem__
# support dot syntax for normal attribute access
def __getattr__(self, key):
return super(Test, self).__getattr__(key) if key.startswith('__') \
else self.get(key)
# mark start of this test run, to compute elapsed_time
def start(self):
self.start_time = datetime.now()
# mark end of this test run, compute elapsed_time
def done(self):
if not self.elapsed_time:
self.elapsed_time = (datetime.now() - self.start_time)\
.total_seconds()
# records pass_count/fail_count
class PassFailCount(object):
def __init__(self):
self.pass_count = 0
self.fail_count = 0
def __str__(self):
return 'passed {}, failed {}'.format(self.pass_count, self.fail_count)
def total_count(self):
return self.pass_count + self.fail_count
# records total and individual folder's pass_count/fail_count
class TestResult(PassFailCount):
def __init__(self):
super(self.__class__, self).__init__()
self.folders = {}
def _get_folder_result(self, folder):
r = self.folders.get(folder)
if not r:
r = PassFailCount()
self.folders[folder] = r
return r
def log(self, filename, fail=False):
folder = os.path.basename(os.path.dirname(filename))
r = self._get_folder_result(folder)
if fail:
r.fail_count += 1
self.fail_count += 1
else:
r.pass_count += 1
self.pass_count += 1
# test variants:
# interpreted: -maxInterpretCount:1 -maxSimpleJitRunCount:1 -bgjit-
# dynapogo: -forceNative -off:simpleJit -bgJitDelay:0
class TestVariant(object):
def __init__(self, name, compile_flags=[], variant_not_tags=[]):
self.name = name
self.compile_flags = \
['-WERExceptionSupport', '-ExtendedErrorStackForTestHost',
'-BaselineMode'] + compile_flags
self._compile_flags_has_expansion = self._has_expansion(compile_flags)
self.tags = tags.copy()
self.not_tags = not_tags.union(variant_not_tags).union(
['{}_{}'.format(x, name) for x in ('fails','exclude')])
self.msg_queue = Manager().Queue() # messages from multi processes
self.test_result = TestResult()
self.test_count = 0
self._print_lines = [] # _print lines buffer
self._last_len = 0
@staticmethod
def _has_expansion(flags):
return any(re.match('.*\${.*}', f) for f in flags)
@staticmethod
def _expand(flag, test):
return re.sub('\${id}', str(test.id), flag)
def _expand_compile_flags(self, test):
if self._compile_flags_has_expansion:
return [self._expand(flag, test) for flag in self.compile_flags]
return self.compile_flags
# check if this test variant should run a given test
def _should_test(self, test):
tags = split_tags(test.get('tags'))
if not tags.isdisjoint(self.not_tags):
return False
if self.tags and not self.tags.issubset(tags):
return False
if not_compile_flags: # exclude unsupported compile-flags if any
flags = test.get('compile-flags')
if flags and \
not not_compile_flags.isdisjoint(flags.lower().split()):
return False
return True
# print output from multi-process run, to be sent with result message
def _print(self, line):
self._print_lines.append(str(line))
# queue a test result from multi-process runs
def _log_result(self, test, fail):
output = '\n'.join(self._print_lines) # collect buffered _print output
self._print_lines = []
self.msg_queue.put((test.filename, fail, test.elapsed_time, output))
# (on main process) process one queued message
def _process_msg(self, msg):
filename, fail, elapsed_time, output = msg
self.test_result.log(filename, fail=fail)
line = '[{}/{} {:4.2f}] {} -> {}'.format(
self.test_result.total_count(),
self.test_count,
elapsed_time,
'Failed' if fail else 'Passed',
self._short_name(filename))
padding = self._last_len - len(line)
print(line + ' ' * padding, end='\n' if fail else '\r')
log_message(line)
self._last_len = len(line) if not fail else 0
if len(output) > 0:
print_and_log(output)
# get a shorter test file path for display only
def _short_name(self, filename):
folder = os.path.basename(os.path.dirname(filename))
return os.path.join(folder, os.path.basename(filename))
# (on main process) wait and process one queued message
def _process_one_msg(self):
self._process_msg(self.msg_queue.get())
# log a failed test with details
def _show_failed(self, test, flags, exit_code, output,
expected_output=None, timedout=False):
if timedout:
if warn_on_timeout:
self._print('WARNING: Test timed out!')
else:
self._print('ERROR: Test timed out!')
self._print('{} {} {}'.format(binary, ' '.join(flags), test.filename))
if expected_output == None or timedout:
self._print("\nOutput:")
self._print("----------------------------")
self._print(output.decode('utf-8'))
self._print("----------------------------")
else:
lst_output = output.split(b'\n')
lst_expected = expected_output.split(b'\n')
ln = min(len(lst_output), len(lst_expected))
for i in range(0, ln):
if lst_output[i] != lst_expected[i]:
self._print("Output: (at line " + str(i) + ")")
self._print("----------------------------")
self._print(lst_output[i])
self._print("----------------------------")
self._print("Expected Output:")
self._print("----------------------------")
self._print(lst_expected[i])
self._print("----------------------------")
break
self._print("exit code: {}".format(exit_code))
if warn_on_timeout and timedout:
self._log_result(test, fail=False)
else:
self._log_result(test, fail=True)
    # temp: try to find the real file name on disk if the case does not match
def _check_file(self, folder, filename):
path = os.path.join(folder, filename)
if os.path.isfile(path):
return path # file exists on disk
filename_lower = filename.lower()
files = os.listdir(folder)
for i in range(len(files)):
if files[i].lower() == filename_lower:
self._print('\nWARNING: {} should be {}\n'.format(
path, files[i]))
return os.path.join(folder, files[i])
        # can't find the file, just return the path and let it error out
return path
# run one test under this variant
def test_one(self, test):
try:
test.start()
self._run_one_test(test)
except Exception:
test.done()
self._print(traceback.format_exc())
self._log_result(test, fail=True)
# internally perform one test run
def _run_one_test(self, test):
folder = test.folder
js_file = test.filename = self._check_file(folder, test.files)
js_output = b''
working_path = os.path.dirname(js_file)
flags = test.get('compile-flags') or ''
flags = self._expand_compile_flags(test) + \
args.flags.split() + \
flags.split()
cmd = [binary] + flags + [os.path.basename(js_file)]
test.start()
proc = SP.Popen(cmd, stdout=SP.PIPE, stderr=SP.STDOUT, cwd=working_path)
timeout_data = [proc, False]
def timeout_func(timeout_data):
timeout_data[0].kill()
timeout_data[1] = True
timeout = test.get('timeout', args.timeout) # test override or default
timer = Timer(timeout, timeout_func, [timeout_data])
try:
timer.start()
js_output = normalize_new_line(proc.communicate()[0])
exit_code = proc.wait()
finally:
timer.cancel()
test.done()
# shared _show_failed args
fail_args = { 'test': test, 'flags': flags,
'exit_code': exit_code, 'output': js_output };
# check timed out
if (timeout_data[1]):
return self._show_failed(timedout=True, **fail_args)
# check ch failed
if exit_code != 0:
return self._show_failed(**fail_args)
# check output
if 'baseline' not in test:
# output lines must be 'pass' or 'passed' or empty
lines = (line.lower() for line in js_output.split(b'\n'))
if any(line != b'' and line != b'pass' and line != b'passed'
for line in lines):
return self._show_failed(**fail_args)
else:
baseline = test.get('baseline')
if baseline:
# perform baseline comparison
baseline = self._check_file(working_path, baseline)
with open(baseline, 'rb') as bs_file:
baseline_output = bs_file.read()
# Cleanup carriage return
# todo: remove carriage return at the end of the line
# or better fix ch to output same on all platforms
expected_output = normalize_new_line(baseline_output)
if expected_output != js_output:
return self._show_failed(
expected_output=expected_output, **fail_args)
# passed
if verbose:
self._print('{} {} {}'.format(binary, ' '.join(flags), test.filename))
self._log_result(test, fail=False)
# run tests under this variant, using given multiprocessing Pool
def _run(self, tests, pool):
print_and_log('\n############# Starting {} variant #############'\
.format(self.name))
if self.tags:
print_and_log(' tags: {}'.format(self.tags))
for x in self.not_tags:
print_and_log(' exclude: {}'.format(x))
print_and_log()
# filter tests to run
tests = [x for x in tests if self._should_test(x)]
self.test_count += len(tests)
# run tests in parallel
result = pool.map_async(run_one, [(self,test) for test in tests])
while self.test_result.total_count() != self.test_count:
self._process_one_msg()
# print test result summary
def print_summary(self):
print_and_log('\n######## Logs for {} variant ########'\
.format(self.name))
for folder, result in sorted(self.test_result.folders.items()):
print_and_log('{}: {}'.format(folder, result))
print_and_log("----------------------------")
print_and_log('Total: {}'.format(self.test_result))
# run all tests from testLoader
def run(self, testLoader, pool, sequential_pool):
tests, sequential_tests = [], []
for folder in testLoader.folders():
if folder.tags.isdisjoint(self.not_tags):
dest = tests if not folder.is_sequential else sequential_tests
dest += folder.tests
if tests:
self._run(tests, pool)
if sequential_tests:
self._run(sequential_tests, sequential_pool)
# global run one test function for multiprocessing, used by TestVariant
def run_one(data):
try:
variant, test = data
variant.test_one(test)
except Exception:
print('ERROR: Unhandled exception!!!')
traceback.print_exc()
# A test folder contains a list of tests and maybe some tags.
class TestFolder(object):
def __init__(self, tests, tags=_empty_set):
self.tests = tests
self.tags = tags
self.is_sequential = 'sequential' in tags
# TestLoader loads all tests
class TestLoader(object):
def __init__(self, paths):
self._folder_tags = self._load_folder_tags()
self._test_id = 0
self._folders = []
for path in paths:
if os.path.isfile(path):
folder, file = os.path.dirname(path), os.path.basename(path)
else:
folder, file = path, None
ftags = self._get_folder_tags(folder)
if ftags != None: # Only honor entries listed in rlexedirs.xml
tests = self._load_tests(folder, file)
self._folders.append(TestFolder(tests, ftags))
def folders(self):
return self._folders
# load folder/tags info from test_root/rlexedirs.xml
@staticmethod
def _load_folder_tags():
xmlpath = os.path.join(test_root, 'rlexedirs.xml')
try:
xml = ET.parse(xmlpath).getroot()
except IOError:
print_and_log('ERROR: failed to read {}'.format(xmlpath))
exit(-1)
folder_tags = {}
for x in xml:
d = x.find('default')
key = d.find('files').text.lower() # avoid case mismatch
tags = d.find('tags')
folder_tags[key] = \
split_tags(tags.text) if tags != None else _empty_set
return folder_tags
# get folder tags if any
def _get_folder_tags(self, folder):
key = os.path.basename(os.path.normpath(folder)).lower()
return self._folder_tags.get(key)
def _next_test_id(self):
self._test_id += 1
return self._test_id
# load all tests in folder using rlexe.xml file
def _load_tests(self, folder, file):
try:
xmlpath = os.path.join(folder, 'rlexe.xml')
xml = ET.parse(xmlpath).getroot()
except IOError:
return []
def test_override(condition, check_tag, check_value, test):
target = condition.find(check_tag)
if target != None and target.text == check_value:
for override in condition.find('override'):
test[override.tag] = override.text
def load_test(testXml):
test = Test(folder=folder)
for c in testXml.find('default'):
if c.tag == 'timeout': # timeout seconds
test[c.tag] = int(c.text)
elif c.tag == 'tags' and c.tag in test: # merge multiple <tags>
test[c.tag] = test[c.tag] + ',' + c.text
else:
test[c.tag] = c.text
condition = testXml.find('condition')
if condition != None:
test_override(condition, 'target', arch_alias, test)
return test
tests = [load_test(x) for x in xml]
if file != None:
tests = [x for x in tests if x.files == file]
if len(tests) == 0 and self.is_jsfile(file):
tests = [Test(folder=folder, files=file, baseline='')]
for test in tests: # assign unique test.id
test.id = self._next_test_id()
return tests
@staticmethod
def is_jsfile(path):
return os.path.splitext(path)[1] == '.js'
def main():
# Set the right timezone, the tests need Pacific Standard Time
# TODO: Windows. time.tzset only supports Unix
if hasattr(time, 'tzset'):
os.environ['TZ'] = 'US/Pacific'
time.tzset()
# By default run all tests
if len(args.folders) == 0:
files = (os.path.join(test_root, x) for x in os.listdir(test_root))
args.folders = [f for f in sorted(files) if not os.path.isfile(f)]
# load all tests
testLoader = TestLoader(args.folders)
# test variants
variants = [x for x in [
TestVariant('interpreted', [
'-maxInterpretCount:1', '-maxSimpleJitRunCount:1', '-bgjit-',
'-dynamicprofilecache:profile.dpl.${id}'
]),
TestVariant('dynapogo', [
'-forceNative', '-off:simpleJit', '-bgJitDelay:0',
'-dynamicprofileinput:profile.dpl.${id}'
]),
TestVariant('disable_jit', [
'-nonative'
], [
'exclude_interpreted', 'fails_interpreted', 'require_backend'
])
] if x.name in args.variants]
# rm profile.dpl.*
for f in glob.glob(test_root + '/*/profile.dpl.*'):
os.remove(f)
# run each variant
pool, sequential_pool = Pool(processcount), Pool(1)
start_time = datetime.now()
for variant in variants:
variant.run(testLoader, pool, sequential_pool)
elapsed_time = datetime.now() - start_time
# print summary
for variant in variants:
variant.print_summary()
print()
failed = any(variant.test_result.fail_count > 0 for variant in variants)
print('[{}] {}'.format(
str(elapsed_time), 'Success!' if not failed else 'Failed!'))
return 1 if failed else 0
if __name__ == '__main__':
sys.exit(main())
|
[] |
[] |
[
"TZ",
"_BuildArch"
] |
[]
|
["TZ", "_BuildArch"]
|
python
| 2 | 0 | |
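test/runtests.py above enforces per-test timeouts by arming a threading.Timer that kills the child process and records that it did so. For comparison, a Go sketch of the same idea using context deadlines with os/exec; the command and script name are stand-ins, not part of the test harness:

package main

import (
	"context"
	"errors"
	"fmt"
	"os/exec"
	"time"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second)
	defer cancel()

	// CommandContext kills the process automatically when the deadline passes.
	out, err := exec.CommandContext(ctx, "ch", "hello.js").CombinedOutput()
	if errors.Is(ctx.Err(), context.DeadlineExceeded) {
		fmt.Println("ERROR: Test timed out!")
		return
	}
	if err != nil {
		fmt.Printf("test failed: %v\n%s", err, out)
		return
	}
	fmt.Printf("passed:\n%s", out)
}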
src/main/java/com/snowball/finder/finderskill/client/ApiGatewayRestClient.java
|
package com.snowball.finder.finderskill.client;
import com.amazonaws.auth.*;
import com.mashape.unirest.http.HttpResponse;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import javax.crypto.Mac;
import javax.crypto.spec.SecretKeySpec;
import java.net.URI;
import java.net.URISyntaxException;
import java.nio.charset.StandardCharsets;
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;
import java.time.LocalDateTime;
import java.time.format.DateTimeFormatter;
import java.util.HashMap;
import java.util.Map;
import java.util.Optional;
// TODO: Initialise this with a builder
public class ApiGatewayRestClient extends BasicAuthClient {
static final Logger logger = LoggerFactory.getLogger(ApiGatewayRestClient.class);
private String accessKeyId;
private String secretKey;
private String sessionToken;
public ApiGatewayRestClient() {
super();
EnvironmentVariableCredentialsProvider provider = new EnvironmentVariableCredentialsProvider();
AWSCredentials cred = provider.getCredentials();
accessKeyId = cred.getAWSAccessKeyId();
secretKey = cred.getAWSSecretKey();
sessionToken = Optional.ofNullable(System.getenv("AWS_SESSION_TOKEN")).orElse("");
}
public <T> HttpResponse<T> getWithHMAC(String endpoint, Map<String, String> queryParams, Map<String, String> routeParams, Class<T> clazz) throws Exception {
URI uri = new URI(endpoint);
String method = "GET";
String service = "execute-api";
String region = Optional.ofNullable(System.getenv("AWS_REGION")).orElse("eu-west-1");
String algorithm = "AWS4-HMAC-SHA256";
String contentType = "application/x-www-form-urlencoded"; //Set to "application/x-amz-json-1.0" for PUT, POST or PATCH
String signedHeader = "content-type;host;x-amz-date;x-amz-security-token"; //Headers must be in alphabetical order
LocalDateTime now = LocalDateTime.now();
String amzDate = now.format(DateTimeFormatter.ofPattern("yyyyMMdd'T'HHmmss'Z'"));
String datestamp = now.format(DateTimeFormatter.ofPattern("yyyyMMdd"));
HMACGetRequest request = new HMACGetRequest(endpoint);
String canonicalHeaders = "content-type:" + contentType + "\n" + "host:" + uri.getHost() + "\n" + "x-amz-date:" + amzDate + "\n" +
"x-amz-security-token:" + sessionToken + "\n";
if (accessKeyId.isEmpty() || secretKey.isEmpty()) {
throw new RuntimeException("IAM credentials missing");
}
if(routeParams != null) {
routeParams.forEach(request::routeParam);
}
// TODO: Sort the query params alphabetically before adding
if (queryParams != null) {
queryParams.forEach(request::queryString);
}
String canonicalRequest = getCanonicalRequest(method, signedHeader, canonicalHeaders, request);
String authHeader = getAuthorisationHeaderContent(service, region, algorithm, signedHeader, amzDate, datestamp, canonicalRequest);
HashMap<String, String> headers = new HashMap<>();
headers.put("Content-Type", contentType);
headers.put("Host", uri.getHost());
headers.put("X-Amz-Date", amzDate);
headers.put("Authorization", authHeader);
request.headers(headers);
HttpResponse<T> response = request.asObject(clazz);
return response;
}
protected String getCanonicalRequest(String method, String signedHeader, String canonicalHeaders, HMACGetRequest request) throws URISyntaxException, NoSuchAlgorithmException {
String canonicalQueryString = request.getCanonicalQuery();
String canonicalUri = request.getCanonicalUri();
return method + "\n" + canonicalUri + "\n" + canonicalQueryString + "\n" +
canonicalHeaders + "\n" + signedHeader + "\n" + getPayloadHash("");
}
protected String getAuthorisationHeaderContent(String service, String region, String algorithm, String signedHeader, String amzDate, String datestamp, String canonicalRequest) throws Exception {
String credentialScope = datestamp + '/' + region + '/' + service + '/' + "aws4_request";
String stringToSign = algorithm + '\n' + amzDate + '\n' + credentialScope + '\n' + getPayloadHash(canonicalRequest);
byte[] signingKey = getSignatureKey(secretKey, datestamp, region, service);
String signature = bytesToHex(HmacSHA256(stringToSign, signingKey));
return algorithm + " " + "Credential=" + accessKeyId + '/' + credentialScope + ", " + "SignedHeaders=" + signedHeader + ", " + "Signature=" + signature;
}
protected static String getPayloadHash(String payload) throws NoSuchAlgorithmException {
MessageDigest digest = MessageDigest.getInstance("SHA-256");
byte[] encodedhash = digest.digest(payload.getBytes(StandardCharsets.UTF_8));
return bytesToHex(encodedhash);
}
protected static byte[] getSignatureKey(String key, String dateStamp, String regionName, String serviceName) throws Exception {
byte[] kSecret = ("AWS4" + key).getBytes("UTF-8");
byte[] kDate = HmacSHA256(dateStamp, kSecret);
byte[] kRegion = HmacSHA256(regionName, kDate);
byte[] kService = HmacSHA256(serviceName, kRegion);
byte[] kSigning = HmacSHA256("aws4_request", kService);
return kSigning;
}
protected static byte[] HmacSHA256(String data, byte[] key) throws Exception {
String algorithm="HmacSHA256";
Mac mac = Mac.getInstance(algorithm);
mac.init(new SecretKeySpec(key, algorithm));
return mac.doFinal(data.getBytes("UTF-8"));
}
protected static String bytesToHex(byte[] hash) {
StringBuffer hexString = new StringBuffer();
for (int i = 0; i < hash.length; i++) {
String hex = Integer.toHexString(0xff & hash[i]);
if(hex.length() == 1) hexString.append('0');
hexString.append(hex);
}
return hexString.toString();
}
}
|
[
"\"AWS_SESSION_TOKEN\"",
"\"AWS_REGION\""
] |
[] |
[
"AWS_SESSION_TOKEN",
"AWS_REGION"
] |
[]
|
["AWS_SESSION_TOKEN", "AWS_REGION"]
|
java
| 2 | 0 | |
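ApiGatewayRestClient.java above derives the SigV4 signing key by chaining HMAC-SHA256 over the date, region, service and the literal "aws4_request". The same derivation in Go, to make the chain explicit; the secret, date, region and service values below are placeholders:

package main

import (
	"crypto/hmac"
	"crypto/sha256"
	"encoding/hex"
	"fmt"
)

func hmacSHA256(key []byte, data string) []byte {
	mac := hmac.New(sha256.New, key)
	mac.Write([]byte(data))
	return mac.Sum(nil)
}

// signingKey mirrors getSignatureKey above: each step keys the next HMAC.
func signingKey(secret, date, region, service string) []byte {
	kDate := hmacSHA256([]byte("AWS4"+secret), date)
	kRegion := hmacSHA256(kDate, region)
	kService := hmacSHA256(kRegion, service)
	return hmacSHA256(kService, "aws4_request")
}

func main() {
	key := signingKey("EXAMPLE_SECRET", "20240101", "eu-west-1", "execute-api")
	fmt.Println(hex.EncodeToString(key))
}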
helpers/tokenHelper.go
|
package helpers
import (
"context"
"fmt"
"log"
"os"
"strings"
"time"
"github.com/AaronRebel09/golang-deployment-pipeline/database"
"github.com/dgrijalva/jwt-go"
"go.mongodb.org/mongo-driver/bson"
"go.mongodb.org/mongo-driver/bson/primitive"
"go.mongodb.org/mongo-driver/mongo"
"go.mongodb.org/mongo-driver/mongo/options"
)
// SignedDetails
type SignedDetails struct {
Email string
First_name string
Last_name string
Uid string
jwt.StandardClaims
}
var userCollection *mongo.Collection = database.OpenCollection(database.Client, "user")
var SECRET_KEY string = os.Getenv("SECRET_KEY")
// GenerateAllTokens generates both the detailed token and refresh token
func GenerateAllTokens(email string, firstName string, lastName string, uid string) (signedToken string, signedRefreshtoken string, err error) {
claims := &SignedDetails{
Email: email,
First_name: firstName,
Last_name: lastName,
Uid: uid,
StandardClaims: jwt.StandardClaims{
ExpiresAt: time.Now().Local().Add(time.Hour * time.Duration(24)).Unix(),
},
}
refreshClaims := &SignedDetails{
StandardClaims: jwt.StandardClaims{
ExpiresAt: time.Now().Local().Add(time.Hour * time.Duration(168)).Unix(),
},
}
	token, err := jwt.NewWithClaims(jwt.SigningMethodHS256, claims).SignedString([]byte(SECRET_KEY))
	if err != nil {
		log.Panic(err)
		return
	}
	refreshToken, err := jwt.NewWithClaims(jwt.SigningMethodHS256, refreshClaims).SignedString([]byte(SECRET_KEY))
	if err != nil {
		log.Panic(err)
		return
	}
return token, refreshToken, err
}
// ValidateToken validates the jwt token
func ValidateToken(signedToken string) (claims *SignedDetails, msg string) {
	// Strip the "Bearer " prefix from the token
signedToken = strings.ReplaceAll(signedToken, "Bearer ", "")
token, err := jwt.ParseWithClaims(
signedToken,
&SignedDetails{},
func(token *jwt.Token) (interface{}, error) {
return []byte(SECRET_KEY), nil
},
)
if err != nil {
msg = err.Error()
return
}
	claims, ok := token.Claims.(*SignedDetails)
	if !ok {
		// err is nil at this point, so report the problem directly instead of calling err.Error()
		msg = fmt.Sprintf("the token is invalid")
		return
	}
	if claims.ExpiresAt < time.Now().Local().Unix() {
		msg = fmt.Sprintf("token expired")
		return
	}
return claims, msg
}
// UpdateAllTokens renews the user tokens when they login
func UpdateAllTokens(signedToken string, signedRefreshToken string, userId string) {
var ctx, cancel = context.WithTimeout(context.Background(), 100*time.Second)
var updateObj primitive.D
updateObj = append(updateObj, bson.E{"token", signedToken})
updateObj = append(updateObj, bson.E{"refresh_token", signedRefreshToken})
Updated_at, _ := time.Parse(time.RFC3339, time.Now().Format(time.RFC3339))
updateObj = append(updateObj, bson.E{"updated_at", Updated_at})
upsert := true
filter := bson.M{"user_id": userId}
opt := options.UpdateOptions{
Upsert: &upsert,
}
_, err := userCollection.UpdateOne(
ctx,
filter,
bson.D{
{"$set", updateObj},
},
&opt,
)
defer cancel()
if err != nil {
log.Panic(err)
return
}
return
}
|
[
"\"SECRET_KEY\""
] |
[] |
[
"SECRET_KEY"
] |
[]
|
["SECRET_KEY"]
|
go
| 1 | 0 | |
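helpers/tokenHelper.go above is typically called from the login and auth-middleware paths. A rough usage sketch follows; the import path is inferred from the file's own imports, the user values are made up, and running it assumes SECRET_KEY is set and the Mongo connection opened by the database package is available:

package main

import (
	"fmt"

	"github.com/AaronRebel09/golang-deployment-pipeline/helpers"
)

func main() {
	// GenerateAllTokens signs with the SECRET_KEY environment variable read at
	// package load; UpdateAllTokens writes to the "user" Mongo collection.
	token, refreshToken, err := helpers.GenerateAllTokens("ada@example.com", "Ada", "Lovelace", "user-123")
	if err != nil {
		panic(err)
	}
	helpers.UpdateAllTokens(token, refreshToken, "user-123")

	claims, msg := helpers.ValidateToken("Bearer " + token)
	if msg != "" {
		fmt.Println("token rejected:", msg)
		return
	}
	fmt.Println("token accepted for", claims.Email)
}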
api/spotify.py
|
import os
import random
import requests
from base64 import b64encode
from dotenv import load_dotenv, find_dotenv
from flask import Flask, Response, render_template
load_dotenv(find_dotenv())
SPOTIFY_CLIENT_ID = os.getenv("SPOTIFY_CLIENT_ID")
SPOTIFY_SECRET_ID = os.getenv("SPOTIFY_SECRET_ID")
SPOTIFY_REFRESH_TOKEN = os.getenv("SPOTIFY_REFRESH_TOKEN")
REFRESH_TOKEN_URL = "https://accounts.spotify.com/api/token"
NOW_PLAYING_URL = "https://api.spotify.com/v1/me/player/currently-playing"
RECENTLY_PLAYING_URL = "https://api.spotify.com/v1/me/player/recently-played?limit=10"
app = Flask(__name__)
def getAuth():
return b64encode(f"{SPOTIFY_CLIENT_ID}:{SPOTIFY_SECRET_ID}".encode()).decode("ascii")
def refreshToken():
data = {
"grant_type": "refresh_token",
"refresh_token": SPOTIFY_REFRESH_TOKEN,
}
headers = {"Authorization": "Basic {}".format(getAuth())}
response = requests.post(REFRESH_TOKEN_URL, data=data, headers=headers)
return response.json()["access_token"]
def nowPlaying():
headers = {"Authorization": f"Bearer {refreshToken()}"}
response = requests.get(NOW_PLAYING_URL, headers=headers)
if response.status_code == 204:
return {}
return response.json()
def recentlyPlayed():
headers = {"Authorization": f"Bearer {refreshToken()}"}
response = requests.get(RECENTLY_PLAYING_URL, headers=headers)
if response.status_code == 204:
return {}
return response.json()
def addBar(barNr, startPixel, bartype, lowSpeed, highSpeed):
bar = "<div class='" + bartype + "Bar'></div>"
animationSpeed = random.randint(lowSpeed, highSpeed)
barCSS = (
"." + bartype + "Bar:nth-child({}) {{ left: {}px; animation-duration: {}ms; }}"
.format(barNr, startPixel, animationSpeed)
)
return bar, barCSS
def getRandomBarType(barNr):
    # Distributes base frequencies to the right, and high frequencies to the left.
if barNr < 15:
bartype = random.randint(0, 1)
elif barNr < 75:
bartype = random.randint(0, 2)
else:
bartype = random.randint(1, 2)
return bartype
def generateBars():
barCount, startPixel = 90, 1 # barCount has to be a multiple of 3
bars, barsCSS = "", ""
barLayout = "position: absolute;" \
"width: 4px;" \
"bottom: 1px;" \
"height: 15px;" \
"background: #21AF43;" \
"border-radius: 1px 1px 0px 0px;"
bartypes = [("high", 500, 1000),
("medium", 650, 810),
("base", 349, 351)]
for i in range(1, barCount):
bartype = getRandomBarType(i)
newBar, newBarCSS = addBar(i, startPixel, bartypes[bartype][0], bartypes[bartype][1], bartypes[bartype][2])
bars += newBar
barsCSS += newBarCSS
startPixel += 4
return barsCSS, barLayout, bars
def loadImageB64(url):
return b64encode(requests.get(url).content).decode("ascii")
def makeSVG(data):
currentlyPlaying = data != {} and data["item"] is not None and (data["item"]["is_local"] is False)
if currentlyPlaying:
currentStatus = "🎧 Vibing to"
item = data["item"]
# Create the animated bars
barCSS, barLayout, animatedBars = generateBars()
else:
currentStatus = "🎧 Recently vibed to"
# get random track from recently played, filter away local tracks
recentPlays = [item for item in recentlyPlayed()["items"] if item["track"]["is_local"] is not True]
itemIndex = random.randint(0, len(recentPlays) - 1)
item = recentPlays[itemIndex]["track"]
animatedBars, barLayout, barCSS = "", "", ""
# Data that is sent to html
dataDict = {
"status": currentStatus,
"image": loadImageB64(item["album"]["images"][1]["url"]),
"songName": item["name"].replace("&", "&"),
"artistName": item["artists"][0]["name"].replace("&", "&"),
"explicit": item["explicit"],
# "previewLink": item["preview_url"],
"trackLink": item["external_urls"]["spotify"],
# "popularity": item["popularity"],
"animatedBars": animatedBars,
"barLayout": barLayout,
"barCSS": barCSS,
}
return render_template("spotify.html.j2", **dataDict)
@app.route("/", defaults={"path": ""})
@app.route("/<path:path>")
def catch_all(path):
data = nowPlaying()
svg = makeSVG(data)
resp = Response(svg, mimetype="image/svg+xml")
resp.headers["Cache-Control"] = "s-maxage=1"
return resp
if __name__ == "__main__":
app.run(debug=True)
|
[] |
[] |
[
"SPOTIFY_REFRESH_TOKEN",
"SPOTIFY_CLIENT_ID",
"SPOTIFY_SECRET_ID"
] |
[]
|
["SPOTIFY_REFRESH_TOKEN", "SPOTIFY_CLIENT_ID", "SPOTIFY_SECRET_ID"]
|
python
| 3 | 0 | |
vendor/github.com/eraclitux/cfgp/common.go
|
// cfgp - go configuration file parser package
// Copyright (c) 2015 Andrea Masi. All rights reserved.
// Use of this source code is governed by MIT license
// which that can be found in the LICENSE.txt file.
// Package cfgp is a configuration parser that loads configuration in a struct from files
// and automatically creates cli flags.
//
// Just define a struct with the needed configuration. Values are then taken from multiple
// sources in this order of precedence:
//
// - command line arguments (which are automagically created and parsed)
// - configuration file
//
// Tags
//
// Default is to use lower cased field names in struct to create command line arguments.
// Tags can be used to specify different names, command line help message
// and section in conf file.
//
// Format is:
// <name>,<help message>,<section in file>
//
// Simplest configuration file
//
// The cfgp.Path variable can be set to the path of a configuration file.
// By default it is initialized to the value of the environment variable:
//
// CFGP_FILE_PATH
//
// Files ending with:
// ini|txt|cfg
// will be parsed according to the informal INI standard:
//
// https://en.wikipedia.org/wiki/INI_file
//
// The first letter of every key found is upper cased and then
// a struct field with the same name is searched for:
//
// user -> User
// portNumber -> PortNumber
//
// If no such field name is found, then the comparison is made against the
// key specified as the first element in the tag.
//
// cfgp tries to be modular and easily extendible to support different formats.
//
// This is a work in progress, APIs can change.
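//
// A rough usage sketch (an editor's illustration, not taken from the original
// documentation; the struct and field names below are made up):
//
//	type conf struct {
//		Address string `cfgp:"addr,address to listen on,main"`
//		Port    int    `cfgp:",port to listen on,main"`
//		Debug   bool
//	}
//
//	func main() {
//		c := conf{Port: 8080}
//		if err := cfgp.Parse(&c); err != nil {
//			log.Fatal(err)
//		}
//	}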
package cfgp
import (
"errors"
"flag"
"fmt"
"os"
"reflect"
"regexp"
"strconv"
"strings"
"github.com/eraclitux/stracer"
)
// TODO use struct tag to set a field as required
var ErrNeedPointer = errors.New("cfgp: pointer to struct expected")
var ErrFileFormat = errors.New("cfgp: unrecognized file format, only (ini|txt|cfg) supported")
var ErrUnknownFlagType = errors.New("cfgp: unknown flag type")
// Path is the path to the configuration file.
// By default it is populated with the env var CFGP_FILE_PATH.
// This could be left empty if no configuration
// file is needed.
var Path string
func getStructValue(confPtr interface{}) (reflect.Value, error) {
v := reflect.ValueOf(confPtr)
if v.Kind() == reflect.Ptr {
return v.Elem(), nil
}
return reflect.Value{}, ErrNeedPointer
}
// myFlag implements Flag.Value.
type myFlag struct {
// FIXME field is used only for debugging
// purposes. Remove it?
field reflect.StructField
fieldValue reflect.Value
isBool bool
}
// String is used to print the default value by PrintDefaults().
func (s *myFlag) String() string {
var ret string
switch s.fieldValue.Kind() {
case reflect.String:
ret = s.fieldValue.String()
case reflect.Int:
ret = strconv.Itoa(int(s.fieldValue.Int()))
case reflect.Bool:
ret = strconv.FormatBool(s.fieldValue.Bool())
case reflect.Float64:
ret = strconv.FormatFloat(s.fieldValue.Float(), 'e', -1, 64)
default:
ret = "???"
}
return ret
}
// IsBoolFlag instructs the command-line parser
// to make -name equivalent to -name=true rather than
// using the next command-line argument.
func (s *myFlag) IsBoolFlag() bool {
return s.isBool
}
// assignType assigns passed arg string to underlying Go type.
func assignType(fieldValue reflect.Value, arg string) error {
if !fieldValue.CanSet() {
return ErrUnknownFlagType
}
switch fieldValue.Kind() {
case reflect.Int:
n, err := strconv.Atoi(arg)
if err != nil {
return err
}
fieldValue.SetInt(int64(n))
case reflect.Float64:
f, err := strconv.ParseFloat(arg, 64)
if err != nil {
return err
}
fieldValue.SetFloat(f)
case reflect.String:
fieldValue.SetString(arg)
case reflect.Bool:
b, err := strconv.ParseBool(arg)
if err != nil {
return err
}
fieldValue.SetBool(b)
default:
return ErrUnknownFlagType
}
return nil
}
// Set converts passed arguments to actual Go types.
func (s *myFlag) Set(arg string) error {
stracer.Traceln("setting flag", s.field.Name)
err := assignType(s.fieldValue, arg)
if err != nil {
return err
}
return nil
}
func helpMessageFromTags(f reflect.StructField) (string, bool) {
t := f.Tag.Get("cfgp")
tags := strings.Split(t, ",")
if len(tags) == 3 {
return tags[1], true
}
return "", false
}
func makeHelpMessage(f reflect.StructField) string {
var helpM string
switch f.Type.Kind() {
case reflect.Int:
if m, ok := helpMessageFromTags(f); ok {
helpM = m + ", an int"
} else {
helpM = "set an int"
}
case reflect.String:
if m, ok := helpMessageFromTags(f); ok {
helpM = m + ", a string"
} else {
helpM = "set a string"
}
case reflect.Bool:
if m, ok := helpMessageFromTags(f); ok {
helpM = m + ", a bool"
} else {
helpM = "set a bool"
}
case reflect.Float64:
if m, ok := helpMessageFromTags(f); ok {
helpM = m + ", a float64"
} else {
helpM = "set a float64"
}
default:
helpM = "unknown flag kind"
}
return helpM
}
func isBool(v reflect.Value) bool {
if v.Kind() == reflect.Bool {
return true
}
return false
}
func nameFromTags(f reflect.StructField) (string, bool) {
t := f.Tag.Get("cfgp")
tags := strings.Split(t, ",")
// if the name position is empty return false, e.g.:
// `cfgp:",help message,"`
if len(tags) == 3 && tags[0] != "" {
return tags[0], true
}
return "", false
}
// FIXME can we simplify using structType := structValue.Type()?
func createFlag(f reflect.StructField, fieldValue reflect.Value, fs *flag.FlagSet) {
name := strings.ToLower(f.Name)
if n, ok := nameFromTags(f); ok {
name = n
}
stracer.Traceln("creating flag:", name)
fs.Var(&myFlag{f, fieldValue, isBool(fieldValue)}, name, makeHelpMessage(f))
}
// hasTestFlag helps to identify if a test
// is running with flags that can make
// flagSet.Parse() fail.
func hasTestFlag(args []string) bool {
for _, f := range args {
if f == `-test.v=true` {
stracer.Traceln("test flag found")
return true
}
}
return false
}
// parseFlags parses struct fields, creates command line arguments
// and check if they are passed as arguments.
func parseFlags(s reflect.Value) error {
flagSet := flag.NewFlagSet("cfgp", flag.ExitOnError)
flagSet.Usage = func() {
fmt.Fprintf(os.Stderr, "Usage of %s:\n", os.Args[0])
flagSet.PrintDefaults()
}
typeOfT := s.Type()
for i := 0; i < s.NumField(); i++ {
fieldValue := s.Field(i)
if fieldValue.CanSet() {
createFlag(typeOfT.Field(i), fieldValue, flagSet)
}
}
args := os.Args[1:]
if hasTestFlag(os.Args[1:]) {
args = []string{}
}
err := flagSet.Parse(args)
if err != nil {
return err
}
return nil
}
// Parse populates the passed struct (via pointer) with configuration from various sources.
// It guesses the configuration type by file extension and calls the specific parser.
// (.ini|.txt|.cfg) are evaluated as INI files, which is the only format supported for now.
// Path can be an empty string to disable file parsing.
func Parse(confPtr interface{}) error {
structValue, err := getStructValue(confPtr)
if err != nil {
return err
}
if Path != "" {
if match, _ := regexp.MatchString(`\.(ini|txt|cfg)$`, Path); match {
err := parseINI(Path, structValue)
if err != nil {
return err
}
} else if match, _ := regexp.MatchString(`\.(yaml)$`, Path); match {
return errors.New("YAML not yet implemented. Want you help?")
} else {
return ErrFileFormat
}
}
// Command line arguments override configuration file.
err = parseFlags(structValue)
if err != nil {
return err
}
return nil
}
func init() {
Path = os.Getenv("CFGP_FILE_PATH")
stracer.Traceln("init file path:", Path)
}
|
[
"\"CFGP_FILE_PATH\""
] |
[] |
[
"CFGP_FILE_PATH"
] |
[]
|
["CFGP_FILE_PATH"]
|
go
| 1 | 0 | |
app/__init__.py
|
"""
module init
"""
import os
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from config import config_options, DevelopmentConfig
from .views import orders_blue_print
def create_app(config_class=DevelopmentConfig):
"""
Function create_app:
creates app and gives it the import name
holds the configuration being used.
registers the orders blueprint
:return: app:
"""
app = Flask(__name__)
app.config.from_object(config_class)
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
# set the configurations
app.config.from_object(os.environ['APP_SETTINGS'])
# initialize the database
db = SQLAlchemy(app)
with app.app_context():
from . import routes
db.create_all()
# register the blueprints
app.register_blueprint(orders_blue_print)
from app.main import main
from app.auth import auth
app.register_blueprint(main)
app.register_blueprint(auth)
@app.route('/')
def hello():
return "Hello World!"
return app
|
[] |
[] |
[
"APP_SETTINGS"
] |
[]
|
["APP_SETTINGS"]
|
python
| 1 | 0 | |
pixela/data_source_graphs_test.go
|
package pixela
import (
"context"
"os"
"testing"
"github.com/budougumi0617/pixela"
"github.com/google/go-cmp/cmp"
"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
)
func Test_dataSourceGraphsRead(t *testing.T) {
tests := [...]struct {
name string
want diag.Diagnostics
}{
{
name: "confirmClientResponse",
want: nil,
},
}
for _, tt := range tests {
username := os.Getenv("PIXELA_USERNAME")
token := os.Getenv("PIXELA_TOKEN")
if username == "" || token == "" {
t.SkipNow()
}
t.Run(tt.name, func(t *testing.T) {
m := pixela.New(username, token)
d := dataSourceGraphs().TestResourceData()
got := dataSourceGraphsRead(context.TODO(), d, m)
if diff := cmp.Diff(got, tt.want); diff != "" {
t.Errorf("dataSourceGraphsRead: (-got +want)\n%s", diff)
}
})
}
}
|
[
"\"PIXELA_USERNAME\"",
"\"PIXELA_TOKEN\""
] |
[] |
[
"PIXELA_TOKEN",
"PIXELA_USERNAME"
] |
[]
|
["PIXELA_TOKEN", "PIXELA_USERNAME"]
|
go
| 2 | 0 | |
library/python/pysandesh/trace.py
|
#
# Copyright (c) 2013 Juniper Networks, Inc. All rights reserved.
#
#
# Trace
#
import collections
import itertools
class TraceBuffer(object):
def __init__(self, name, size, enable=True):
self._name = name
self._size = size
self._enable = enable
self._seqno = 0
self._buf = collections.deque(maxlen=self._size)
# Reserve 0 and max(uint32_t)
self._kMaxSeqno = ((2 ** 32) - 1) - 1;
self._kMinSeqno = 1;
self._read_context_map = {}
self._wrap = False
self._read_index = 0
self._write_index = 0
#end __init__
# Public functions
def TraceOn(self):
self._enable = True
#end TraceOn
def TraceOff(self):
self._enable = False
#end TraceOff
def IsTraceOn(self):
return self._enable
#end IsTraceOn
def TraceBufSizeGet(self):
return self._size
#end TraceBufSizeGet
def TraceWrite(self, entry):
# Add the trace to the right
self._buf.append(entry);
# Once the trace buffer is wrapped, increment the read index
if self._wrap:
self._read_index += 1
if self._read_index == self._size:
self._read_index = 0
# Increment the write_index_ and reset upon reaching trace_buf_size_
self._write_index += 1
if self._write_index == self._size:
self._write_index = 0
self._wrap = True
# Trace messages could be read in batches instead of reading
# the entire trace buffer in one shot. Therefore, trace messages
# could be added between subsequent read requests. If the
# read_index_ [points to the oldest message in the trace buffer]
# becomes same as the read index [points to the position in the
# trace buffer from where the next trace message should be read]
# stored in the read context, then there is no need to remember the
# read context.
for key, value in list(self._read_context_map.items()):
if value == self._read_index:
self._read_context_map.pop(key, None)
# Reset seqno_ if it reaches max value
self._seqno += 1
if self._seqno > self._kMaxSeqno:
self._seqno = self._kMinSeqno;
return self._seqno
#end TraceWrite
def TraceRead(self, context, count, read_cb):
if len(self._buf) == 0:
return
# if count = 0, then set equal to the size of _buf
if count == 0:
count = len(self._buf)
if context in self._read_context_map:
# If the read context is present, manipulate the position
# from where we wanna start
offset = self._read_context_map[context] - self._read_index
if not offset > 0:
offset = self._size + offset
read_slice_list = list(itertools.islice(self._buf, offset, len(self._buf)))
entry_count = 0
for entry in read_slice_list:
if entry_count < count:
entry_count += 1
read_cb(entry, entry != self._buf[-1])
else:
break
else:
# Create read context
self._read_context_map[context] = self._read_index
entry_count = 0
for entry in self._buf:
if entry_count < count:
entry_count += 1
read_cb(entry, entry != self._buf[-1])
else:
break
# Update the read index in the read context
offset = self._read_context_map[context] + entry_count;
if offset >= self._size:
self._read_context_map[context] = offset - self._size
else:
self._read_context_map[context] = offset
#end TraceRead
def TraceReadDone(self, context):
self._read_context_map.pop(context, None)
#end class TraceBuffer
class Trace(object):
def __init__(self):
self._buffer_map = {}
self._enable = True
#end __init__
# Public functions
def TraceOn(self):
self._enable = True
#end TraceOn
def TraceOff(self):
self._enable = False
#end TraceOff
def IsTraceOn(self):
return self._enable;
#end IsTraceOn
def TraceBufAdd(self, name, size, enable=True):
# Should we have a default size for the buffer?
if size == 0:
return
if name not in self._buffer_map:
buffer = TraceBuffer(name, size, enable)
self._buffer_map[name] = buffer
#end TraceBufAdd
def TraceBufDelete(self, name):
self._buffer_map.pop(name, None)
#end TraceBufDelete
def TraceBufListGet(self):
return self._buffer_map.keys()
#end TraceBufListGet
def TraceBufOn(self, name):
if name in self._buffer_map:
self._buffer_map[name].TraceOn()
#end TraceBufOn
def TraceBufOff(self, name):
if name in self._buffer_map:
self._buffer_map[name].TraceOff()
#end TraceBufOff
def IsTraceBufOn(self, name):
if name in self._buffer_map:
return self._buffer_map[name].IsTraceOn()
else:
return False
#end IsTraceBufOn
def TraceBufSizeGet(self, name):
if name in self._buffer_map:
return self._buffer_map[name].TraceBufSizeGet()
else:
return 0
#end TraceBufSizeGet
def TraceWrite(self, name, entry):
if name in self._buffer_map:
return self._buffer_map[name].TraceWrite(entry)
#end TraceWrite
def TraceRead(self, name, context, count, read_cb):
if name in self._buffer_map:
self._buffer_map[name].TraceRead(context, count, read_cb)
#end TraceRead
def TraceReadDone(self, name, context):
if name in self._buffer_map:
self._buffer_map[name].TraceReadDone(context)
#end TraceReadDone
#end class Trace
|
[] |
[] |
[] |
[]
|
[]
|
python
| null | null | null |
internal/jsonmessage/jsonmessage.go
|
// Copyright 2014 Docker authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the DOCKER-LICENSE file.
package jsonmessage
import (
"encoding/json"
"fmt"
"io"
"os"
"strings"
"time"
units "github.com/docker/go-units"
"github.com/abrechon/go-dockerclient/internal/term"
gotty "github.com/ijc/Gotty"
)
// RFC3339NanoFixed is time.RFC3339Nano with nanoseconds padded using zeros to
// ensure the formatted time is always the same number of characters.
const RFC3339NanoFixed = "2006-01-02T15:04:05.000000000Z07:00"
// JSONError wraps a concrete Code and Message. `Code` is
// an integer error code, `Message` is the error message.
type JSONError struct {
Code int `json:"code,omitempty"`
Message string `json:"message,omitempty"`
}
func (e *JSONError) Error() string {
return e.Message
}
// JSONProgress describes a Progress. terminalFd is the fd of the current terminal,
// Start is the initial value for the operation. Current is the current status and
// value of the progress made towards Total. Total is the end value describing when
// we made 100% progress for an operation.
type JSONProgress struct {
terminalFd uintptr
Current int64 `json:"current,omitempty"`
Total int64 `json:"total,omitempty"`
Start int64 `json:"start,omitempty"`
// If true, don't show xB/yB
HideCounts bool `json:"hidecounts,omitempty"`
Units string `json:"units,omitempty"`
nowFunc func() time.Time
winSize int
}
func (p *JSONProgress) String() string {
var (
width = p.width()
pbBox string
numbersBox string
timeLeftBox string
)
if p.Current <= 0 && p.Total <= 0 {
return ""
}
if p.Total <= 0 {
switch p.Units {
case "":
current := units.HumanSize(float64(p.Current))
return fmt.Sprintf("%8v", current)
default:
return fmt.Sprintf("%d %s", p.Current, p.Units)
}
}
percentage := int(float64(p.Current)/float64(p.Total)*100) / 2
if percentage > 50 {
percentage = 50
}
if width > 110 {
// this number can't be negative gh#7136
numSpaces := 0
if 50-percentage > 0 {
numSpaces = 50 - percentage
}
pbBox = fmt.Sprintf("[%s>%s] ", strings.Repeat("=", percentage), strings.Repeat(" ", numSpaces))
}
switch {
case p.HideCounts:
case p.Units == "": // no units, use bytes
current := units.HumanSize(float64(p.Current))
total := units.HumanSize(float64(p.Total))
numbersBox = fmt.Sprintf("%8v/%v", current, total)
if p.Current > p.Total {
// remove total display if the reported current is wonky.
numbersBox = fmt.Sprintf("%8v", current)
}
default:
numbersBox = fmt.Sprintf("%d/%d %s", p.Current, p.Total, p.Units)
if p.Current > p.Total {
// remove total display if the reported current is wonky.
numbersBox = fmt.Sprintf("%d %s", p.Current, p.Units)
}
}
if p.Current > 0 && p.Start > 0 && percentage < 50 {
fromStart := p.now().Sub(time.Unix(p.Start, 0))
perEntry := fromStart / time.Duration(p.Current)
left := time.Duration(p.Total-p.Current) * perEntry
left = (left / time.Second) * time.Second
if width > 50 {
timeLeftBox = " " + left.String()
}
}
return pbBox + numbersBox + timeLeftBox
}
// shim for testing
func (p *JSONProgress) now() time.Time {
if p.nowFunc == nil {
p.nowFunc = func() time.Time {
return time.Now().UTC()
}
}
return p.nowFunc()
}
// shim for testing
func (p *JSONProgress) width() int {
if p.winSize != 0 {
return p.winSize
}
ws, err := term.GetWinsize(p.terminalFd)
if err == nil {
return int(ws.Width)
}
return 200
}
// JSONMessage defines a message struct. It describes
// the created time, where it is from, the status, and the ID of the
// message. It's used for docker events.
type JSONMessage struct {
Stream string `json:"stream,omitempty"`
Status string `json:"status,omitempty"`
Progress *JSONProgress `json:"progressDetail,omitempty"`
ProgressMessage string `json:"progress,omitempty"` // deprecated
ID string `json:"id,omitempty"`
From string `json:"from,omitempty"`
Time int64 `json:"time,omitempty"`
TimeNano int64 `json:"timeNano,omitempty"`
Error *JSONError `json:"errorDetail,omitempty"`
ErrorMessage string `json:"error,omitempty"` // deprecated
// Aux contains out-of-band data, such as digests for push signing and image id after building.
Aux *json.RawMessage `json:"aux,omitempty"`
}
/* Satisfied by gotty.TermInfo as well as noTermInfo from below */
type termInfo interface {
Parse(attr string, params ...interface{}) (string, error)
}
type noTermInfo struct{} // canary used when no terminfo.
func (ti *noTermInfo) Parse(attr string, params ...interface{}) (string, error) {
return "", fmt.Errorf("noTermInfo")
}
func clearLine(out io.Writer, ti termInfo) error {
// el2 (clear whole line) is not exposed by terminfo.
// First clear line from beginning to cursor
if attr, err := ti.Parse("el1"); err == nil {
_, err = fmt.Fprintf(out, "%s", attr)
if err != nil {
return err
}
} else {
_, err := fmt.Fprintf(out, "\x1b[1K")
if err != nil {
return err
}
}
// Then clear line from cursor to end
if attr, err := ti.Parse("el"); err == nil {
_, err = fmt.Fprintf(out, "%s", attr)
if err != nil {
return err
}
} else {
_, err := fmt.Fprintf(out, "\x1b[K")
if err != nil {
return err
}
}
return nil
}
func cursorUp(out io.Writer, ti termInfo, l int) error {
if l == 0 { // Should never be the case, but be tolerant
return nil
}
if attr, err := ti.Parse("cuu", l); err == nil {
_, err = fmt.Fprintf(out, "%s", attr)
if err != nil {
return err
}
} else {
_, err := fmt.Fprintf(out, "\x1b[%dA", l)
if err != nil {
return err
}
}
return nil
}
func cursorDown(out io.Writer, ti termInfo, l int) error {
if l == 0 { // Should never be the case, but be tolerant
return nil
}
if attr, err := ti.Parse("cud", l); err == nil {
_, err = fmt.Fprintf(out, "%s", attr)
if err != nil {
return err
}
} else {
_, err := fmt.Fprintf(out, "\x1b[%dB", l)
if err != nil {
return err
}
}
return nil
}
// Display displays the JSONMessage to `out`. `termInfo` is non-nil if `out`
// is a terminal. If this is the case, it will erase the entire current line
// when displaying the progressbar.
func (jm *JSONMessage) Display(out io.Writer, termInfo termInfo) error {
if jm.Error != nil {
if jm.Error.Code == 401 {
return fmt.Errorf("authentication is required")
}
return jm.Error
}
var endl string
if termInfo != nil && jm.Stream == "" && jm.Progress != nil {
clearLine(out, termInfo)
endl = "\r"
_, err := fmt.Fprint(out, endl)
if err != nil {
return err
}
} else if jm.Progress != nil && jm.Progress.String() != "" { // disable progressbar in non-terminal
return nil
}
if jm.TimeNano != 0 {
_, err := fmt.Fprintf(out, "%s ", time.Unix(0, jm.TimeNano).Format(RFC3339NanoFixed))
if err != nil {
return err
}
} else if jm.Time != 0 {
_, err := fmt.Fprintf(out, "%s ", time.Unix(jm.Time, 0).Format(RFC3339NanoFixed))
if err != nil {
return err
}
}
if jm.ID != "" {
_, err := fmt.Fprintf(out, "%s: ", jm.ID)
if err != nil {
return err
}
}
if jm.From != "" {
_, err := fmt.Fprintf(out, "(from %s) ", jm.From)
if err != nil {
return err
}
}
if jm.Progress != nil && termInfo != nil {
_, err := fmt.Fprintf(out, "%s %s%s", jm.Status, jm.Progress.String(), endl)
if err != nil {
return err
}
} else if jm.ProgressMessage != "" { // deprecated
_, err := fmt.Fprintf(out, "%s %s%s", jm.Status, jm.ProgressMessage, endl)
if err != nil {
return err
}
} else if jm.Stream != "" {
_, err := fmt.Fprintf(out, "%s%s", jm.Stream, endl)
if err != nil {
return err
}
} else {
_, err := fmt.Fprintf(out, "%s%s\n", jm.Status, endl)
if err != nil {
return err
}
}
return nil
}
// DisplayJSONMessagesStream displays a json message stream from `in` to `out`, `isTerminal`
// describes if `out` is a terminal. If this is the case, it will print `\n` at the end of
// each line and move the cursor while displaying.
func DisplayJSONMessagesStream(in io.Reader, out io.Writer, terminalFd uintptr, isTerminal bool, auxCallback func(JSONMessage)) error {
var (
dec = json.NewDecoder(in)
ids = make(map[string]int)
)
var termInfo termInfo
if isTerminal {
term := os.Getenv("TERM")
if term == "" {
term = "vt102"
}
var err error
if termInfo, err = gotty.OpenTermInfo(term); err != nil {
termInfo = &noTermInfo{}
}
}
for {
diff := 0
var jm JSONMessage
if err := dec.Decode(&jm); err != nil {
if err == io.EOF {
break
}
return err
}
if jm.Aux != nil {
if auxCallback != nil {
auxCallback(jm)
}
continue
}
if jm.Progress != nil {
jm.Progress.terminalFd = terminalFd
}
if jm.ID != "" && (jm.Progress != nil || jm.ProgressMessage != "") {
line, ok := ids[jm.ID]
if !ok {
// NOTE: This approach of using len(id) to
// figure out the number of lines of history
// only works as long as we clear the history
// when we output something that's not
// accounted for in the map, such as a line
// with no ID.
line = len(ids)
ids[jm.ID] = line
if termInfo != nil {
_, err := fmt.Fprintf(out, "\n")
if err != nil {
return err
}
}
}
diff = len(ids) - line
if termInfo != nil {
if err := cursorUp(out, termInfo, diff); err != nil {
return err
}
}
} else {
// When outputting something that isn't progress
// output, clear the history of previous lines. We
// don't want progress entries from some previous
// operation to be updated (for example, pull -a
// with multiple tags).
ids = make(map[string]int)
}
err := jm.Display(out, termInfo)
if jm.ID != "" && termInfo != nil {
if err := cursorDown(out, termInfo, diff); err != nil {
return err
}
}
if err != nil {
return err
}
}
return nil
}
type stream interface {
io.Writer
FD() uintptr
IsTerminal() bool
}
// DisplayJSONMessagesToStream prints json messages to the output stream
func DisplayJSONMessagesToStream(in io.Reader, stream stream, auxCallback func(JSONMessage)) error {
return DisplayJSONMessagesStream(in, stream, stream.FD(), stream.IsTerminal(), auxCallback)
}
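// displayJSONStream is an editor's sketch, not part of the upstream package:
// it shows how the helpers above are typically driven, feeding a JSON message
// stream (for example a Docker API response body) to stdout. The isTerminal
// flag would normally come from a terminal check on stdout.
func displayJSONStream(body io.Reader, isTerminal bool) error {
// os.Stdout.Fd() supplies the terminal fd used to size the progress bar;
// auxCallback is nil because out-of-band messages are ignored here.
return DisplayJSONMessagesStream(body, os.Stdout, os.Stdout.Fd(), isTerminal, nil)
}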
|
[
"\"TERM\""
] |
[] |
[
"TERM"
] |
[]
|
["TERM"]
|
go
| 1 | 0 | |
inttest/sonobuoy/signetwork_test.go
|
/*
Copyright 2020 Mirantis, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package sonobuoy
import (
"fmt"
"io/ioutil"
"os"
"os/exec"
"path"
"strings"
"testing"
"time"
"github.com/avast/retry-go"
"github.com/stretchr/testify/suite"
"k8s.io/client-go/tools/clientcmd"
"github.com/k0sproject/k0s/inttest/common"
)
type NetworkSuite struct {
common.VMSuite
sonoBin string
}
func (s *NetworkSuite) TestSigNetwork() {
s.NoError(s.InitMainController())
s.NoError(s.RunWorkers())
kc, err := s.KubeClient(s.ControllerIP)
s.NoError(err)
err = s.WaitForNodeReady("worker-0", kc)
s.NoError(err)
err = s.WaitForNodeReady("worker-1", kc)
s.NoError(err)
kubeconfigPath := s.dumpKubeConfig()
s.T().Logf("kubeconfig at: %s", kubeconfigPath)
err = os.Setenv("KUBECONFIG", kubeconfigPath)
s.NoError(err)
sonoArgs := []string{
"run",
"--wait=1200", // 20mins
"--plugin=e2e",
"--plugin-env=e2e.E2E_USE_GO_RUNNER=true",
`--e2e-focus=\[sig-network\].*\[Conformance\]`,
`--e2e-skip=\[Serial\]`,
"--e2e-parallel=y",
"--kube-conformance-image-version=v1.20.0",
}
s.T().Log("running sonobuoy, this may take a while")
sonoFinished := make(chan bool)
go func() {
timer := time.NewTicker(30 * time.Second)
defer timer.Stop()
for {
select {
case <-sonoFinished:
return
case <-timer.C:
s.T().Logf("sonobuoy still running, please wait...")
}
}
}()
sonoCmd := exec.Command(s.sonoBin, sonoArgs...)
sonoCmd.Stdout = os.Stdout
sonoCmd.Stderr = os.Stderr
err = sonoCmd.Run()
sonoFinished <- true
if err != nil {
s.T().Logf("error executing sonobouy: %s", err.Error())
}
s.NoError(err)
s.T().Log("sonobuoy has been ran succesfully, collecting results")
results, err := s.retrieveResults()
s.NoError(err)
s.T().Logf("sonobuoy results:%+v", results)
s.Equal("passed", results.Status)
s.Equal(0, results.Failed)
if results.Status != "passed" {
s.T().Logf("sonobuoy run failed, you can see more details on the failing tests with: %s results %s", s.sonoBin, results.ResultPath)
}
}
func (s *NetworkSuite) retrieveResults() (Result, error) {
var resultPath string
err := retry.Do(func() error {
retrieveCmd := exec.Command(s.sonoBin, "retrieve")
retrieveOutput, err := retrieveCmd.Output()
if err != nil {
return err
}
resultPath = strings.Trim(string(retrieveOutput), "\n")
return nil
}, retry.Attempts(3))
if err != nil {
return Result{}, err
}
cwd, err := os.Getwd()
if err != nil {
return Result{}, err
}
resultPath = path.Join(cwd, resultPath)
s.T().Logf("sonobuoy results stored at: %s", resultPath)
resultArgs := []string{
"results",
"--plugin=e2e",
resultPath,
}
resultCmd := exec.Command(s.sonoBin, resultArgs...)
resultOutput, err := resultCmd.CombinedOutput()
if err != nil {
s.T().Logf("sono results output:\n%s", string(resultOutput))
return Result{}, err
}
result, err := ResultFromString(string(resultOutput))
result.ResultPath = resultPath
return result, err
}
func (s *NetworkSuite) dumpKubeConfig() string {
dir, err := ioutil.TempDir("", "sig-network-kubeconfig-")
s.NoError(err)
ssh, err := s.SSH(s.ControllerIP)
s.NoError(err)
defer ssh.Disconnect()
kubeConf, err := ssh.ExecWithOutput("sudo -h 127.0.0.1 cat /var/lib/k0s/pki/admin.conf")
s.NoError(err)
cfg, err := clientcmd.Load([]byte(kubeConf))
s.NoError(err)
cfg.Clusters["local"].Server = fmt.Sprintf("https://%s:%d", s.ControllerIP, 6443)
// Our CA data is valid for localhost, but we need to change that in order to connect from outside
cfg.Clusters["local"].InsecureSkipTLSVerify = true
cfg.Clusters["local"].CertificateAuthorityData = nil
kubeconfigPath := path.Join(dir, "kubeconfig")
err = clientcmd.WriteToFile(*cfg, kubeconfigPath)
s.NoError(err)
return kubeconfigPath
}
func TestVMNetworkSuite(t *testing.T) {
sonoPath := os.Getenv("SONOBUOY_PATH")
if sonoPath == "" {
t.Fatal("SONOBUOY_PATH env needs to be set")
}
s := NetworkSuite{
common.VMSuite{},
sonoPath,
}
suite.Run(t, &s)
}
|
[
"\"SONOBUOY_PATH\""
] |
[] |
[
"SONOBUOY_PATH"
] |
[]
|
["SONOBUOY_PATH"]
|
go
| 1 | 0 | |
rpcv/common/log.py
|
import logging
import os
import structlog
def get_logger(log_name: str = __name__) -> structlog._config.BoundLoggerLazyProxy:
"""Just stubbed out in case we want later configuration."""
stagename = os.environ.get("STAGE", "user")
processors = [
# This performs the initial filtering, so we don't
# evaluate e.g. DEBUG when unnecessary
structlog.stdlib.filter_by_level,
# Adds logger=module_name (e.g __main__)
structlog.stdlib.add_logger_name,
# Adds level=info, debug, etc.
structlog.stdlib.add_log_level,
# Who doesn't like timestamps?
structlog.processors.TimeStamper(fmt="iso"),
# Performs the % string interpolation as expected
structlog.stdlib.PositionalArgumentsFormatter(),
# Include the stack when stack_info=True
structlog.processors.StackInfoRenderer(),
# Include the exception when exc_info=True
# e.g log.exception() or log.warning(exc_info=True)'s behavior
structlog.processors.format_exc_info,
]
if stagename in ["dev", "prod"]:
processors.extend(
[
# Decodes the unicode values in any kv pairs
structlog.processors.UnicodeDecoder(),
# Creates the necessary args, kwargs for log()
structlog.processors.JSONRenderer(indent=2, sort_keys=True),
]
)
else:
processors.append(
# All the pretty colors
structlog.dev.ConsoleRenderer()
)
structlog.configure(
processors=processors,
# Our "event_dict" is explicitly a dict
# There's also structlog.threadlocal.wrap_dict(dict) in some examples
# which keeps global context as well as thread locals
context_class=dict,
# Provides the logging.Logger for the underlying log call
logger_factory=structlog.stdlib.LoggerFactory(),
# Provides predefined methods - log.debug(), log.info(), etc.
wrapper_class=structlog.stdlib.BoundLogger,
# Caching of our logger
cache_logger_on_first_use=True,
)
logging.basicConfig(level=logging.DEBUG)
logger = structlog.wrap_logger(logging.getLogger(log_name))
logger.setLevel(logging.DEBUG) # Good old aws screwing things up
return logger
|
[] |
[] |
[
"STAGE"
] |
[]
|
["STAGE"]
|
python
| 1 | 0 | |
contrib/spendfrom/spendfrom.py
|
#!/usr/bin/env python
#
# Use the raw transactions API to spend eves received on particular addresses,
# and send any change back to that same address.
#
# Example usage:
# spendfrom.py # Lists available funds
# spendfrom.py --from=ADDRESS --to=ADDRESS --amount=11.00
#
# Assumes it will talk to a eved or Eve-Qt running
# on localhost.
#
# Depends on jsonrpc
#
from decimal import *
import getpass
import math
import os
import os.path
import platform
import sys
import time
from jsonrpc import ServiceProxy, json
BASE_FEE=Decimal("0.001")
def check_json_precision():
"""Make sure json library being used does not lose precision converting BTC values"""
n = Decimal("20000000.00000003")
satoshis = int(json.loads(json.dumps(float(n)))*1.0e8)
if satoshis != 2000000000000003:
raise RuntimeError("JSON encode/decode loses precision")
def determine_db_dir():
"""Return the default location of the eve data directory"""
if platform.system() == "Darwin":
return os.path.expanduser("~/Library/Application Support/Eve/")
elif platform.system() == "Windows":
return os.path.join(os.environ['APPDATA'], "Eve")
return os.path.expanduser("~/.eve")
def read_bitcoin_config(dbdir):
"""Read the eve.conf file from dbdir, returns dictionary of settings"""
from ConfigParser import SafeConfigParser
class FakeSecHead(object):
def __init__(self, fp):
self.fp = fp
self.sechead = '[all]\n'
def readline(self):
if self.sechead:
try: return self.sechead
finally: self.sechead = None
else:
s = self.fp.readline()
if s.find('#') != -1:
s = s[0:s.find('#')].strip() +"\n"
return s
config_parser = SafeConfigParser()
config_parser.readfp(FakeSecHead(open(os.path.join(dbdir, "eve.conf"))))
return dict(config_parser.items("all"))
def connect_JSON(config):
"""Connect to a eve JSON-RPC server"""
testnet = config.get('testnet', '0')
testnet = (int(testnet) > 0) # 0/1 in config file, convert to True/False
if not 'rpcport' in config:
config['rpcport'] = 19886 if testnet else 9886
connect = "http://%s:%[email protected]:%s"%(config['rpcuser'], config['rpcpassword'], config['rpcport'])
try:
result = ServiceProxy(connect)
# ServiceProxy is lazy-connect, so send an RPC command mostly to catch connection errors,
# but also make sure the eved we're talking to is/isn't testnet:
if result.getmininginfo()['testnet'] != testnet:
sys.stderr.write("RPC server at "+connect+" testnet setting mismatch\n")
sys.exit(1)
return result
except:
sys.stderr.write("Error connecting to RPC server at "+connect+"\n")
sys.exit(1)
def unlock_wallet(eved):
info = eved.getinfo()
if 'unlocked_until' not in info:
return True # wallet is not encrypted
t = int(info['unlocked_until'])
if t <= time.time():
try:
passphrase = getpass.getpass("Wallet is locked; enter passphrase: ")
eved.walletpassphrase(passphrase, 5)
except:
sys.stderr.write("Wrong passphrase\n")
info = eved.getinfo()
return int(info['unlocked_until']) > time.time()
def list_available(eved):
address_summary = dict()
address_to_account = dict()
for info in eved.listreceivedbyaddress(0):
address_to_account[info["address"]] = info["account"]
unspent = eved.listunspent(0)
for output in unspent:
# listunspent doesn't give addresses, so:
rawtx = eved.getrawtransaction(output['txid'], 1)
vout = rawtx["vout"][output['vout']]
pk = vout["scriptPubKey"]
# This code only deals with ordinary pay-to-eve-address
# or pay-to-script-hash outputs right now; anything exotic is ignored.
if pk["type"] != "pubkeyhash" and pk["type"] != "scripthash":
continue
address = pk["addresses"][0]
if address in address_summary:
address_summary[address]["total"] += vout["value"]
address_summary[address]["outputs"].append(output)
else:
address_summary[address] = {
"total" : vout["value"],
"outputs" : [output],
"account" : address_to_account.get(address, "")
}
return address_summary
def select_coins(needed, inputs):
# Feel free to improve this, this is good enough for my simple needs:
outputs = []
have = Decimal("0.0")
n = 0
while have < needed and n < len(inputs):
outputs.append({ "txid":inputs[n]["txid"], "vout":inputs[n]["vout"]})
have += inputs[n]["amount"]
n += 1
return (outputs, have-needed)
def create_tx(eved, fromaddresses, toaddress, amount, fee):
all_coins = list_available(eved)
total_available = Decimal("0.0")
needed = amount+fee
potential_inputs = []
for addr in fromaddresses:
if addr not in all_coins:
continue
potential_inputs.extend(all_coins[addr]["outputs"])
total_available += all_coins[addr]["total"]
if total_available < needed:
sys.stderr.write("Error, only %f BTC available, need %f\n"%(total_available, needed));
sys.exit(1)
#
# Note:
# Python's json/jsonrpc modules have inconsistent support for Decimal numbers.
# Instead of wrestling with getting json.dumps() (used by jsonrpc) to encode
# Decimals, I'm casting amounts to float before sending them to eved.
#
outputs = { toaddress : float(amount) }
(inputs, change_amount) = select_coins(needed, potential_inputs)
if change_amount > BASE_FEE: # don't bother with zero or tiny change
change_address = fromaddresses[-1]
if change_address in outputs:
outputs[change_address] += float(change_amount)
else:
outputs[change_address] = float(change_amount)
rawtx = eved.createrawtransaction(inputs, outputs)
signed_rawtx = eved.signrawtransaction(rawtx)
if not signed_rawtx["complete"]:
sys.stderr.write("signrawtransaction failed\n")
sys.exit(1)
txdata = signed_rawtx["hex"]
return txdata
def compute_amount_in(eved, txinfo):
result = Decimal("0.0")
for vin in txinfo['vin']:
in_info = eved.getrawtransaction(vin['txid'], 1)
vout = in_info['vout'][vin['vout']]
result = result + vout['value']
return result
def compute_amount_out(txinfo):
result = Decimal("0.0")
for vout in txinfo['vout']:
result = result + vout['value']
return result
def sanity_test_fee(eved, txdata_hex, max_fee):
class FeeError(RuntimeError):
pass
try:
txinfo = eved.decoderawtransaction(txdata_hex)
total_in = compute_amount_in(eved, txinfo)
total_out = compute_amount_out(txinfo)
# the fee is the difference between what goes in and what comes out
fee = total_in - total_out
if fee > max_fee:
raise FeeError("Rejecting transaction, unreasonable fee of "+str(total_in-total_out))
tx_size = len(txdata_hex)/2
kb = tx_size/1000 # integer division rounds down
if kb > 1 and fee < BASE_FEE:
raise FeeError("Rejecting no-fee transaction, larger than 1000 bytes")
if total_in < 0.01 and fee < BASE_FEE:
raise FeeError("Rejecting no-fee, tiny-amount transaction")
# Exercise for the reader: compute transaction priority, and
# warn if this is a very-low-priority transaction
except FeeError as err:
sys.stderr.write((str(err)+"\n"))
sys.exit(1)
def main():
import optparse
parser = optparse.OptionParser(usage="%prog [options]")
parser.add_option("--from", dest="fromaddresses", default=None,
help="addresses to get eves from")
parser.add_option("--to", dest="to", default=None,
help="address to get send eves to")
parser.add_option("--amount", dest="amount", default=None,
help="amount to send")
parser.add_option("--fee", dest="fee", default="0.0",
help="fee to include")
parser.add_option("--datadir", dest="datadir", default=determine_db_dir(),
help="location of eve.conf file with RPC username/password (default: %default)")
parser.add_option("--testnet", dest="testnet", default=False, action="store_true",
help="Use the test network")
parser.add_option("--dry_run", dest="dry_run", default=False, action="store_true",
help="Don't broadcast the transaction, just create and print the transaction data")
(options, args) = parser.parse_args()
check_json_precision()
config = read_bitcoin_config(options.datadir)
if options.testnet: config['testnet'] = True
eved = connect_JSON(config)
if options.amount is None:
address_summary = list_available(eved)
for address,info in address_summary.iteritems():
n_transactions = len(info['outputs'])
if n_transactions > 1:
print("%s %.8f %s (%d transactions)"%(address, info['total'], info['account'], n_transactions))
else:
print("%s %.8f %s"%(address, info['total'], info['account']))
else:
fee = Decimal(options.fee)
amount = Decimal(options.amount)
while unlock_wallet(eved) == False:
pass # Keep asking for passphrase until they get it right
txdata = create_tx(eved, options.fromaddresses.split(","), options.to, amount, fee)
sanity_test_fee(eved, txdata, amount*Decimal("0.01"))
if options.dry_run:
print(txdata)
else:
txid = eved.sendrawtransaction(txdata)
print(txid)
if __name__ == '__main__':
main()
|
[] |
[] |
[
"APPDATA"
] |
[]
|
["APPDATA"]
|
python
| 1 | 0 | |
mineman/pkg/app/app.go
|
package app
import (
"context"
"os"
"path"
"syscall"
"github.com/euiko/tooyoul/mineman/pkg/app/api"
"github.com/euiko/tooyoul/mineman/pkg/config"
"github.com/euiko/tooyoul/mineman/pkg/log"
"github.com/euiko/tooyoul/mineman/pkg/runner"
)
type App struct {
config config.Config
name string
hook Hook
injectedVals []interface{}
modules []api.Module
}
var registry ModuleRegistry
func (a *App) Inject(vals ...interface{}) {
a.injectedVals = append(a.injectedVals, vals...)
}
func (a *App) Run() error {
// initialize logger
l := log.NewLogrusLogger()
ctx := log.InjectContext(context.Background(), l)
ctx, cancel := context.WithCancel(ctx)
// load config
viperOpts := []config.ViperOptions{}
homeDir := os.Getenv("HOME")
if homeDir != "" {
viperOpts = append(viperOpts,
config.ViperPaths(homeDir),
config.ViperPaths(path.Join(homeDir, ".config", a.name)),
)
}
a.config = config.NewViper(a.name, viperOpts...)
// load logger options
l.Init(ctx, a.config)
defer l.Close(ctx)
log.SetDefault(l)
runner.Run(ctx, runner.OperationFunc(func(ctx context.Context) error {
log.Trace("running application...")
err := a.run(ctx)
if err != nil {
log.Error("running app error", log.WithError(err))
}
cancel()
return err
})).OnSignal(runner.SignalHandlerFunc(func(ctx context.Context, sig os.Signal) {
if sig == syscall.SIGHUP {
return
}
for _, m := range a.modules {
if err := m.Close(ctx); err != nil {
log.Error("error when closing modules", log.WithError(err))
return
}
}
if err := a.hook.Close(ctx); err != nil {
log.Error("error when closing hook", log.WithError(err))
return
}
cancel()
log.Trace("application closed")
})).Wait(ctx)
return nil
}
func (a *App) run(ctx context.Context) error {
moduleFactories := registry.LoadMap()
modules := []api.Module{}
log.Trace("initalizing hook...")
if err := a.hook.Init(ctx, a.config); err != nil {
return err
}
defer a.hook.Close(ctx)
log.Trace("hook initialized")
// instantiate all modules
log.Trace("loading modules...")
for n, f := range moduleFactories {
enabled := true
m := f()
if h, ok := a.hook.(HookModuleInterceptor); ok {
enabled = h.Intercept(n, m)
}
if !enabled {
continue
}
// call module loaded hook
if ext, ok := a.hook.(HookModuleExt); ok {
ext.ModuleLoaded(ctx, m)
}
modules = append(modules, m)
}
a.modules = modules
log.Trace("%d modules loaded", log.WithValues(len(modules)))
// calls modules init
log.Trace("initializing modules...")
for _, m := range a.modules {
if err := m.Init(ctx, a.config); err != nil {
return err
}
if ext, ok := a.hook.(HookModuleExt); ok {
ext.ModuleInitialized(ctx, m)
}
// inject values after module initialized
defer func(m api.Module) {
if err := m.Close(ctx); err != nil {
log.Error("error while closing module", log.WithError(err))
}
}(m)
}
log.Trace("modules initialized")
log.Trace("running hook")
defer log.Trace("hook run done")
waiter := a.hook.Run(ctx)
if waiter == nil {
return nil
}
err := <-waiter.Wait()
return err
}
func New(name string, hooks ...Hook) *App {
return &App{
name: name,
hook: &chainedHook{hooks: hooks},
}
}
func RegisterModule(name string, factory ModuleFactory) {
registry.Register(name, factory)
}
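// A minimal wiring sketch (an editor's illustration, not part of the original
// file; myModule and myHook are hypothetical types implementing api.Module and
// Hook): a module factory is registered by name, then a named App is created
// with its hooks and run.
//
//	func init() {
//		app.RegisterModule("example", func() api.Module { return &myModule{} })
//	}
//
//	func main() {
//		a := app.New("mineman", &myHook{})
//		if err := a.Run(); err != nil {
//			log.Error("app exited with error", log.WithError(err))
//		}
//	}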
|
[
"\"HOME\""
] |
[] |
[
"HOME"
] |
[]
|
["HOME"]
|
go
| 1 | 0 | |
cli_040_mean_sea_level_rise/contents/src/__init__.py
|
import logging
import sys
import os
import time
from collections import OrderedDict
import cartosql
import requests
import datetime
from bs4 import BeautifulSoup
from requests.auth import HTTPBasicAuth
# do you want to delete everything currently in the Carto table when you run this script?
CLEAR_TABLE_FIRST = False
# Carto username and API key for account where we will store the data
CARTO_USER = os.getenv('CARTO_USER')
CARTO_KEY = os.getenv('CARTO_KEY')
# Earthdata username and API key for account to access source url
EARTHDATA_USER = os.getenv('EARTHDATA_USER')
EARTHDATA_KEY = os.getenv('EARTHDATA_KEY')
# name of table in Carto where we will upload the data
CARTO_TABLE = 'cli_040_mean_sea_level_rise'
# column of table that can be used as a unique ID (UID)
UID_FIELD = 'date'
# column that stores datetime information
TIME_FIELD = 'date'
# column names and types for data table
# column names should be lowercase
# column types should be one of the following: geometry, text, numeric, timestamp
CARTO_SCHEMA = OrderedDict([
('altimeter_type', 'numeric'),
('merged_file_cycle', 'numeric'),
('date', 'timestamp'),
('num_obs', 'numeric'),
('num_weighted_obs', 'numeric'),
('gmsl_no_gia', 'numeric'),
('sd_gmsl_no_gia', 'numeric'),
('gauss_filt_gmsl_no_gia', 'numeric'),
('gmsl_gia', 'numeric'),
('sd_gmsl_gia', 'numeric'),
('gauss_filt_gmsl_gia', 'numeric'),
('gauss_filt_gmsl_gia_ann_signal_removed', 'numeric')
])
# how many rows can be stored in the Carto table before the oldest ones are deleted?
MAX_ROWS = 1000000
# oldest date that can be stored in the Carto table before we start deleting
MAX_AGE = datetime.datetime.today() - datetime.timedelta(days=365*150)
# url for sea level rise data
SOURCE_URL = "https://podaac-tools.jpl.nasa.gov/drive/files/allData/merged_alt/L2/TP_J1_OSTM/global_mean_sea_level/"
# Resource Watch dataset API ID
# Important! Before testing this script:
# Please change this ID OR comment out the getLayerIDs(DATASET_ID) function in the script below
# Failing to do so will overwrite the last update date on a different dataset on Resource Watch
DATASET_ID = 'f655d9b2-ea32-4753-9556-182fc6d3156b'
'''
FUNCTIONS FOR ALL DATASETS
The functions below must go in every near real-time script.
Their format should not need to be changed.
'''
def lastUpdateDate(dataset, date):
'''
Given a Resource Watch dataset's API ID and a datetime,
this function will update the dataset's 'last update date' on the API with the given datetime
INPUT dataset: Resource Watch API dataset ID (string)
date: date to set as the 'last update date' for the input dataset (datetime)
'''
# generate the API url for this dataset
apiUrl = f'http://api.resourcewatch.org/v1/dataset/{dataset}'
# create headers to send with the request to update the 'last update date'
headers = {
'Content-Type': 'application/json',
'Authorization': os.getenv('apiToken')
}
# create the json data to send in the request
body = {
"dataLastUpdated": date.isoformat() # date should be a string in the format 'YYYY-MM-DDTHH:MM:SS'
}
# send the request
try:
r = requests.patch(url = apiUrl, json = body, headers = headers)
logging.info('[lastUpdated]: SUCCESS, '+ date.isoformat() +' status code '+str(r.status_code))
return 0
except Exception as e:
logging.error('[lastUpdated]: '+str(e))
'''
FUNCTIONS FOR CARTO DATASETS
The functions below must go in every near real-time script for a Carto dataset.
Their format should not need to be changed.
'''
def checkCreateTable(table, schema, id_field, time_field=''):
'''
Create the table if it does not exist, and pull list of IDs already in the table if it does
INPUT table: Carto table to check or create (string)
schema: dictionary of column names and types, used if we are creating the table for the first time (dictionary)
id_field: name of column that we want to use as a unique ID for this table; this will be used to compare the
source data to the our table each time we run the script so that we only have to pull data we
haven't previously uploaded (string)
time_field: optional, name of column that will store datetime information (string)
RETURN list of existing IDs in the table, pulled from the id_field column (list of strings)
'''
# check if the table already exists in Carto
if cartosql.tableExists(table, user=CARTO_USER, key=CARTO_KEY):
# if the table does exist, get a list of all the values in the id_field column
logging.info('Fetching existing IDs')
r = cartosql.getFields(id_field, table, f='csv', post=True, user=CARTO_USER, key=CARTO_KEY)
# turn the response into a list of strings, removing the first and last entries (header and an empty space at end)
return r.text.split('\r\n')[1:-1]
else:
# if the table does not exist, create it with columns based on the schema input
logging.info('Table {} does not exist, creating'.format(table))
cartosql.createTable(table, schema, user=CARTO_USER, key=CARTO_KEY)
# if a unique ID field is specified, set it as a unique index in the Carto table; when you upload data, Carto
# will ensure no two rows have the same entry in this column and return an error if you try to upload a row with
# a duplicate unique ID
if id_field:
cartosql.createIndex(table, id_field, unique=True, user=CARTO_USER, key=CARTO_KEY)
# if a time_field is specified, set it as an index in the Carto table; this is not a unique index
if time_field:
cartosql.createIndex(table, time_field, user=CARTO_USER, key=CARTO_KEY)
# return an empty list because there are no IDs in the new table yet
return []
'''
FUNCTIONS FOR THIS DATASET
The functions below have been tailored to this specific dataset.
They should all be checked because their format likely will need to be changed.
'''
def cleanOldRows(table, time_field, max_age, date_format='%Y-%m-%d %H:%M:%S'):
'''
Delete rows that are older than a certain threshold
INPUT table: name of table in Carto from which we will delete the old data (string)
time_field: column that stores datetime information (string)
max_age: oldest date that can be stored in the Carto table (datetime object)
date_format: format of dates in Carto table (string)
RETURN num_expired: number of rows that have been dropped from the table (integer)
'''
# initialize number of rows that will be dropped as 0
num_expired = 0
# if the table exists
if cartosql.tableExists(table, CARTO_USER, CARTO_KEY):
# check if max_age variable is a datetime object
if isinstance(max_age, datetime.datetime):
# convert datetime object to string formatted according to date_format
max_age = max_age.strftime(date_format)
elif isinstance(max_age, str):
# raise an error if max_age is a string
logging.error('Max age must be expressed as a datetime.datetime object')
# delete rows from table which are older than the max_age
r = cartosql.deleteRows(table, "{} < '{}'".format(time_field, max_age), CARTO_USER, CARTO_KEY)
# get the number of rows that were dropped from the table
num_expired = r.json()['total_rows']
else:
# raise an error if the table doesn't exist
logging.error("{} table does not exist yet".format(table))
return(num_expired)
def fetchDataFileName(url):
'''
Get the filename from source url for which we want to download data
INPUT url: source url to download data (string)
RETURN filename: filename for source data (string)
'''
# pull website content from the source url where data for sea level rise is stored
r = requests.get(url, auth=HTTPBasicAuth(EARTHDATA_USER, EARTHDATA_KEY), stream=True)
# use BeautifulSoup to read the content as a nested data structure
soup = BeautifulSoup(r.text, 'html.parser')
# create a boolean variable which will be set to "True" once the desired file is found
already_found= False
# extract all the <a> tags within the html content. The <a> tags are used to mark links, so
# we will be able to find the files available for download marked with these tags.
for item in soup.findAll('a'):
# if one of the links available to download is a text file & doesn't contain the word 'README'
if item['href'].endswith(".txt") and ("README" not in item['href']):
if already_found:
logging.warning("There are multiple filenames which match criteria, passing most recent")
# get the filename
filename = item['href'].split('/')[-1]
# set this variable to "True" since we found the desired file
already_found= True
if already_found:
# if successful, log that the filename was found successfully
logging.info("Selected filename: {}".format(filename))
else:
# if unsuccessful, log an error that the filename was not found
logging.warning("No valid filename found")
return(filename)
def deleteExcessRows(table, max_rows, time_field):
'''
Delete rows to bring count down to max_rows
INPUT table: name of table in Carto from which we will delete excess rows (string)
max_rows: maximum rows that can be stored in the Carto table (integer)
time_field: column that stores datetime information (string)
RETURN num_dropped: number of rows that have been dropped from the table (integer)
'''
# initialize number of rows that will be dropped as 0
num_dropped = 0
# get cartodb_ids from carto table sorted by date (new->old)
r = cartosql.getFields('cartodb_id', table, order='{} desc'.format(time_field),
f='csv', user=CARTO_USER, key=CARTO_KEY)
# turn response into a list of strings of the ids
ids = r.text.split('\r\n')[1:-1]
# if number of rows is greater than max_rows, delete excess rows
if len(ids) > max_rows:
r = cartosql.deleteRowsByIDs(table, ids[max_rows:], CARTO_USER, CARTO_KEY)
# get the number of rows that have been dropped from the table
num_dropped += r.json()['total_rows']
if num_dropped:
logging.info('Dropped {} old rows from {}'.format(num_dropped, table))
return(num_dropped)
def tryRetrieveData(url, filename, timeout=300, encoding='utf-8'):
'''
Download data from the source
INPUT url: source url to download data (string)
filename: filename for source data (string)
timeout: how many seconds we will wait to get the data from url (integer)
encoding: encoding of the url content (string)
RETURN res_rows: list of lines in the source data file (list of strings)
'''
# set the start time as the current time so that we can time how long it takes to pull the data (returns the number of seconds passed since epoch)
start = time.time()
# elapsed time is initialized with zero
elapsed = 0
# generate the url to pull data for this file
resource_location = os.path.join(url, filename)
# try to fetch data from generated url while elapsed time is less than the allowed time
while elapsed < timeout:
# measures the elapsed time since start
elapsed = time.time() - start
try:
with requests.get(resource_location, auth=HTTPBasicAuth(EARTHDATA_USER, EARTHDATA_KEY), stream=True) as f:
# split the lines at line boundaries and get the original string from the encoded string
res_rows = f.content.decode(encoding).splitlines()
return(res_rows)
except:
logging.error("Unable to retrieve resource on this attempt.")
# if the request fails, wait 5 seconds before moving on to the next attempt to fetch the data
time.sleep(5)
# after failing to fetch data within the allowed time, log that the data could not be fetched
logging.error("Unable to retrive resource before timeout of {} seconds".format(timeout))
return([])
def decimalToDatetime(dec, date_pattern="%Y-%m-%d %H:%M:%S"):
'''
Convert a decimal representation of a year to a desired string representation
For example: 2016.5 -> 2016-06-01 00:00:00
useful resource: https://stackoverflow.com/questions/20911015/decimal-years-to-datetime-in-python
INPUT dec: decimal representation of a year (string)
date_pattern: format in which we want to convert the input date to (string)
RETURN result: date formatted according to date_pattern (string)
'''
# convert the date from string to float
dec = float(dec)
# convert the date from float to integer to separate out the year (i.e. 2016.5 -> 2016)
year = int(dec)
# get the decimal part of the date (i.e. 2016.5 -> 0.5)
rem = dec - year
# create a datetime object for the 1st of January of the year
base = datetime.datetime(year, 1, 1)
# generate a complete datetime object to include month, day and time
dt = base + datetime.timedelta(seconds=(base.replace(year=base.year + 1) - base).total_seconds() * rem)
# convert datetime object to string formatted according to date_pattern
result = dt.strftime(date_pattern)
return(result)
def insertIfNew(newUID, newValues, existing_ids, new_data):
'''
For data pulled from the source data file, check whether it is already in our table. If not, add it to the queue for processing
INPUT newUID: date for the current row of data (string)
newValues: date and other retrieved variables for current row of data (list of strings)
existing_ids: list of date IDs that we already have in our Carto table (list of strings)
new_data: dictionary of new data to be added to Carto, in which the key is the date and the value
is a list of strings containing the date and other retrieved variables for new data (dictionary)
RETURN new_data: updated dictionary of new data to be added to Carto, in which the input newValues have been added (dictionary)
'''
# get dates that are already in the table along with the new dates that are already processed
seen_ids = existing_ids + list(new_data.keys())
    # if the current new date is not in the existing table and has not been processed yet, add it to the dictionary of new data
if newUID not in seen_ids:
new_data[newUID] = newValues
logging.debug("Adding {} data to table".format(newUID))
else:
logging.debug("{} data already in table".format(newUID))
return(new_data)
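# Example of the dedup behaviour above (values are illustrative): with
# existing_ids = ['2016-07-02 00:00:00'] and new_data = {}, a call with a new date such as
# '2016-08-01 00:00:00' queues that row, while a second call with '2016-07-02 00:00:00'
# leaves new_data unchanged because that date is already in seen_ids.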
def processData(url, filename, existing_ids):
'''
Fetch, process and upload new data
INPUT url: url where you can find the download link for the source data (string)
filename: filename for source data (string)
existing_ids: list of date IDs that we already have in our Carto table (list of strings)
RETURN num_new: number of rows of new data sent to Carto table (integer)
'''
num_new = 0
# get the data from source as a list of strings, with each string holding one line from the source data file
res_rows = tryRetrieveData(url, filename)
# create an empty dictionary to store new data (data that's not already in our Carto table)
new_data = {}
# go through each line of content retrieved from source
for row in res_rows:
# get dates by processing lines that come after the header (header lines start with "HDR")
if not (row.startswith("HDR")):
# split line by space to get dates
row = row.split()
# if length of contents in row matches the length of CARTO_SCHEMA
if len(row)==len(CARTO_SCHEMA):
logging.debug("Processing row: {}".format(row))
# get date by accessing the third element in the list of row
date = decimalToDatetime(row[2])
# replace decimal date with datetime in data row
row[2] = date
# For new date, check whether this is already in our table.
# If not, add it to the queue for processing
new_data = insertIfNew(date, row, existing_ids, new_data)
else:
logging.debug("Skipping row: {}".format(row))
# if we have found new dates to process
if len(new_data):
num_new += len(new_data)
# create a list of new data
new_data = list(new_data.values())
# insert new data into the carto table
cartosql.blockInsertRows(CARTO_TABLE, CARTO_SCHEMA.keys(), CARTO_SCHEMA.values(), new_data, user=CARTO_USER, key=CARTO_KEY)
return(num_new)
def get_most_recent_date(table):
'''
Find the most recent date of data in the specified Carto table
INPUT table: name of table in Carto we want to find the most recent date for (string)
RETURN most_recent_date: most recent date of data in the Carto table, found in the TIME_FIELD column of the table (datetime object)
'''
# get dates in TIME_FIELD column
r = cartosql.getFields(TIME_FIELD, table, f='csv', post=True, user=CARTO_USER, key=CARTO_KEY)
# turn the response into a list of dates
dates = r.text.split('\r\n')[1:-1]
# sort the dates from oldest to newest
dates.sort()
# turn the last (newest) date into a datetime object
most_recent_date = datetime.datetime.strptime(dates[-1], '%Y-%m-%d %H:%M:%S')
return most_recent_date
def updateResourceWatch(num_new):
'''
This function should update Resource Watch to reflect the new data.
This may include updating the 'last update date' and updating any dates on layers
INPUT num_new: number of new rows in Carto table (integer)
'''
# If there are new entries in the Carto table
if num_new>0:
# Update dataset's last update date on Resource Watch
most_recent_date = get_most_recent_date(CARTO_TABLE)
lastUpdateDate(DATASET_ID, most_recent_date)
# Update the dates on layer legends - TO BE ADDED IN FUTURE
def main():
logging.basicConfig(stream=sys.stderr, level=logging.INFO)
logging.info('STARTING')
# clear the table before starting, if specified
if CLEAR_TABLE_FIRST:
logging.info("clearing table")
# if the table exists
if cartosql.tableExists(CARTO_TABLE, user=CARTO_USER, key=CARTO_KEY):
# delete all the rows
cartosql.deleteRows(CARTO_TABLE, 'cartodb_id IS NOT NULL', user=CARTO_USER, key=CARTO_KEY)
# note: we do not delete the entire table because this will cause the dataset visualization on Resource Watch
# to disappear until we log into Carto and open the table again. If we simply delete all the rows, this
# problem does not occur
# Check if table exists, create it if it does not
logging.info('Checking if table exists and getting existing IDs.')
existing_ids = checkCreateTable(CARTO_TABLE, CARTO_SCHEMA, UID_FIELD, TIME_FIELD)
# Delete rows that are older than a certain threshold
num_expired = cleanOldRows(CARTO_TABLE, TIME_FIELD, MAX_AGE)
# Get the filename from source url for which we want to download data
filename = fetchDataFileName(SOURCE_URL)
# Fetch, process, and upload new data
logging.info('Fetching new data')
num_new = processData(SOURCE_URL, filename, existing_ids)
logging.info('Previous rows: {}, New rows: {}'.format(len(existing_ids), num_new))
# Delete data to get back to MAX_ROWS
num_deleted = deleteExcessRows(CARTO_TABLE, MAX_ROWS, TIME_FIELD)
# Update Resource Watch
updateResourceWatch(num_new)
logging.info("SUCCESS")
|
[] |
[] |
[
"CARTO_USER",
"apiToken",
"EARTHDATA_KEY",
"CARTO_KEY",
"EARTHDATA_USER"
] |
[]
|
["CARTO_USER", "apiToken", "EARTHDATA_KEY", "CARTO_KEY", "EARTHDATA_USER"]
|
python
| 5 | 0 | |
gubernator/view_base.py
|
# Copyright 2016 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
import logging
import os
import re
import cloudstorage as gcs
import jinja2
import webapp2
import yaml
from google.appengine.api import urlfetch
from google.appengine.api import memcache
from webapp2_extras import sessions
import filters as jinja_filters
BUCKET_WHITELIST = {
re.match(r'gs://([^/]+)', path).group(1)
for path in yaml.load(open("buckets.yaml"))
}
DEFAULT_JOBS = {
'kubernetes-jenkins/logs/': {
'kubelet-gce-e2e-ci',
'kubernetes-build',
'kubernetes-e2e-gce',
'kubernetes-e2e-gce-scalability',
'kubernetes-e2e-gce-slow',
'kubernetes-e2e-gke',
'kubernetes-e2e-gke-slow',
'kubernetes-kubemark-5-gce',
'kubernetes-kubemark-500-gce',
'kubernetes-test-go',
}
}
PR_PREFIX = 'kubernetes-jenkins/pr-logs/pull'
JINJA_ENVIRONMENT = jinja2.Environment(
loader=jinja2.FileSystemLoader(os.path.dirname(__file__) + '/templates'),
extensions=['jinja2.ext.autoescape'],
trim_blocks=True,
autoescape=True)
JINJA_ENVIRONMENT.line_statement_prefix = '%'
jinja_filters.register(JINJA_ENVIRONMENT.filters)
class BaseHandler(webapp2.RequestHandler):
"""Base class for Handlers that render Jinja templates."""
def __init__(self, *args, **kwargs):
super(BaseHandler, self).__init__(*args, **kwargs)
# The default deadline of 5 seconds is too aggressive of a target for GCS
# directory listing operations.
urlfetch.set_default_fetch_deadline(60)
# This example code is from:
# http://webapp2.readthedocs.io/en/latest/api/webapp2_extras/sessions.html
def dispatch(self):
# pylint: disable=attribute-defined-outside-init
# Get a session store for this request.
self.session_store = sessions.get_store(request=self.request)
try:
# Dispatch the request.
webapp2.RequestHandler.dispatch(self)
finally:
# Save all sessions.
self.session_store.save_sessions(self.response)
@webapp2.cached_property
def session(self):
# Returns a session using the default cookie key.
return self.session_store.get_session()
def render(self, template, context):
"""Render a context dictionary using a given template."""
template = JINJA_ENVIRONMENT.get_template(template)
self.response.write(template.render(context))
def check_bucket(self, prefix):
if prefix in BUCKET_WHITELIST:
return
if prefix[:prefix.find('/')] not in BUCKET_WHITELIST:
self.abort(404)
class IndexHandler(BaseHandler):
"""Render the index."""
def get(self):
self.render("index.html", {'jobs': DEFAULT_JOBS})
def memcache_memoize(prefix, expires=60 * 60, neg_expires=60):
"""Decorate a function to memoize its results using memcache.
The function must take a single string as input, and return a pickleable
type.
Args:
prefix: A prefix for memcache keys to use for memoization.
        expires: How long to memoize values, in seconds.
        neg_expires: How long to memoize falsey values, in seconds.
Returns:
A decorator closure to wrap the function.
"""
# setting the namespace based on the current version prevents different
# versions from sharing cache values -- meaning there's no need to worry
# about incompatible old key/value pairs
namespace = os.environ['CURRENT_VERSION_ID']
def wrapper(func):
@functools.wraps(func)
def wrapped(arg):
key = '%s%s' % (prefix, arg)
data = memcache.get(key, namespace=namespace)
if data is not None:
return data
else:
data = func(arg)
try:
if data:
memcache.add(key, data, expires, namespace=namespace)
else:
memcache.add(key, data, neg_expires, namespace=namespace)
except ValueError:
logging.exception('unable to write to memcache')
return data
return wrapped
return wrapper
@memcache_memoize('gs-ls://', expires=60)
def gcs_ls(path):
"""Enumerate files in a GCS directory. Returns a list of FileStats."""
if path[-1] != '/':
path += '/'
return list(gcs.listbucket(path, delimiter='/'))
def pad_numbers(s):
"""Modify a string to make its numbers suitable for natural sorting."""
return re.sub(r'\d+', lambda m: m.group(0).rjust(16, '0'), s)
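# Worked example: pad_numbers('build-12/artifact-3') returns
# 'build-0000000000000012/artifact-0000000000000003', so lexicographic ordering of the
# padded strings matches the natural numeric ordering.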
|
[] |
[] |
[
"CURRENT_VERSION_ID"
] |
[]
|
["CURRENT_VERSION_ID"]
|
python
| 1 | 0 | |
queue_services/entity-emailer/src/entity_emailer/worker.py
|
# Copyright © 2019 Province of British Columbia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The unique worker functionality for this service is contained here.
The entry-point is the **cb_subscription_handler**
The design and flow leverage a few constraints that are placed upon it
by NATS Streaming and using AWAIT on the default loop.
- NATS streaming queues require one message to be processed at a time.
- AWAIT on the default loop effectively runs synchronously
If these constraints change, the use of Flask-SQLAlchemy would need to change.
Flask-SQLAlchemy currently allows the base model to be changed, or reworking
the model to a standalone SQLAlchemy usage with an async engine would need
to be pursued.
"""
import json
import os
from http import HTTPStatus
import nats
import requests
from entity_queue_common.service import QueueServiceManager
from entity_queue_common.service_utils import EmailException, QueueException, logger
from flask import Flask
from legal_api import db
from legal_api.services.bootstrap import AccountService
from sentry_sdk import capture_message
from sqlalchemy.exc import OperationalError
from entity_emailer import config
from entity_emailer.email_processors import bn_notification, incorp_notification, mras_notification
qsm = QueueServiceManager() # pylint: disable=invalid-name
APP_CONFIG = config.get_named_config(os.getenv('DEPLOYMENT_ENV', 'production'))
FLASK_APP = Flask(__name__)
FLASK_APP.config.from_object(APP_CONFIG)
db.init_app(FLASK_APP)
async def publish_event(payload: dict):
"""Publish the email message onto the NATS event subject."""
try:
subject = APP_CONFIG.ENTITY_EVENT_PUBLISH_OPTIONS['subject']
await qsm.service.publish(subject, payload)
except Exception as err: # pylint: disable=broad-except; we don't want to fail out the email, so ignore all.
capture_message(f'Queue Publish Event Error: email msg={payload}, error={err}', level='error')
logger.error('Queue Publish Event Error: email msg=%s', payload, exc_info=True)
def send_email(email: dict, token: str):
"""Send the email."""
resp = requests.post(
f'{APP_CONFIG.NOTIFY_API_URL}',
json=email,
headers={
'Content-Type': 'application/json',
'Authorization': f'Bearer {token}'
}
)
if resp.status_code != HTTPStatus.OK:
# this should log the error and put the email msg back on the queue
raise EmailException('Unsuccessful response when sending email.')
def process_email(email_msg: dict, flask_app: Flask): # pylint: disable=too-many-branches
"""Process the email contained in the submission."""
if not flask_app:
raise QueueException('Flask App not available.')
with flask_app.app_context():
logger.debug('Attempting to process email: %s', email_msg)
token = AccountService.get_bearer_token()
if email_msg['email']['type'] == 'businessNumber':
email = bn_notification.process(email_msg['email'])
send_email(email, token)
elif email_msg['email']['type'] == 'incorporationApplication':
if email_msg['email']['option'] == 'mras':
email = mras_notification.process(email_msg['email'])
else:
email = incorp_notification.process(email_msg['email'], token)
send_email(email, token)
else:
logger.debug('No email to send for: %s', email_msg)
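# Illustrative shape of the queue payload handled above (keys inferred from the branches;
# other fields omitted): {'email': {'type': 'incorporationApplication', 'option': 'mras', ...}}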
async def cb_subscription_handler(msg: nats.aio.client.Msg):
"""Use Callback to process Queue Msg objects."""
try:
logger.info('Received raw message seq: %s, data= %s', msg.sequence, msg.data.decode())
email_msg = json.loads(msg.data.decode('utf-8'))
logger.debug('Extracted email msg: %s', email_msg)
process_email(email_msg, FLASK_APP)
except OperationalError as err:
logger.error('Queue Blocked - Database Issue: %s', json.dumps(email_msg), exc_info=True)
raise err # We don't want to handle the error, as a DB down would drain the queue
except EmailException as err:
logger.error('Queue Error - email failed to send: %s'
'\n\nThis message has been put back on the queue for reprocessing.',
json.dumps(email_msg), exc_info=True)
raise err # we don't want to handle the error, so that the message gets put back on the queue
except (QueueException, Exception): # pylint: disable=broad-except
# Catch Exception so that any error is still caught and the message is removed from the queue
capture_message('Queue Error: ' + json.dumps(email_msg), level='error')
logger.error('Queue Error: %s', json.dumps(email_msg), exc_info=True)
|
[] |
[] |
[
"DEPLOYMENT_ENV"
] |
[]
|
["DEPLOYMENT_ENV"]
|
python
| 1 | 0 | |
tests/script_tests.py
|
"""Test that each script can be compiled and executed."""
#
# (C) Pywikibot team, 2014-2021
#
# Distributed under the terms of the MIT license.
#
import os
import sys
import unittest
from contextlib import suppress
from pywikibot.tools import has_module
from tests import join_root_path, unittest_print
from tests.aspects import DefaultSiteTestCase, MetaTestCaseClass, PwbTestCase
from tests.utils import execute_pwb
scripts_path = join_root_path('scripts')
# These dependencies are not always the package name which is in setup.py.
# Here, the name given to the module which will be imported is required.
script_deps = {
'commons_information': ['mwparserfromhell'],
'patrol': ['mwparserfromhell'],
'weblinkchecker': ['memento_client'],
}
def check_script_deps(script_name):
"""Detect whether all dependencies are installed."""
if script_name in script_deps:
for package_name in script_deps[script_name]:
if not has_module(package_name):
unittest_print(
"{} depends on {}, which isn't available"
.format(script_name, package_name))
return False
return True
failed_dep_script_set = {name for name in script_deps
if not check_script_deps(name)}
# scripts which cannot be tested
unrunnable_script_set = set()
def list_scripts(path, exclude=None):
"""Return list of scripts in given path."""
scripts = [name[0:-3] for name in os.listdir(path) # strip '.py'
if name.endswith('.py')
and not name.startswith('_') # skip __init__.py and _*
and name != exclude]
return scripts
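# Example (directory contents are illustrative): for a folder containing login.py, version.py
# and _private.py, list_scripts(path, 'login.py') returns ['version'].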
script_list = (['login']
+ list_scripts(scripts_path, 'login.py'))
runnable_script_list = (
['login'] + sorted(set(script_list) - {'login'} - unrunnable_script_set))
script_input = {
'interwiki': 'Test page that should not exist\n',
'misspelling': 'q\n',
'pagefromfile': 'q\n',
'replace': 'foo\nbar\n\n\n', # match, replacement,
# Enter to begin, Enter for default summary.
'shell': '\n', # exits on end of stdin
'solve_disambiguation': 'Test page\nq\n',
'upload':
'https://upload.wikimedia.org/wikipedia/commons/'
'8/80/Wikipedia-logo-v2.svg\n\n\n',
}
auto_run_script_list = [
'blockpageschecker',
'category_redirect',
'checkimages',
'clean_sandbox',
'login',
'misspelling',
'revertbot',
'noreferences',
'nowcommons',
'parser_function_count',
'patrol',
'shell',
'unusedfiles',
'upload',
'watchlist',
'welcome',
]
# Expected result for no arguments
# Some of these are not pretty, but at least they are informative
# and not backtraces starting deep in the pywikibot package.
no_args_expected_results = {
# TODO: until done here, remember to set editor = None in user-config.py
'change_pagelang': 'No -setlang parameter given',
'checkimages': 'Execution time: 0 seconds',
'harvest_template': 'ERROR: Please specify',
# script_input['interwiki'] above lists a title that should not exist
'interwiki': 'does not exist. Skipping.',
'login': 'Logged in on ',
'pagefromfile': 'Please enter the file name',
'parser_function_count': 'Hold on, this will need some time.',
'replace': 'Press Enter to use this automatic message',
'replicate_wiki':
'error: the following arguments are required: destination',
'shell': ('>>> ', 'Welcome to the'),
'speedy_delete': "does not have 'delete' right for site",
'transferbot': 'Target site not different from source site',
'unusedfiles': ('Working on', None),
'version': 'Pywikibot: [',
'watchlist': 'Retrieving watchlist',
# The following auto-run and typically can't be validated,
# however these strings are very likely to exist within
# the timeout of 5 seconds.
'revertbot': 'Fetching new batch of contributions',
'upload': 'ERROR: Upload error',
}
enable_autorun_tests = (
os.environ.get('PYWIKIBOT_TEST_AUTORUN', '0') == '1')
def collector(loader=unittest.loader.defaultTestLoader):
"""Load the default tests."""
# Note: Raising SkipTest during load_tests will
    # cause the loader to fall back to its own
# discover() ordering of unit tests.
if unrunnable_script_set:
unittest_print('Skipping execution of unrunnable scripts:\n {!r}'
.format(unrunnable_script_set))
if not enable_autorun_tests:
unittest_print('Skipping execution of auto-run scripts '
'(set PYWIKIBOT_TEST_AUTORUN=1 to enable):\n {!r}'
.format(auto_run_script_list))
tests = (['test__login']
+ ['test_' + name
for name in sorted(script_list)
if name != 'login'
and name not in unrunnable_script_set
])
test_list = ['tests.script_tests.TestScriptHelp.' + name
for name in tests]
tests = (['test__login']
+ ['test_' + name
for name in sorted(script_list)
if name != 'login'
and name not in failed_dep_script_set
and name not in unrunnable_script_set
and (enable_autorun_tests or name not in auto_run_script_list)
])
test_list += ['tests.script_tests.TestScriptSimulate.' + name
for name in tests]
tests = loader.loadTestsFromNames(test_list)
suite = unittest.TestSuite()
suite.addTests(tests)
return suite
def load_tests(loader=unittest.loader.defaultTestLoader,
tests=None, pattern=None):
"""Load the default modules."""
return collector(loader)
class TestScriptMeta(MetaTestCaseClass):
"""Test meta class."""
def __new__(cls, name, bases, dct):
"""Create the new class."""
def test_execution(script_name, args=None):
if args is None:
args = []
is_autorun = ('-help' not in args
and script_name in auto_run_script_list)
def test_skip_script(self):
raise unittest.SkipTest(
'Skipping execution of auto-run scripts (set '
'PYWIKIBOT_TEST_AUTORUN=1 to enable) "{}"'
.format(script_name))
def testScript(self):
global_args = 'For global options use -help:global or run pwb'
cmd = [script_name] + args
data_in = script_input.get(script_name)
timeout = 5 if is_autorun else None
stdout, error = None, None
if self._results and script_name in self._results:
error = self._results[script_name]
if isinstance(error, tuple):
stdout, error = error
test_overrides = {}
if not hasattr(self, 'net') or not self.net:
test_overrides['pywikibot.Site'] = 'lambda *a, **k: None'
# run the script
result = execute_pwb(cmd, data_in, timeout=timeout,
error=error, overrides=test_overrides)
err_result = result['stderr']
out_result = result['stdout']
stderr_sleep, stderr_other = [], []
for line in err_result.splitlines():
if line.startswith('Sleeping for '):
stderr_sleep.append(line)
else:
stderr_other.append(line)
if stderr_sleep:
unittest_print('\n'.join(stderr_sleep))
if result['exit_code'] == -9:
unittest_print(' killed', end=' ')
if error:
self.assertIn(error, result['stderr'])
exit_codes = [0, 1, 2, -9]
elif not is_autorun:
if not stderr_other:
self.assertIn(global_args, out_result)
else:
self.assertIn('Use -help for further information.',
stderr_other)
self.assertNotIn('-help', args)
exit_codes = [0]
else:
# auto-run
# returncode is 1 if the process is killed
exit_codes = [0, 1, -9]
if not out_result and not err_result:
unittest_print(' auto-run script unresponsive after '
'{} seconds'.format(timeout), end=' ')
elif 'SIMULATION: edit action blocked' in err_result:
unittest_print(' auto-run script simulated edit '
'blocked', end=' ')
else:
unittest_print(
' auto-run script stderr within {} seconds: {!r}'
.format(timeout, err_result), end=' ')
unittest_print(' exit code: {}'
.format(result['exit_code']), end=' ')
self.assertNotIn('Traceback (most recent call last)',
err_result)
self.assertNotIn('deprecated', err_result.lower())
# If stdout doesn't include global help..
if global_args not in out_result:
# Specifically look for deprecated
self.assertNotIn('deprecated', out_result.lower())
# But also complain if there is any stdout
if stdout is not None and out_result:
self.assertIn(stdout, out_result)
else:
self.assertIsEmpty(out_result)
self.assertIn(result['exit_code'], exit_codes)
sys.stdout.flush()
if not enable_autorun_tests and is_autorun:
return test_skip_script
return testScript
arguments = dct['_arguments']
for script_name in script_list:
# force login to be the first, alphabetically, so the login
# message does not unexpectedly occur during execution of
# another script.
# unrunnable script tests are disabled by default in load_tests()
if script_name == 'login':
test_name = 'test__login'
else:
test_name = 'test_' + script_name
cls.add_method(dct, test_name,
test_execution(script_name, arguments.split()),
'Test running {} {}.'
.format(script_name, arguments))
if script_name in dct['_expected_failures']:
dct[test_name] = unittest.expectedFailure(dct[test_name])
elif script_name in dct['_allowed_failures']:
dct[test_name] = unittest.skip(
'{} is in _allowed_failures list'
.format(script_name))(dct[test_name])
elif script_name in failed_dep_script_set \
and arguments == '-simulate':
dct[test_name] = unittest.skip(
'{} has dependencies; skipping'
.format(script_name))(dct[test_name])
# Disable test by default in pytest
if script_name in unrunnable_script_set:
# flag them as an expectedFailure due to py.test (T135594)
dct[test_name] = unittest.expectedFailure(dct[test_name])
dct[test_name].__test__ = False
return super(TestScriptMeta, cls).__new__(cls, name, bases, dct)
class TestScriptHelp(PwbTestCase, metaclass=TestScriptMeta):
"""Test cases for running scripts with -help.
All scripts should not create a Site for -help, so net = False.
"""
net = False
# Here come scripts requiring and missing dependencies, that haven't been
# fixed to output -help in that case.
_expected_failures = {'version'}
_allowed_failures = []
_arguments = '-help'
_results = None
class TestScriptSimulate(DefaultSiteTestCase, PwbTestCase,
metaclass=TestScriptMeta):
"""Test cases for scripts.
    This class sets the 'user' attribute on every test, thereby ensuring
that the test runner has a username for the default site, and so that
Site.login() is called in the test runner, which means that the scripts
run in pwb can automatically login using the saved cookies.
"""
login = True
_expected_failures = {
'catall', # stdout user interaction
'upload', # raises custom ValueError
}
_allowed_failures = [
'disambredir',
'misspelling', # T94681
'watchlist', # T77965
]
_arguments = '-simulate'
_results = no_args_expected_results
if __name__ == '__main__': # pragma: no cover
with suppress(SystemExit):
unittest.main()
|
[] |
[] |
[
"PYWIKIBOT_TEST_AUTORUN"
] |
[]
|
["PYWIKIBOT_TEST_AUTORUN"]
|
python
| 1 | 0 | |
pypy/interpreter/error.py
|
import os, sys
from rpython.rlib import jit
from rpython.rlib.objectmodel import we_are_translated
from errno import EINTR
AUTO_DEBUG = os.getenv('PYPY_DEBUG')
RECORD_INTERPLEVEL_TRACEBACK = True
class OperationError(Exception):
"""Interpreter-level exception that signals an exception that should be
sent to the application level.
OperationError instances have three attributes (and no .args),
w_type, _w_value and _application_traceback, which contain the wrapped
type and value describing the exception, and a chained list of
PyTraceback objects making the application-level traceback.
"""
_w_value = None
_application_traceback = None
def __init__(self, w_type, w_value, tb=None):
assert w_type is not None
self.setup(w_type)
self._w_value = w_value
self._application_traceback = tb
def setup(self, w_type):
self.w_type = w_type
if not we_are_translated():
self.debug_excs = []
def clear(self, space):
# for sys.exc_clear()
self.w_type = space.w_None
self._w_value = space.w_None
self._application_traceback = None
if not we_are_translated():
del self.debug_excs[:]
def match(self, space, w_check_class):
"Check if this application-level exception matches 'w_check_class'."
return space.exception_match(self.w_type, w_check_class)
def async(self, space):
"Check if this is an exception that should better not be caught."
return (self.match(space, space.w_SystemExit) or
self.match(space, space.w_KeyboardInterrupt))
def __str__(self):
"NOT_RPYTHON: Convenience for tracebacks."
s = self._w_value
if self.__class__ is not OperationError and s is None:
s = self._compute_value()
return '[%s: %s]' % (self.w_type, s)
def errorstr(self, space, use_repr=False):
"The exception class and value, as a string."
w_value = self.get_w_value(space)
if space is None:
# this part NOT_RPYTHON
exc_typename = str(self.w_type)
exc_value = str(w_value)
else:
w = space.wrap
if space.is_w(space.type(self.w_type), space.w_str):
exc_typename = space.str_w(self.w_type)
else:
exc_typename = space.str_w(
space.getattr(self.w_type, w('__name__')))
if space.is_w(w_value, space.w_None):
exc_value = ""
else:
try:
if use_repr:
exc_value = space.str_w(space.repr(w_value))
else:
exc_value = space.str_w(space.str(w_value))
except OperationError:
# oups, cannot __str__ the exception object
exc_value = "<oups, exception object itself cannot be str'd>"
if not exc_value:
return exc_typename
else:
return '%s: %s' % (exc_typename, exc_value)
def record_interpreter_traceback(self):
"""Records the current traceback inside the interpreter.
This traceback is only useful to debug the interpreter, not the
application."""
if not we_are_translated():
if RECORD_INTERPLEVEL_TRACEBACK:
self.debug_excs.append(sys.exc_info())
def print_application_traceback(self, space, file=None):
"NOT_RPYTHON: Dump a standard application-level traceback."
if file is None: file = sys.stderr
self.print_app_tb_only(file)
print >> file, self.errorstr(space)
def print_app_tb_only(self, file):
"NOT_RPYTHON"
tb = self._application_traceback
if tb:
import linecache
print >> file, "Traceback (application-level):"
while tb is not None:
co = tb.frame.pycode
lineno = tb.get_lineno()
fname = co.co_filename
if fname.startswith('<inline>\n'):
lines = fname.split('\n')
fname = lines[0].strip()
try:
l = lines[lineno]
except IndexError:
l = ''
else:
l = linecache.getline(fname, lineno)
print >> file, " File \"%s\"," % fname,
print >> file, "line", lineno, "in", co.co_name
if l:
if l.endswith('\n'):
l = l[:-1]
l = " " + l.lstrip()
print >> file, l
tb = tb.next
def print_detailed_traceback(self, space=None, file=None):
"""NOT_RPYTHON: Dump a nice detailed interpreter- and
application-level traceback, useful to debug the interpreter."""
import traceback, cStringIO
if file is None: file = sys.stderr
f = cStringIO.StringIO()
for i in range(len(self.debug_excs)-1, -1, -1):
print >> f, "Traceback (interpreter-level):"
traceback.print_tb(self.debug_excs[i][2], file=f)
f.seek(0)
debug_print(''.join(['|| ' + line for line in f.readlines()]), file)
if self.debug_excs:
from pypy.tool import tb_server
tb_server.publish_exc(self.debug_excs[-1])
self.print_app_tb_only(file)
print >> file, '(application-level)', self.errorstr(space)
if AUTO_DEBUG:
import debug
debug.fire(self)
@jit.unroll_safe
def normalize_exception(self, space):
"""Normalize the OperationError. In other words, fix w_type and/or
w_value to make sure that the __class__ of w_value is exactly w_type.
"""
#
# This method covers all ways in which the Python statement
# "raise X, Y" can produce a valid exception type and instance.
#
# In the following table, 'Class' means a subclass of BaseException
# and 'inst' is an instance of either 'Class' or a subclass of it.
# Or 'Class' can also be an old-style class and 'inst' an old-style
# instance of it.
#
# The flow object space only deals with non-advanced case. Old-style
# classes and instances *are* advanced.
#
# input (w_type, w_value)... becomes... advanced case?
# ---------------------------------------------------------------------
# (tuple, w_value) (tuple[0], w_value) yes
# (Class, None) (Class, Class()) no
# (Class, inst) (inst.__class__, inst) no
# (Class, tuple) (Class, Class(*tuple)) yes
# (Class, x) (Class, Class(x)) no
# ("string", ...) ("string", ...) deprecated
# (inst, None) (inst.__class__, inst) no
#
w_type = self.w_type
w_value = self.get_w_value(space)
while space.is_true(space.isinstance(w_type, space.w_tuple)):
w_type = space.getitem(w_type, space.wrap(0))
if space.exception_is_valid_obj_as_class_w(w_type):
# this is for all cases of the form (Class, something)
if space.is_w(w_value, space.w_None):
# raise Type: we assume we have to instantiate Type
w_value = space.call_function(w_type)
w_type = self._exception_getclass(space, w_value)
else:
w_valuetype = space.exception_getclass(w_value)
if space.exception_issubclass_w(w_valuetype, w_type):
# raise Type, Instance: let etype be the exact type of value
w_type = w_valuetype
else:
if space.is_true(space.isinstance(w_value, space.w_tuple)):
# raise Type, tuple: assume the tuple contains the
# constructor args
w_value = space.call(w_type, w_value)
else:
# raise Type, X: assume X is the constructor argument
w_value = space.call_function(w_type, w_value)
w_type = self._exception_getclass(space, w_value)
else:
# the only case left here is (inst, None), from a 'raise inst'.
w_inst = w_type
w_instclass = self._exception_getclass(space, w_inst)
if not space.is_w(w_value, space.w_None):
raise OperationError(space.w_TypeError,
space.wrap("instance exception may not "
"have a separate value"))
w_value = w_inst
w_type = w_instclass
self.w_type = w_type
self._w_value = w_value
def _exception_getclass(self, space, w_inst):
w_type = space.exception_getclass(w_inst)
if not space.exception_is_valid_class_w(w_type):
typename = w_type.getname(space)
msg = ("exceptions must be old-style classes or derived "
"from BaseException, not %s")
raise operationerrfmt(space.w_TypeError, msg, typename)
return w_type
def write_unraisable(self, space, where, w_object=None,
with_traceback=False, extra_line=''):
if w_object is None:
objrepr = ''
else:
try:
objrepr = space.str_w(space.repr(w_object))
except OperationError:
objrepr = '?'
#
try:
if with_traceback:
w_t = self.w_type
w_v = self.get_w_value(space)
w_tb = space.wrap(self.get_traceback())
space.appexec([space.wrap(where),
space.wrap(objrepr),
space.wrap(extra_line),
w_t, w_v, w_tb],
"""(where, objrepr, extra_line, t, v, tb):
import sys, traceback
sys.stderr.write('From %s%s:\\n' % (where, objrepr))
if extra_line:
sys.stderr.write(extra_line)
traceback.print_exception(t, v, tb)
""")
else:
msg = 'Exception %s in %s%s ignored\n' % (
self.errorstr(space, use_repr=True), where, objrepr)
space.call_method(space.sys.get('stderr'), 'write',
space.wrap(msg))
except OperationError:
pass # ignored
def get_w_value(self, space):
w_value = self._w_value
if w_value is None:
value = self._compute_value()
self._w_value = w_value = space.wrap(value)
return w_value
def _compute_value(self):
raise NotImplementedError
def get_traceback(self):
"""Calling this marks the PyTraceback as escaped, i.e. it becomes
accessible and inspectable by app-level Python code. For the JIT.
Note that this has no effect if there are already several traceback
frames recorded, because in this case they are already marked as
escaping by executioncontext.leave() being called with
got_exception=True.
"""
from pypy.interpreter.pytraceback import PyTraceback
tb = self._application_traceback
if tb is not None and isinstance(tb, PyTraceback):
tb.frame.mark_as_escaped()
return tb
def set_traceback(self, traceback):
"""Set the current traceback. It should either be a traceback
pointing to some already-escaped frame, or a traceback for the
current frame. To support the latter case we do not mark the
frame as escaped. The idea is that it will be marked as escaping
only if the exception really propagates out of this frame, by
executioncontext.leave() being called with got_exception=True.
"""
self._application_traceback = traceback
# ____________________________________________________________
# optimization only: avoid the slowest operation -- the string
# formatting with '%' -- in the common case where we don't
# actually need the message. Only supports %s and %d.
_fmtcache = {}
_fmtcache2 = {}
def decompose_valuefmt(valuefmt):
"""Returns a tuple of string parts extracted from valuefmt,
and a tuple of format characters."""
formats = []
parts = valuefmt.split('%')
i = 1
while i < len(parts):
if parts[i].startswith('s') or parts[i].startswith('d'):
formats.append(parts[i][0])
parts[i] = parts[i][1:]
i += 1
elif parts[i] == '': # support for '%%'
parts[i-1] += '%' + parts[i+1]
del parts[i:i+2]
else:
raise ValueError("invalid format string (only %s or %d supported)")
assert len(formats) > 0, "unsupported: no % command found"
return tuple(parts), tuple(formats)
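# Worked example: decompose_valuefmt("cannot add %s and %d") returns
# (('cannot add ', ' and ', ''), ('s', 'd')).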
def get_operrcls2(valuefmt):
strings, formats = decompose_valuefmt(valuefmt)
assert len(strings) == len(formats) + 1
try:
OpErrFmt = _fmtcache2[formats]
except KeyError:
from rpython.rlib.unroll import unrolling_iterable
attrs = ['x%d' % i for i in range(len(formats))]
entries = unrolling_iterable(enumerate(attrs))
#
class OpErrFmt(OperationError):
def __init__(self, w_type, strings, *args):
self.setup(w_type)
assert len(args) == len(strings) - 1
self.xstrings = strings
for i, attr in entries:
setattr(self, attr, args[i])
assert w_type is not None
def _compute_value(self):
lst = [None] * (len(formats) + len(formats) + 1)
for i, attr in entries:
string = self.xstrings[i]
value = getattr(self, attr)
lst[i+i] = string
lst[i+i+1] = str(value)
lst[-1] = self.xstrings[-1]
return ''.join(lst)
#
_fmtcache2[formats] = OpErrFmt
return OpErrFmt, strings
def get_operationerr_class(valuefmt):
try:
result = _fmtcache[valuefmt]
except KeyError:
result = _fmtcache[valuefmt] = get_operrcls2(valuefmt)
return result
get_operationerr_class._annspecialcase_ = 'specialize:memo'
def operationerrfmt(w_type, valuefmt, *args):
"""Equivalent to OperationError(w_type, space.wrap(valuefmt % args)).
More efficient in the (common) case where the value is not actually
needed."""
OpErrFmt, strings = get_operationerr_class(valuefmt)
return OpErrFmt(w_type, strings, *args)
operationerrfmt._annspecialcase_ = 'specialize:arg(1)'
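# Usage sketch (the arguments are illustrative): raise a formatted application-level error
# without paying for the '%' formatting unless the message is actually needed:
#     raise operationerrfmt(space.w_TypeError,
#                           "expected %s, got %s object", "string", "int")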
# ____________________________________________________________
# Utilities
from rpython.tool.ansi_print import ansi_print
def debug_print(text, file=None, newline=True):
# 31: ANSI color code "red"
ansi_print(text, esc="31", file=file, newline=newline)
try:
WindowsError
except NameError:
_WINDOWS = False
else:
_WINDOWS = True
def wrap_windowserror(space, e, w_filename=None):
from rpython.rlib import rwin32
winerror = e.winerror
try:
msg = rwin32.FormatError(winerror)
except ValueError:
msg = 'Windows Error %d' % winerror
exc = space.w_WindowsError
if w_filename is not None:
w_error = space.call_function(exc, space.wrap(winerror),
space.wrap(msg), w_filename)
else:
w_error = space.call_function(exc, space.wrap(winerror),
space.wrap(msg))
return OperationError(exc, w_error)
def wrap_oserror2(space, e, w_filename=None, exception_name='w_OSError',
w_exception_class=None):
assert isinstance(e, OSError)
if _WINDOWS and isinstance(e, WindowsError):
return wrap_windowserror(space, e, w_filename)
errno = e.errno
if errno == EINTR:
space.getexecutioncontext().checksignals()
try:
msg = os.strerror(errno)
except ValueError:
msg = 'error %d' % errno
if w_exception_class is None:
exc = getattr(space, exception_name)
else:
exc = w_exception_class
if w_filename is not None:
w_error = space.call_function(exc, space.wrap(errno),
space.wrap(msg), w_filename)
else:
w_error = space.call_function(exc, space.wrap(errno),
space.wrap(msg))
return OperationError(exc, w_error)
wrap_oserror2._annspecialcase_ = 'specialize:arg(3)'
def wrap_oserror(space, e, filename=None, exception_name='w_OSError',
w_exception_class=None):
if filename is not None:
return wrap_oserror2(space, e, space.wrap(filename),
exception_name=exception_name,
w_exception_class=w_exception_class)
else:
return wrap_oserror2(space, e, None,
exception_name=exception_name,
w_exception_class=w_exception_class)
wrap_oserror._annspecialcase_ = 'specialize:arg(3)'
def exception_from_errno(space, w_type):
from rpython.rlib.rposix import get_errno
errno = get_errno()
msg = os.strerror(errno)
w_error = space.call_function(w_type, space.wrap(errno), space.wrap(msg))
return OperationError(w_type, w_error)
def new_exception_class(space, name, w_bases=None, w_dict=None):
"""Create a new exception type.
@param name: the name of the type.
@param w_bases: Either an exception type, or a wrapped tuple of
exception types. default is space.w_Exception.
@param w_dict: an optional dictionary to populate the class __dict__.
"""
if '.' in name:
module, name = name.rsplit('.', 1)
else:
module = None
if w_bases is None:
w_bases = space.newtuple([space.w_Exception])
elif not space.isinstance_w(w_bases, space.w_tuple):
w_bases = space.newtuple([w_bases])
if w_dict is None:
w_dict = space.newdict()
w_exc = space.call_function(
space.w_type, space.wrap(name), w_bases, w_dict)
if module:
space.setattr(w_exc, space.wrap("__module__"), space.wrap(module))
return w_exc
def typed_unwrap_error_msg(space, expected, w_obj):
type_name = space.type(w_obj).getname(space)
return space.wrap("expected %s, got %s object" % (expected, type_name))
|
[] |
[] |
[
"PYPY_DEBUG"
] |
[]
|
["PYPY_DEBUG"]
|
python
| 1 | 0 | |
manage.py
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "frigg.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
python/ray/tests/test_failure_3.py
|
import os
import sys
import signal
import ray
import numpy as np
import pytest
import time
from ray._private.test_utils import SignalActor, wait_for_pid_to_exit
SIGKILL = signal.SIGKILL if sys.platform != "win32" else signal.SIGTERM
def test_worker_exit_after_parent_raylet_dies(ray_start_cluster):
cluster = ray_start_cluster
cluster.add_node(num_cpus=0)
cluster.add_node(num_cpus=8, resources={"foo": 1})
cluster.wait_for_nodes()
ray.init(address=cluster.address)
@ray.remote(resources={"foo": 1})
class Actor():
def get_worker_pid(self):
return os.getpid()
def get_raylet_pid(self):
return int(os.environ["RAY_RAYLET_PID"])
actor = Actor.remote()
worker_pid = ray.get(actor.get_worker_pid.remote())
raylet_pid = ray.get(actor.get_raylet_pid.remote())
# Kill the parent raylet.
os.kill(raylet_pid, SIGKILL)
os.waitpid(raylet_pid, 0)
wait_for_pid_to_exit(raylet_pid)
# Make sure the worker process exits as well.
wait_for_pid_to_exit(worker_pid)
@pytest.mark.parametrize(
"ray_start_cluster_head", [{
"num_cpus": 5,
"object_store_memory": 10**8,
}],
indirect=True)
def test_parallel_actor_fill_plasma_retry(ray_start_cluster_head):
@ray.remote
class LargeMemoryActor:
def some_expensive_task(self):
return np.zeros(10**8 // 2, dtype=np.uint8)
actors = [LargeMemoryActor.remote() for _ in range(5)]
for _ in range(5):
pending = [a.some_expensive_task.remote() for a in actors]
while pending:
[done], pending = ray.wait(pending, num_returns=1)
@pytest.mark.parametrize(
"ray_start_regular", [{
"_system_config": {
"task_retry_delay_ms": 500
}
}],
indirect=True)
def test_async_actor_task_retries(ray_start_regular):
# https://github.com/ray-project/ray/issues/11683
signal = SignalActor.remote()
@ray.remote
class DyingActor:
def __init__(self):
print("DyingActor init called")
self.should_exit = False
def set_should_exit(self):
print("DyingActor.set_should_exit called")
self.should_exit = True
async def get(self, x, wait=False):
print(f"DyingActor.get called with x={x}, wait={wait}")
if self.should_exit:
os._exit(0)
if wait:
await signal.wait.remote()
return x
# Normal in order actor task retries should work
dying = DyingActor.options(
max_restarts=-1,
max_task_retries=-1,
).remote()
assert ray.get(dying.get.remote(1)) == 1
ray.get(dying.set_should_exit.remote())
assert ray.get(dying.get.remote(42)) == 42
# Now let's try out of order retries:
# Task seqno 0 will return
# Task seqno 1 will be pending and retried later
# Task seqno 2 will return
# Task seqno 3 will crash the actor and retried later
dying = DyingActor.options(
max_restarts=-1,
max_task_retries=-1,
).remote()
# seqno 0
ref_0 = dying.get.remote(0)
assert ray.get(ref_0) == 0
# seqno 1
ref_1 = dying.get.remote(1, wait=True)
# Need a barrier here to ensure ordering between the async and sync call.
# Otherwise ref2 could be executed prior to ref1.
for i in range(100):
if ray.get(signal.cur_num_waiters.remote()) > 0:
break
time.sleep(.1)
assert ray.get(signal.cur_num_waiters.remote()) > 0
# seqno 2
ref_2 = dying.set_should_exit.remote()
assert ray.get(ref_2) is None
# seqno 3, this will crash the actor because previous task set should exit
# to true.
ref_3 = dying.get.remote(3)
# At this point the actor should be restarted. The two pending tasks
# [ref_1, ref_3] should be retried, but not the completed tasks [ref_0,
# ref_2]. Critically, if ref_2 was retried, ref_3 can never return.
ray.get(signal.send.remote())
assert ray.get(ref_1) == 1
assert ray.get(ref_3) == 3
if __name__ == "__main__":
import pytest
sys.exit(pytest.main(["-v", __file__]))
|
[] |
[] |
[
"RAY_RAYLET_PID"
] |
[]
|
["RAY_RAYLET_PID"]
|
python
| 1 | 0 | |
internal/logic/conf/conf.go
|
package conf
import (
"flag"
"fmt"
"os"
"strconv"
"time"
"github.com/bilibili/discovery/naming"
xtime "github.com/Terry-Mao/goim/pkg/time"
"github.com/BurntSushi/toml"
)
var (
confPath string
region string
zone string
deployEnv string
host string
weight int64
// Conf config
Conf *Config
)
func init() {
var (
defHost, _ = os.Hostname()
defWeight, _ = strconv.ParseInt(os.Getenv("WEIGHT"), 10, 32)
)
flag.StringVar(&confPath, "conf", "logic-example.toml", "default config path")
flag.StringVar(®ion, "region", os.Getenv("REGION"), "avaliable region. or use REGION env variable, value: sh etc.")
flag.StringVar(&zone, "zone", os.Getenv("ZONE"), "avaliable zone. or use ZONE env variable, value: sh001/sh002 etc.")
flag.StringVar(&deployEnv, "deploy.env", os.Getenv("DEPLOY_ENV"), "deploy env. or use DEPLOY_ENV env variable, value: dev/fat1/uat/pre/prod etc.")
flag.StringVar(&host, "host", defHost, "machine hostname. or use default machine hostname.")
flag.Int64Var(&weight, "weight", defWeight, "load balancing weight, or use WEIGHT env variable, value: 10 etc.")
}
// Init init config.
func Init() (err error) {
Conf = Default()
_, err = toml.DecodeFile(confPath, &Conf)
fmt.Println(Conf.Env)
return
}
// Default creates a new config with the specified default values.
func Default() *Config {
return &Config{
Env: &Env{Region: region, Zone: zone, DeployEnv: deployEnv, Host: host, Weight: weight},
Discovery: &naming.Config{Region: region, Zone: zone, Env: deployEnv, Host: host},
HTTPServer: &HTTPServer{
Network: "tcp",
Addr: "3111",
ReadTimeout: xtime.Duration(time.Second),
WriteTimeout: xtime.Duration(time.Second),
},
RPCClient: &RPCClient{Dial: xtime.Duration(time.Second), Timeout: xtime.Duration(time.Second)},
RPCServer: &RPCServer{
Network: "tcp",
Addr: "3119",
Timeout: xtime.Duration(time.Second),
IdleTimeout: xtime.Duration(time.Second * 60),
MaxLifeTime: xtime.Duration(time.Hour * 2),
ForceCloseWait: xtime.Duration(time.Second * 20),
KeepAliveInterval: xtime.Duration(time.Second * 60),
KeepAliveTimeout: xtime.Duration(time.Second * 20),
},
Backoff: &Backoff{MaxDelay: 300, BaseDelay: 3, Factor: 1.8, Jitter: 1.3},
}
}
// Config config.
type Config struct {
Env *Env
Discovery *naming.Config
RPCClient *RPCClient
RPCServer *RPCServer
HTTPServer *HTTPServer
Kafka *Kafka
Redis *Redis
Node *Node
Backoff *Backoff
Regions map[string][]string
}
// Env is env config.
type Env struct {
Region string
Zone string
DeployEnv string
Host string
Weight int64
}
// Node node config.
type Node struct {
DefaultDomain string
HostDomain string
TCPPort int
WSPort int
WSSPort int
HeartbeatMax int
Heartbeat xtime.Duration
RegionWeight float64
}
// Backoff backoff.
type Backoff struct {
MaxDelay int32
BaseDelay int32
Factor float32
Jitter float32
}
// Redis .
type Redis struct {
Network string
Addr string
Auth string
Active int
Idle int
DialTimeout xtime.Duration
ReadTimeout xtime.Duration
WriteTimeout xtime.Duration
IdleTimeout xtime.Duration
Expire xtime.Duration
}
// Kafka .
type Kafka struct {
Topic string
Brokers []string
}
// RPCClient is RPC client config.
type RPCClient struct {
Dial xtime.Duration
Timeout xtime.Duration
}
// RPCServer is RPC server config.
type RPCServer struct {
Network string
Addr string
Timeout xtime.Duration
IdleTimeout xtime.Duration
MaxLifeTime xtime.Duration
ForceCloseWait xtime.Duration
KeepAliveInterval xtime.Duration
KeepAliveTimeout xtime.Duration
}
// HTTPServer is http server config.
type HTTPServer struct {
Network string
Addr string
ReadTimeout xtime.Duration
WriteTimeout xtime.Duration
}
|
[
"\"WEIGHT\"",
"\"REGION\"",
"\"ZONE\"",
"\"DEPLOY_ENV\""
] |
[] |
[
"ZONE",
"WEIGHT",
"REGION",
"DEPLOY_ENV"
] |
[]
|
["ZONE", "WEIGHT", "REGION", "DEPLOY_ENV"]
|
go
| 4 | 0 | |
pkg/kubepkg/kubepkg.go
|
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package kubepkg
import (
"fmt"
"io/ioutil"
"os"
"path/filepath"
"strconv"
"strings"
"time"
"github.com/blang/semver"
gogithub "github.com/google/go-github/v33/github"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"k8s.io/release/pkg/command"
"k8s.io/release/pkg/github"
"k8s.io/release/pkg/kubepkg/options"
"k8s.io/release/pkg/release"
"k8s.io/release/pkg/util"
)
type ChannelType string
const (
ChannelRelease ChannelType = "release"
ChannelTesting ChannelType = "testing"
ChannelNightly ChannelType = "nightly"
minimumKubernetesVersion = "1.13.0"
CurrentCNIVersion = "0.8.7"
MinimumCNIVersion = "0.8.6"
kubeadmConf = "10-kubeadm.conf"
)
var (
minimumCRIToolsVersion = minimumKubernetesVersion
buildArchMap = map[string]map[options.BuildType]string{
"amd64": {
"deb": "amd64",
"rpm": "x86_64",
},
"arm": {
"deb": "armhf",
"rpm": "armhfp",
},
"arm64": {
"deb": "arm64",
"rpm": "aarch64",
},
"ppc64le": {
"deb": "ppc64el",
"rpm": "ppc64le",
},
"s390x": {
"deb": "s390x",
"rpm": "s390x",
},
}
builtins = map[string]interface{}{
"date": func() string {
return time.Now().Format(time.RFC1123Z)
},
}
)
type Client struct {
options *options.Options
impl Impl
}
func New(o *options.Options) *Client {
return &Client{
options: o,
impl: &impl{},
}
}
func (c *Client) SetImpl(impl Impl) {
c.impl = impl
}
type impl struct{}
//go:generate go run github.com/maxbrunsfeld/counterfeiter/v6 -generate
//counterfeiter:generate . Impl
type Impl interface {
RunSuccessWithWorkDir(workDir, cmd string, args ...string) error
Releases(owner, repo string, includePrereleases bool) ([]*gogithub.RepositoryRelease, error)
GetKubeVersion(versionType release.VersionType) (string, error)
ReadFile(string) ([]byte, error)
WriteFile(string, []byte, os.FileMode) error
}
func (i *impl) RunSuccessWithWorkDir(workDir, cmd string, args ...string) error {
return command.NewWithWorkDir(workDir, cmd, args...).RunSuccess()
}
func (i *impl) Releases(owner, repo string, includePrereleases bool) ([]*gogithub.RepositoryRelease, error) {
return github.New().Releases(owner, repo, includePrereleases)
}
func (i *impl) GetKubeVersion(versionType release.VersionType) (string, error) {
return release.NewVersion().GetKubeVersion(versionType)
}
func (i *impl) ReadFile(filename string) ([]byte, error) {
return ioutil.ReadFile(filename)
}
func (i *impl) WriteFile(filename string, data []byte, perm os.FileMode) error {
return ioutil.WriteFile(filename, data, perm)
}
type Build struct {
Type options.BuildType
Package string
Definitions []*PackageDefinition
TemplateDir string
}
type PackageDefinition struct {
Name string
Version string
Revision string
Channel ChannelType
KubernetesVersion string
Dependencies map[string]string
DownloadLinkBase string
KubeadmKubeletConfigFile string
CNIVersion string
CNIDownloadLink string
}
type buildConfig struct {
*PackageDefinition
Type options.BuildType
GoArch string
BuildArch string
Package string
TemplateDir string
workspace string
specOnly bool
}
func (c *Client) ConstructBuilds() ([]Build, error) {
logrus.Infof("Constructing builds...")
builds := []Build{}
for _, pkg := range c.options.Packages() {
// TODO: Get package directory for any version once package definitions are broken out
packageTemplateDir := filepath.Join(c.options.TemplateDir(), string(c.options.BuildType()), pkg)
if _, err := os.Stat(packageTemplateDir); err != nil {
return nil, errors.Wrap(err, "finding package template dir")
}
b := &Build{
Type: c.options.BuildType(),
Package: pkg,
TemplateDir: packageTemplateDir,
}
for _, channel := range c.options.Channels() {
packageDef := &PackageDefinition{
Revision: c.options.Revision(),
Channel: ChannelType(channel),
}
packageDef.KubernetesVersion = c.options.KubeVersion()
switch b.Package {
case "kubelet":
packageDef.CNIVersion = c.options.CNIVersion()
case "kubernetes-cni":
packageDef.Version = c.options.CNIVersion()
case "cri-tools":
packageDef.Version = c.options.CRIToolsVersion()
}
b.Definitions = append(b.Definitions, packageDef)
}
builds = append(builds, *b)
}
logrus.Infof("Successfully constructed builds")
return builds, nil
}
func (c *Client) WalkBuilds(builds []Build) (err error) {
logrus.Infof("Walking builds...")
workingDir := os.Getenv("KUBEPKG_WORKING_DIR")
if workingDir == "" {
workingDir, err = ioutil.TempDir("", "kubepkg")
if err != nil {
return err
}
}
for _, arch := range c.options.Architectures() {
for _, build := range builds {
for _, packageDef := range build.Definitions {
if err := c.buildPackage(build, packageDef, arch, workingDir); err != nil {
return err
}
}
}
}
if c.options.SpecOnly() {
logrus.Infof("Package specs have been saved in %s", workingDir)
}
logrus.Infof("Successfully walked builds")
return nil
}
func (c *Client) buildPackage(build Build, packageDef *PackageDefinition, arch, tmpDir string) error {
if packageDef == nil {
return errors.New("package definition cannot be nil")
}
pd := &PackageDefinition{}
*pd = *packageDef
bc := &buildConfig{
PackageDefinition: pd,
Type: build.Type,
Package: build.Package,
GoArch: arch,
TemplateDir: build.TemplateDir,
workspace: tmpDir,
specOnly: c.options.SpecOnly(),
}
bc.Name = build.Package
var err error
if bc.KubernetesVersion != "" {
logrus.Infof("Checking if user-supplied Kubernetes version (%s) is valid semver...", bc.KubernetesVersion)
kubeSemver, err := util.TagStringToSemver(bc.KubernetesVersion)
if err != nil {
return errors.Wrap(err, "user-supplied Kubernetes version is not valid semver")
}
kubeVersionString := kubeSemver.String()
kubeVersionParts := strings.Split(kubeVersionString, ".")
switch {
case len(kubeVersionParts) > 4:
logrus.Info("User-supplied Kubernetes version is a CI version")
logrus.Info("Setting channel to nightly")
bc.Channel = ChannelNightly
case len(kubeVersionParts) == 4:
logrus.Info("User-supplied Kubernetes version is a pre-release version")
logrus.Info("Setting channel to testing")
bc.Channel = ChannelTesting
default:
logrus.Info("User-supplied Kubernetes version is a release version")
logrus.Info("Setting channel to release")
bc.Channel = ChannelRelease
}
}
bc.KubernetesVersion, err = c.GetKubernetesVersion(pd)
if err != nil {
return errors.Wrap(err, "getting Kubernetes version")
}
bc.DownloadLinkBase, err = c.GetDownloadLinkBase(pd)
if err != nil {
return errors.Wrap(err, "getting Kubernetes download link base")
}
logrus.Infof("Kubernetes download link base: %s", bc.DownloadLinkBase)
// For cases where a CI build version of Kubernetes is retrieved, replace instances
// of "+" with "-", so that we build with a valid Debian package version.
bc.KubernetesVersion = strings.Replace(bc.KubernetesVersion, "+", "-", 1)
bc.Version, err = c.GetPackageVersion(pd)
if err != nil {
return errors.Wrap(err, "getting package version")
}
logrus.Infof("%s package version: %s", bc.Name, bc.Version)
bc.Dependencies, err = GetDependencies(pd)
if err != nil {
return errors.Wrap(err, "getting dependencies")
}
bc.KubeadmKubeletConfigFile = kubeadmConf
bc.BuildArch = getBuildArch(bc.GoArch, bc.Type)
bc.CNIVersion, err = GetCNIVersion(pd)
if err != nil {
return errors.Wrap(err, "getting CNI version")
}
bc.CNIDownloadLink, err = GetCNIDownloadLink(pd.Version, bc.GoArch)
if err != nil {
return errors.Wrap(err, "getting CNI download link")
}
logrus.Infof("Building %s package for %s/%s architecture...", bc.Package, bc.GoArch, bc.BuildArch)
return c.run(bc)
}
func (c *Client) run(bc *buildConfig) error {
workspaceInfo, err := os.Stat(bc.workspace)
if err != nil {
return err
}
specDir := filepath.Join(bc.workspace, string(bc.Channel), bc.Package)
specDirWithArch := filepath.Join(specDir, bc.GoArch)
if err := os.MkdirAll(specDirWithArch, workspaceInfo.Mode()); err != nil {
return err
}
	// TODO: keepTmp/cleanup needs to be defined in kubepkg root
if !bc.specOnly {
defer os.RemoveAll(specDirWithArch)
}
if _, err := buildSpecs(bc, specDirWithArch); err != nil {
return err
}
if bc.specOnly {
logrus.Info("Spec-only mode was selected; kubepkg will now exit without building packages")
return nil
}
// TODO: Move OS-specific logic into their own files
switch bc.Type {
case options.BuildDeb:
logrus.Infof("Running dpkg-buildpackage for %s (%s/%s)", bc.Package, bc.GoArch, bc.BuildArch)
if err := c.impl.RunSuccessWithWorkDir(
specDirWithArch,
"dpkg-buildpackage",
"--unsigned-source",
"--unsigned-changes",
"--build=binary",
"--host-arch",
bc.BuildArch,
); err != nil {
return errors.Wrap(err, "running debian package build")
}
fileName := fmt.Sprintf(
"%s_%s-%s_%s.deb",
bc.Package,
bc.Version,
bc.Revision,
bc.BuildArch,
)
dstPath := filepath.Join("bin", string(bc.Channel), fileName)
logrus.Infof("Using package destination path %s", dstPath)
if err := os.MkdirAll(filepath.Dir(dstPath), os.FileMode(0o777)); err != nil {
return errors.Wrapf(err, "creating %s", filepath.Dir(dstPath))
}
srcPath := filepath.Join(specDir, fileName)
input, err := c.impl.ReadFile(srcPath)
if err != nil {
return errors.Wrapf(err, "reading %s", srcPath)
}
err = c.impl.WriteFile(dstPath, input, os.FileMode(0o644))
if err != nil {
return errors.Wrapf(err, "writing file to %s", dstPath)
}
logrus.Infof("Successfully built %s", dstPath)
case options.BuildRpm:
logrus.Info("Building rpms via kubepkg is not currently supported")
}
return nil
}
func (c *Client) GetPackageVersion(packageDef *PackageDefinition) (string, error) {
if packageDef == nil {
return "", errors.New("package definition cannot be nil")
}
logrus.Infof("Setting version for %s package...", packageDef.Name)
switch packageDef.Name {
case "kubernetes-cni":
return GetCNIVersion(packageDef)
case "cri-tools":
return c.GetCRIToolsVersion(packageDef)
}
logrus.Infof(
"Using Kubernetes version %s for %s package",
packageDef.KubernetesVersion, packageDef.Name,
)
return util.TrimTagPrefix(packageDef.KubernetesVersion), nil
}
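// GetKubernetesVersion resolves the Kubernetes version for the package, honoring an
// explicitly configured version and otherwise querying the version for the package's channel.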
func (c *Client) GetKubernetesVersion(packageDef *PackageDefinition) (string, error) {
if packageDef == nil {
return "", errors.New("package definition cannot be nil")
}
if packageDef.KubernetesVersion != "" {
logrus.Infof("Using Kubernetes version (%s) for %s package", packageDef.KubernetesVersion, packageDef.Name)
return packageDef.KubernetesVersion, nil
}
switch packageDef.Channel {
case ChannelTesting:
return c.impl.GetKubeVersion(release.VersionTypeStablePreRelease)
case ChannelNightly:
return c.impl.GetKubeVersion(release.VersionTypeCILatestCross)
}
return c.impl.GetKubeVersion(release.VersionTypeStable)
}
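// GetCNIVersion returns the CNI plugins version for the package, enforcing MinimumCNIVersion as the lower bound.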
func GetCNIVersion(packageDef *PackageDefinition) (string, error) {
if packageDef == nil {
return "", errors.New("package definition cannot be nil")
}
// TODO: Ensure version is not less than MinimumCNIVersion
logrus.Infof("Getting CNI version...")
if packageDef.CNIVersion != "" {
cniSemVer, err := util.TagStringToSemver(packageDef.CNIVersion)
if err != nil {
return "", errors.Wrap(err, "parsing CNI version")
}
minCNISemVer, err := util.TagStringToSemver(MinimumCNIVersion)
if err != nil {
return "", errors.Wrap(err, "parsing CNI version")
}
if cniSemVer.LT(minCNISemVer) {
return "", errors.Errorf("specified CNI version (%s) cannot be lower than %s", packageDef.CNIVersion, MinimumCNIVersion)
}
logrus.Infof("Setting CNI version to %s", packageDef.CNIVersion)
return packageDef.CNIVersion, nil
}
logrus.Infof("Setting CNI version to %s", MinimumCNIVersion)
return MinimumCNIVersion, nil
}
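// GetCRIToolsVersion determines the cri-tools version matching the package's Kubernetes minor
// version, falling back to the previous minor for pre-release and CI builds.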
func (c *Client) GetCRIToolsVersion(packageDef *PackageDefinition) (string, error) {
if packageDef == nil {
return "", errors.New("package definition cannot be nil")
}
if packageDef.Version != "" {
return packageDef.Version, nil
}
kubeSemver, err := util.TagStringToSemver(packageDef.KubernetesVersion)
if err != nil {
return "", err
}
logrus.Infof("Getting CRI version...")
kubeVersionString := kubeSemver.String()
kubeVersionParts := strings.Split(kubeVersionString, ".")
criToolsMajor := kubeVersionParts[0]
criToolsMinor := kubeVersionParts[1]
// CRI tools releases are not published until after the corresponding Kubernetes release.
// In instances where the Kubernetes version selected is a pre-release or CI build version,
// we need to build from the previous minor version of CRI tools instead.
//
// Example:
// Kubernetes version: 1.18.0-alpha.1
// Initial CRI tools version: 1.18.0-alpha.1
// Modified CRI tools version: 1.17.0
if len(kubeVersionParts) >= 4 {
criToolsMinorInt, err := strconv.Atoi(criToolsMinor)
if err != nil {
return "", err
}
criToolsMinorInt--
criToolsMinor = strconv.Itoa(criToolsMinorInt)
}
criToolsVersion := fmt.Sprintf("%s.%s.0", criToolsMajor, criToolsMinor)
releases, err := c.impl.Releases("kubernetes-sigs", "cri-tools", false)
if err != nil {
return "", err
}
var tags []string
for _, release := range releases {
criToolsReleaseTag := util.TrimTagPrefix(*release.TagName)
criToolsReleaseVersionParts := strings.Split(criToolsReleaseTag, ".")
criToolsReleaseMinor := criToolsReleaseVersionParts[1]
if criToolsReleaseMinor == criToolsMinor {
tags = append(tags, criToolsReleaseTag)
}
}
for _, tag := range tags {
tagSemver, err := semver.Parse(tag)
if err != nil {
return "", errors.Wrap(err, "could not parse tag semver")
}
criToolsSemver, err := semver.Parse(criToolsVersion)
if err != nil {
return "", errors.Wrap(err, "could not parse CRI tools semver")
}
if tagSemver.GTE(criToolsSemver) {
criToolsVersion = tag
}
}
logrus.Infof("Setting CRI tools version to %s", criToolsVersion)
return criToolsVersion, nil
}
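// GetDownloadLinkBase returns the base download URL for Kubernetes binaries, using CI builds
// for the nightly channel and the release bucket otherwise.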
func (c *Client) GetDownloadLinkBase(packageDef *PackageDefinition) (string, error) {
if packageDef == nil {
return "", errors.New("package definition cannot be nil")
}
if packageDef.Channel == ChannelNightly {
return c.GetCIBuildsDownloadLinkBase(packageDef)
}
return GetDefaultReleaseDownloadLinkBase(packageDef)
}
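// GetCIBuildsDownloadLinkBase returns the dl.k8s.io CI download URL for the package's Kubernetes version.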
func (c *Client) GetCIBuildsDownloadLinkBase(packageDef *PackageDefinition) (string, error) {
if packageDef == nil {
return "", errors.New("package definition cannot be nil")
}
ciVersion := packageDef.KubernetesVersion
if ciVersion == "" {
var err error
ciVersion, err = c.impl.GetKubeVersion(release.VersionTypeCILatestCross)
if err != nil {
return "", err
}
}
return fmt.Sprintf("https://dl.k8s.io/ci/%s", util.AddTagPrefix(ciVersion)), nil
}
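// GetDefaultReleaseDownloadLinkBase returns the default release download URL for the package's Kubernetes version.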
func GetDefaultReleaseDownloadLinkBase(packageDef *PackageDefinition) (string, error) {
if packageDef == nil {
return "", errors.New("package definition cannot be nil")
}
return fmt.Sprintf(
"%s/%s",
options.DefaultReleaseDownloadLinkBase,
util.AddTagPrefix(packageDef.KubernetesVersion),
), nil
}
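// GetDependencies returns the package dependencies and their minimum versions for the given package definition.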
func GetDependencies(packageDef *PackageDefinition) (map[string]string, error) {
if packageDef == nil {
return nil, errors.New("package definition cannot be nil")
}
deps := make(map[string]string)
switch packageDef.Name {
case "kubelet":
deps["kubernetes-cni"] = MinimumCNIVersion
case "kubeadm":
deps["kubelet"] = minimumKubernetesVersion
deps["kubectl"] = minimumKubernetesVersion
deps["kubernetes-cni"] = MinimumCNIVersion // deb based kubeadm still requires kubernetes-cni
deps["cri-tools"] = minimumCRIToolsVersion
}
return deps, nil
}
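// getBuildArch maps a Go architecture to the distribution-specific architecture string for the given build type.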
func getBuildArch(goArch string, buildType options.BuildType) string {
return buildArchMap[goArch][buildType]
}
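// GetCNIDownloadLink returns the download URL of the CNI plugins tarball for the given version and architecture.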
func GetCNIDownloadLink(version, arch string) (string, error) {
if _, err := util.TagStringToSemver(version); err != nil {
return "", errors.Wrap(err, "parsing CNI version")
}
return fmt.Sprintf("https://storage.googleapis.com/k8s-artifacts-cni/release/v%s/cni-plugins-linux-%s-v%s.tgz", version, arch, version), nil
}
|
[
"\"KUBEPKG_WORKING_DIR\""
] |
[] |
[
"KUBEPKG_WORKING_DIR"
] |
[]
|
["KUBEPKG_WORKING_DIR"]
|
go
| 1 | 0 | |
src/main/java/no/nav/pto/veilarbportefolje/VeilarbportefoljeApp.java
|
package no.nav.pto.veilarbportefolje;
import org.springframework.boot.SpringApplication;
import org.springframework.boot.autoconfigure.SpringBootApplication;
import org.springframework.boot.web.servlet.ServletComponentScan;
import javax.annotation.PostConstruct;
import java.util.Optional;
import java.util.TimeZone;
@SpringBootApplication
@ServletComponentScan
public class VeilarbportefoljeApp {
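// Ensure the JVM default time zone follows the TZ environment variable, defaulting to Europe/Oslo.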
@PostConstruct
public void init() {
TimeZone.setDefault(TimeZone.getTimeZone(Optional.ofNullable(System.getenv("TZ")).orElse("Europe/Oslo")));
}
public static void main(String... args) {
SpringApplication.run(VeilarbportefoljeApp.class, args);
}
}
|
[
"\"TZ\""
] |
[] |
[
"TZ"
] |
[]
|
["TZ"]
|
java
| 1 | 0 | |
providers/terraform-provider-aws/aws/resource_aws_api_gateway_domain_name_test.go
|
package aws
import (
"fmt"
"os"
"regexp"
"testing"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/apigateway"
"github.com/hashicorp/aws-sdk-go-base/tfawserr"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource"
"github.com/hashicorp/terraform-plugin-sdk/v2/terraform"
)
func TestAccAWSAPIGatewayDomainName_CertificateArn(t *testing.T) {
rootDomain := testAccAwsAcmCertificateDomainFromEnv(t)
domain := testAccAwsAcmCertificateRandomSubDomain(rootDomain)
var domainName apigateway.DomainName
acmCertificateResourceName := "aws_acm_certificate.test"
resourceName := "aws_api_gateway_domain_name.test"
resource.ParallelTest(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t); testAccPreCheckApigatewayEdgeDomainName(t) },
ProviderFactories: testAccProviderFactories,
CheckDestroy: testAccCheckAWSAPIGatewayEdgeDomainNameDestroy,
Steps: []resource.TestStep{
{
Config: testAccAWSAPIGatewayDomainNameConfig_CertificateArn(rootDomain, domain),
Check: resource.ComposeTestCheckFunc(
testAccCheckAWSAPIGatewayEdgeDomainNameExists(resourceName, &domainName),
testAccCheckResourceAttrRegionalARNApigatewayEdgeDomainName(resourceName, "arn", "apigateway", domain),
resource.TestCheckResourceAttrPair(resourceName, "certificate_arn", acmCertificateResourceName, "arn"),
resource.TestMatchResourceAttr(resourceName, "cloudfront_domain_name", regexp.MustCompile(`[a-z0-9]+.cloudfront.net`)),
resource.TestCheckResourceAttr(resourceName, "cloudfront_zone_id", "Z2FDTNDATAQYW2"),
resource.TestCheckResourceAttrPair(resourceName, "domain_name", acmCertificateResourceName, "domain_name"),
),
},
{
ResourceName: resourceName,
ImportState: true,
ImportStateVerify: true,
},
},
})
}
func TestAccAWSAPIGatewayDomainName_CertificateName(t *testing.T) {
certificateBody := os.Getenv("AWS_API_GATEWAY_DOMAIN_NAME_CERTIFICATE_BODY")
if certificateBody == "" {
t.Skip(
"Environment variable AWS_API_GATEWAY_DOMAIN_NAME_CERTIFICATE_BODY is not set. " +
"This environment variable must be set to any non-empty value " +
"with a publicly trusted certificate body to enable the test.")
}
certificateChain := os.Getenv("AWS_API_GATEWAY_DOMAIN_NAME_CERTIFICATE_CHAIN")
if certificateChain == "" {
t.Skip(
"Environment variable AWS_API_GATEWAY_DOMAIN_NAME_CERTIFICATE_CHAIN is not set. " +
"This environment variable must be set to any non-empty value " +
"with a chain certificate acceptable for the certificate to enable the test.")
}
certificatePrivateKey := os.Getenv("AWS_API_GATEWAY_DOMAIN_NAME_CERTIFICATE_PRIVATE_KEY")
if certificatePrivateKey == "" {
t.Skip(
"Environment variable AWS_API_GATEWAY_DOMAIN_NAME_CERTIFICATE_PRIVATE_KEY is not set. " +
"This environment variable must be set to any non-empty value " +
"with a private key of a publicly trusted certificate to enable the test.")
}
domainName := os.Getenv("AWS_API_GATEWAY_DOMAIN_NAME_DOMAIN_NAME")
if domainName == "" {
t.Skip(
"Environment variable AWS_API_GATEWAY_DOMAIN_NAME_DOMAIN_NAME is not set. " +
"This environment variable must be set to any non-empty value " +
"with a domain name acceptable for the certificate to enable the test.")
}
var conf apigateway.DomainName
resourceName := "aws_api_gateway_domain_name.test"
resource.ParallelTest(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccCheckAWSAPIGatewayDomainNameDestroy,
Steps: []resource.TestStep{
{
Config: testAccAWSAPIGatewayDomainNameConfig_CertificateName(domainName, certificatePrivateKey, certificateBody, certificateChain),
Check: resource.ComposeTestCheckFunc(
testAccCheckAWSAPIGatewayDomainNameExists(resourceName, &conf),
testAccMatchResourceAttrRegionalARNNoAccount(resourceName, "arn", "apigateway", regexp.MustCompile(`/domainnames/+.`)),
resource.TestCheckResourceAttr(resourceName, "certificate_name", "tf-acc-apigateway-domain-name"),
resource.TestCheckResourceAttrSet(resourceName, "cloudfront_domain_name"),
resource.TestCheckResourceAttr(resourceName, "cloudfront_zone_id", "Z2FDTNDATAQYW2"),
resource.TestCheckResourceAttr(resourceName, "domain_name", domainName),
resource.TestCheckResourceAttrSet(resourceName, "certificate_upload_date"),
),
},
{
ResourceName: resourceName,
ImportState: true,
ImportStateVerify: true,
ImportStateVerifyIgnore: []string{"certificate_body", "certificate_chain", "certificate_private_key"},
},
},
})
}
func TestAccAWSAPIGatewayDomainName_RegionalCertificateArn(t *testing.T) {
var domainName apigateway.DomainName
resourceName := "aws_api_gateway_domain_name.test"
rName := fmt.Sprintf("tf-acc-%s.terraformtest.com", acctest.RandString(8))
key := tlsRsaPrivateKeyPem(2048)
certificate := tlsRsaX509SelfSignedCertificatePem(key, rName)
resource.ParallelTest(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccCheckAWSAPIGatewayDomainNameDestroy,
Steps: []resource.TestStep{
{
Config: testAccAWSAPIGatewayDomainNameConfig_RegionalCertificateArn(rName, key, certificate),
Check: resource.ComposeTestCheckFunc(
testAccCheckAWSAPIGatewayDomainNameExists(resourceName, &domainName),
testAccCheckResourceAttrRegionalARNApigatewayRegionalDomainName(resourceName, "arn", "apigateway", rName),
resource.TestCheckResourceAttr(resourceName, "domain_name", rName),
testAccMatchResourceAttrRegionalHostname(resourceName, "regional_domain_name", "execute-api", regexp.MustCompile(`d-[a-z0-9]+`)),
resource.TestMatchResourceAttr(resourceName, "regional_zone_id", regexp.MustCompile(`^Z`)),
),
},
},
})
}
func TestAccAWSAPIGatewayDomainName_RegionalCertificateName(t *testing.T) {
// For now, use an environment variable to limit running this test
// BadRequestException: Uploading certificates is not supported for REGIONAL.
// See Remarks section of https://docs.aws.amazon.com/apigateway/api-reference/link-relation/domainname-create/
// which suggests this configuration should be possible somewhere, e.g. AWS China?
regionalCertificateArn := os.Getenv("AWS_API_GATEWAY_DOMAIN_NAME_REGIONAL_CERTIFICATE_NAME_ENABLED")
if regionalCertificateArn == "" {
t.Skip(
"Environment variable AWS_API_GATEWAY_DOMAIN_NAME_REGIONAL_CERTIFICATE_NAME_ENABLED is not set. " +
"This environment variable must be set to any non-empty value " +
"in a region where uploading REGIONAL certificates is allowed " +
"to enable the test.")
}
var domainName apigateway.DomainName
resourceName := "aws_api_gateway_domain_name.test"
rName := fmt.Sprintf("tf-acc-%s.terraformtest.com", acctest.RandString(8))
caKey := tlsRsaPrivateKeyPem(2048)
caCertificate := tlsRsaX509SelfSignedCaCertificatePem(caKey)
key := tlsRsaPrivateKeyPem(2048)
certificate := tlsRsaX509LocallySignedCertificatePem(caKey, caCertificate, key, "*.terraformtest.com")
resource.ParallelTest(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccCheckAWSAPIGatewayDomainNameDestroy,
Steps: []resource.TestStep{
{
Config: testAccAWSAPIGatewayDomainNameConfig_RegionalCertificateName(rName, key, certificate, caCertificate),
Check: resource.ComposeTestCheckFunc(
testAccCheckAWSAPIGatewayDomainNameExists(resourceName, &domainName),
testAccCheckResourceAttrRegionalARNApigatewayRegionalDomainName(resourceName, "arn", "apigateway", rName),
resource.TestCheckResourceAttr(resourceName, "certificate_body", certificate),
resource.TestCheckResourceAttr(resourceName, "certificate_chain", caCertificate),
resource.TestCheckResourceAttr(resourceName, "certificate_name", "tf-acc-apigateway-domain-name"),
resource.TestCheckResourceAttr(resourceName, "certificate_private_key", key),
resource.TestCheckResourceAttrSet(resourceName, "certificate_upload_date"),
resource.TestCheckResourceAttr(resourceName, "domain_name", rName),
testAccMatchResourceAttrRegionalHostname(resourceName, "regional_domain_name", "execute-api", regexp.MustCompile(`d-[a-z0-9]+`)),
resource.TestMatchResourceAttr(resourceName, "regional_zone_id", regexp.MustCompile(`^Z`)),
),
},
},
})
}
func TestAccAWSAPIGatewayDomainName_SecurityPolicy(t *testing.T) {
var domainName apigateway.DomainName
resourceName := "aws_api_gateway_domain_name.test"
rName := fmt.Sprintf("tf-acc-%s.terraformtest.com", acctest.RandString(8))
key := tlsRsaPrivateKeyPem(2048)
certificate := tlsRsaX509SelfSignedCertificatePem(key, rName)
resource.ParallelTest(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccCheckAWSAPIGatewayDomainNameDestroy,
Steps: []resource.TestStep{
{
Config: testAccAWSAPIGatewayDomainNameConfig_SecurityPolicy(rName, key, certificate, apigateway.SecurityPolicyTls12),
Check: resource.ComposeTestCheckFunc(
testAccCheckAWSAPIGatewayDomainNameExists(resourceName, &domainName),
resource.TestCheckResourceAttr(resourceName, "security_policy", apigateway.SecurityPolicyTls12),
),
},
{
ResourceName: resourceName,
ImportState: true,
ImportStateVerify: true,
},
},
})
}
func TestAccAWSAPIGatewayDomainName_Tags(t *testing.T) {
var domainName apigateway.DomainName
resourceName := "aws_api_gateway_domain_name.test"
rName := fmt.Sprintf("tf-acc-%s.terraformtest.com", acctest.RandString(8))
key := tlsRsaPrivateKeyPem(2048)
certificate := tlsRsaX509SelfSignedCertificatePem(key, rName)
resource.ParallelTest(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccCheckAWSAPIGatewayDomainNameDestroy,
Steps: []resource.TestStep{
{
Config: testAccAWSAPIGatewayDomainNameConfigTags1(rName, key, certificate, "key1", "value1"),
Check: resource.ComposeTestCheckFunc(
testAccCheckAWSAPIGatewayDomainNameExists(resourceName, &domainName),
resource.TestCheckResourceAttr(resourceName, "tags.%", "1"),
resource.TestCheckResourceAttr(resourceName, "tags.key1", "value1"),
),
},
{
Config: testAccAWSAPIGatewayDomainNameConfigTags2(rName, key, certificate, "key1", "value1updated", "key2", "value2"),
Check: resource.ComposeTestCheckFunc(
testAccCheckAWSAPIGatewayDomainNameExists(resourceName, &domainName),
resource.TestCheckResourceAttr(resourceName, "tags.%", "2"),
resource.TestCheckResourceAttr(resourceName, "tags.key1", "value1updated"),
resource.TestCheckResourceAttr(resourceName, "tags.key2", "value2"),
),
},
{
Config: testAccAWSAPIGatewayDomainNameConfigTags1(rName, key, certificate, "key2", "value2"),
Check: resource.ComposeTestCheckFunc(
testAccCheckAWSAPIGatewayDomainNameExists(resourceName, &domainName),
resource.TestCheckResourceAttr(resourceName, "tags.%", "1"),
resource.TestCheckResourceAttr(resourceName, "tags.key2", "value2"),
),
},
{
ResourceName: resourceName,
ImportState: true,
ImportStateVerify: true,
},
},
})
}
func TestAccAWSAPIGatewayDomainName_disappears(t *testing.T) {
var domainName apigateway.DomainName
resourceName := "aws_api_gateway_domain_name.test"
rName := fmt.Sprintf("tf-acc-%s.terraformtest.com", acctest.RandString(8))
key := tlsRsaPrivateKeyPem(2048)
certificate := tlsRsaX509SelfSignedCertificatePem(key, rName)
resource.ParallelTest(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccCheckAWSAPIGatewayDomainNameDestroy,
Steps: []resource.TestStep{
{
Config: testAccAWSAPIGatewayDomainNameConfig_RegionalCertificateArn(rName, key, certificate),
Check: resource.ComposeTestCheckFunc(
testAccCheckAWSAPIGatewayDomainNameExists(resourceName, &domainName),
testAccCheckResourceDisappears(testAccProvider, resourceAwsApiGatewayDomainName(), resourceName),
),
ExpectNonEmptyPlan: true,
},
},
})
}
func TestAccAWSAPIGatewayDomainName_MutualTlsAuthentication(t *testing.T) {
rootDomain := testAccAwsAcmCertificateDomainFromEnv(t)
domain := testAccAwsAcmCertificateRandomSubDomain(rootDomain)
var v apigateway.DomainName
resourceName := "aws_api_gateway_domain_name.test"
acmCertificateResourceName := "aws_acm_certificate.test"
s3BucketObjectResourceName := "aws_s3_bucket_object.test"
rName := acctest.RandomWithPrefix("tf-acc-test")
resource.ParallelTest(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccCheckAWSAPIGatewayDomainNameDestroy,
Steps: []resource.TestStep{
{
Config: testAccAWSAPIGatewayDomainNameConfig_MutualTlsAuthentication(rootDomain, domain, rName),
Check: resource.ComposeTestCheckFunc(
testAccCheckAWSAPIGatewayDomainNameExists(resourceName, &v),
testAccMatchResourceAttrRegionalARNNoAccount(resourceName, "arn", "apigateway", regexp.MustCompile(`/domainnames/+.`)),
resource.TestCheckResourceAttrPair(resourceName, "domain_name", acmCertificateResourceName, "domain_name"),
resource.TestCheckResourceAttr(resourceName, "mutual_tls_authentication.#", "1"),
resource.TestCheckResourceAttr(resourceName, "mutual_tls_authentication.0.truststore_uri", fmt.Sprintf("s3://%s/%s", rName, rName)),
resource.TestCheckResourceAttrPair(resourceName, "mutual_tls_authentication.0.truststore_version", s3BucketObjectResourceName, "version_id"),
),
},
{
ResourceName: resourceName,
ImportState: true,
ImportStateVerify: true,
},
// Test disabling mutual TLS authentication.
{
Config: testAccAWSAPIGatewayDomainNameConfig_MutualTlsAuthenticationMissing(rootDomain, domain),
Check: resource.ComposeTestCheckFunc(
testAccCheckAWSAPIGatewayDomainNameExists(resourceName, &v),
resource.TestCheckResourceAttrPair(resourceName, "domain_name", acmCertificateResourceName, "domain_name"),
resource.TestCheckResourceAttr(resourceName, "mutual_tls_authentication.#", "0"),
),
},
},
})
}
func testAccCheckAWSAPIGatewayDomainNameExists(n string, res *apigateway.DomainName) resource.TestCheckFunc {
return func(s *terraform.State) error {
rs, ok := s.RootModule().Resources[n]
if !ok {
return fmt.Errorf("Not found: %s", n)
}
if rs.Primary.ID == "" {
return fmt.Errorf("No API Gateway DomainName ID is set")
}
conn := testAccProvider.Meta().(*AWSClient).apigatewayconn
req := &apigateway.GetDomainNameInput{
DomainName: aws.String(rs.Primary.ID),
}
describe, err := conn.GetDomainName(req)
if err != nil {
return err
}
if *describe.DomainName != rs.Primary.ID {
return fmt.Errorf("APIGateway DomainName not found")
}
*res = *describe
return nil
}
}
func testAccCheckAWSAPIGatewayDomainNameDestroy(s *terraform.State) error {
conn := testAccProvider.Meta().(*AWSClient).apigatewayconn
for _, rs := range s.RootModule().Resources {
if rs.Type != "aws_api_gateway_domain_name" {
continue
}
_, err := conn.GetDomainName(&apigateway.GetDomainNameInput{
DomainName: aws.String(rs.Primary.ID),
})
if err != nil {
if isAWSErr(err, apigateway.ErrCodeNotFoundException, "") {
return nil
}
return err
}
return fmt.Errorf("API Gateway Domain Name still exists: %s", rs.Primary.ID)
}
return nil
}
func testAccCheckAWSAPIGatewayEdgeDomainNameExists(resourceName string, domainName *apigateway.DomainName) resource.TestCheckFunc {
return func(s *terraform.State) error {
rs, ok := s.RootModule().Resources[resourceName]
if !ok {
return fmt.Errorf("not found: %s", resourceName)
}
if rs.Primary.ID == "" {
return fmt.Errorf("resource ID not set")
}
conn := testAccProviderApigatewayEdgeDomainName.Meta().(*AWSClient).apigatewayconn
input := &apigateway.GetDomainNameInput{
DomainName: aws.String(rs.Primary.ID),
}
output, err := conn.GetDomainName(input)
if err != nil {
return fmt.Errorf("error reading API Gateway Domain Name (%s): %w", rs.Primary.ID, err)
}
*domainName = *output
return nil
}
}
func testAccCheckAWSAPIGatewayEdgeDomainNameDestroy(s *terraform.State) error {
conn := testAccProviderApigatewayEdgeDomainName.Meta().(*AWSClient).apigatewayconn
for _, rs := range s.RootModule().Resources {
if rs.Type != "aws_api_gateway_domain_name" {
continue
}
input := &apigateway.GetDomainNameInput{
DomainName: aws.String(rs.Primary.ID),
}
output, err := conn.GetDomainName(input)
if tfawserr.ErrCodeEquals(err, apigateway.ErrCodeNotFoundException) {
continue
}
if err != nil {
return fmt.Errorf("error reading API Gateway Domain Name (%s): %w", rs.Primary.ID, err)
}
if output != nil && aws.StringValue(output.DomainName) == rs.Primary.ID {
return fmt.Errorf("API Gateway Domain Name (%s) still exists", rs.Primary.ID)
}
}
return nil
}
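// testAccAWSAPIGatewayDomainNameConfigPublicCert returns Terraform configuration that requests
// and DNS-validates a public ACM certificate for the given subdomain.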
func testAccAWSAPIGatewayDomainNameConfigPublicCert(rootDomain, domain string) string {
return fmt.Sprintf(`
data "aws_route53_zone" "test" {
name = %[1]q
private_zone = false
}
resource "aws_acm_certificate" "test" {
domain_name = %[2]q
validation_method = "DNS"
}
#
# for_each acceptance testing requires:
# https://github.com/hashicorp/terraform-plugin-sdk/issues/536
#
# resource "aws_route53_record" "test" {
# for_each = {
# for dvo in aws_acm_certificate.test.domain_validation_options: dvo.domain_name => {
# name = dvo.resource_record_name
# record = dvo.resource_record_value
# type = dvo.resource_record_type
# }
# }
# allow_overwrite = true
# name = each.value.name
# records = [each.value.record]
# ttl = 60
# type = each.value.type
# zone_id = data.aws_route53_zone.test.zone_id
# }
resource "aws_route53_record" "test" {
allow_overwrite = true
name = tolist(aws_acm_certificate.test.domain_validation_options)[0].resource_record_name
records = [tolist(aws_acm_certificate.test.domain_validation_options)[0].resource_record_value]
ttl = 60
type = tolist(aws_acm_certificate.test.domain_validation_options)[0].resource_record_type
zone_id = data.aws_route53_zone.test.zone_id
}
resource "aws_acm_certificate_validation" "test" {
certificate_arn = aws_acm_certificate.test.arn
validation_record_fqdns = [aws_route53_record.test.fqdn]
}
`, rootDomain, domain)
}
func testAccAWSAPIGatewayDomainNameConfig_CertificateArn(rootDomain string, domain string) string {
return composeConfig(
testAccApigatewayEdgeDomainNameRegionProviderConfig(),
testAccAWSAPIGatewayDomainNameConfigPublicCert(rootDomain, domain),
`
resource "aws_api_gateway_domain_name" "test" {
domain_name = aws_acm_certificate.test.domain_name
certificate_arn = aws_acm_certificate_validation.test.certificate_arn
endpoint_configuration {
types = ["EDGE"]
}
}
`)
}
func testAccAWSAPIGatewayDomainNameConfig_CertificateName(domainName, key, certificate, chainCertificate string) string {
return fmt.Sprintf(`
resource "aws_api_gateway_domain_name" "test" {
domain_name = "%[1]s"
certificate_body = "%[2]s"
certificate_chain = "%[3]s"
certificate_name = "tf-acc-apigateway-domain-name"
certificate_private_key = "%[4]s"
}
`, domainName, tlsPemEscapeNewlines(certificate), tlsPemEscapeNewlines(chainCertificate), tlsPemEscapeNewlines(key))
}
func testAccAWSAPIGatewayDomainNameConfig_RegionalCertificateArn(domainName, key, certificate string) string {
return fmt.Sprintf(`
resource "aws_acm_certificate" "test" {
certificate_body = "%[2]s"
private_key = "%[3]s"
}
resource "aws_api_gateway_domain_name" "test" {
domain_name = %[1]q
regional_certificate_arn = aws_acm_certificate.test.arn
endpoint_configuration {
types = ["REGIONAL"]
}
}
`, domainName, tlsPemEscapeNewlines(certificate), tlsPemEscapeNewlines(key))
}
func testAccAWSAPIGatewayDomainNameConfig_RegionalCertificateName(domainName, key, certificate, chainCertificate string) string {
return fmt.Sprintf(`
resource "aws_api_gateway_domain_name" "test" {
certificate_body = "%[2]s"
certificate_chain = "%[3]s"
certificate_private_key = "%[4]s"
domain_name = "%[1]s"
regional_certificate_name = "tf-acc-apigateway-domain-name"
endpoint_configuration {
types = ["REGIONAL"]
}
}
`, domainName, tlsPemEscapeNewlines(certificate), tlsPemEscapeNewlines(chainCertificate), tlsPemEscapeNewlines(key))
}
func testAccAWSAPIGatewayDomainNameConfig_SecurityPolicy(domainName, key, certificate, securityPolicy string) string {
return fmt.Sprintf(`
resource "aws_acm_certificate" "test" {
certificate_body = "%[2]s"
private_key = "%[3]s"
}
resource "aws_api_gateway_domain_name" "test" {
domain_name = %[1]q
regional_certificate_arn = aws_acm_certificate.test.arn
security_policy = %[4]q
endpoint_configuration {
types = ["REGIONAL"]
}
}
`, domainName, tlsPemEscapeNewlines(certificate), tlsPemEscapeNewlines(key), securityPolicy)
}
func testAccAWSAPIGatewayDomainNameConfigTags1(domainName, key, certificate, tagKey1, tagValue1 string) string {
return fmt.Sprintf(`
resource "aws_acm_certificate" "test" {
certificate_body = "%[2]s"
private_key = "%[3]s"
}
resource "aws_api_gateway_domain_name" "test" {
domain_name = %[1]q
regional_certificate_arn = aws_acm_certificate.test.arn
endpoint_configuration {
types = ["REGIONAL"]
}
tags = {
%[4]q = %[5]q
}
}
`, domainName, tlsPemEscapeNewlines(certificate), tlsPemEscapeNewlines(key), tagKey1, tagValue1)
}
func testAccAWSAPIGatewayDomainNameConfigTags2(domainName, key, certificate, tagKey1, tagValue1, tagKey2, tagValue2 string) string {
return fmt.Sprintf(`
resource "aws_acm_certificate" "test" {
certificate_body = "%[2]s"
private_key = "%[3]s"
}
resource "aws_api_gateway_domain_name" "test" {
domain_name = %[1]q
regional_certificate_arn = aws_acm_certificate.test.arn
endpoint_configuration {
types = ["REGIONAL"]
}
tags = {
%[4]q = %[5]q
%[6]q = %[7]q
}
}
`, domainName, tlsPemEscapeNewlines(certificate), tlsPemEscapeNewlines(key), tagKey1, tagValue1, tagKey2, tagValue2)
}
func testAccAWSAPIGatewayDomainNameConfig_MutualTlsAuthentication(rootDomain, domain, rName string) string {
return composeConfig(
testAccAWSAPIGatewayDomainNameConfigPublicCert(rootDomain, domain),
fmt.Sprintf(`
resource "aws_s3_bucket" "test" {
bucket = %[1]q
force_destroy = true
versioning {
enabled = true
}
}
resource "aws_s3_bucket_object" "test" {
bucket = aws_s3_bucket.test.id
key = %[1]q
source = "test-fixtures/apigateway-domain-name-truststore-1.pem"
}
resource "aws_api_gateway_domain_name" "test" {
domain_name = aws_acm_certificate.test.domain_name
regional_certificate_arn = aws_acm_certificate_validation.test.certificate_arn
security_policy = "TLS_1_2"
endpoint_configuration {
types = ["REGIONAL"]
}
mutual_tls_authentication {
truststore_uri = "s3://${aws_s3_bucket_object.test.bucket}/${aws_s3_bucket_object.test.key}"
truststore_version = aws_s3_bucket_object.test.version_id
}
}
`, rName))
}
func testAccAWSAPIGatewayDomainNameConfig_MutualTlsAuthenticationMissing(rootDomain, domain string) string {
return composeConfig(
testAccAWSAPIGatewayDomainNameConfigPublicCert(rootDomain, domain),
`
resource "aws_api_gateway_domain_name" "test" {
domain_name = aws_acm_certificate.test.domain_name
regional_certificate_arn = aws_acm_certificate_validation.test.certificate_arn
security_policy = "TLS_1_2"
endpoint_configuration {
types = ["REGIONAL"]
}
}
`)
}
|
[
"\"AWS_API_GATEWAY_DOMAIN_NAME_CERTIFICATE_BODY\"",
"\"AWS_API_GATEWAY_DOMAIN_NAME_CERTIFICATE_CHAIN\"",
"\"AWS_API_GATEWAY_DOMAIN_NAME_CERTIFICATE_PRIVATE_KEY\"",
"\"AWS_API_GATEWAY_DOMAIN_NAME_DOMAIN_NAME\"",
"\"AWS_API_GATEWAY_DOMAIN_NAME_REGIONAL_CERTIFICATE_NAME_ENABLED\""
] |
[] |
[
"AWS_API_GATEWAY_DOMAIN_NAME_REGIONAL_CERTIFICATE_NAME_ENABLED",
"AWS_API_GATEWAY_DOMAIN_NAME_DOMAIN_NAME",
"AWS_API_GATEWAY_DOMAIN_NAME_CERTIFICATE_BODY",
"AWS_API_GATEWAY_DOMAIN_NAME_CERTIFICATE_PRIVATE_KEY",
"AWS_API_GATEWAY_DOMAIN_NAME_CERTIFICATE_CHAIN"
] |
[]
|
["AWS_API_GATEWAY_DOMAIN_NAME_REGIONAL_CERTIFICATE_NAME_ENABLED", "AWS_API_GATEWAY_DOMAIN_NAME_DOMAIN_NAME", "AWS_API_GATEWAY_DOMAIN_NAME_CERTIFICATE_BODY", "AWS_API_GATEWAY_DOMAIN_NAME_CERTIFICATE_PRIVATE_KEY", "AWS_API_GATEWAY_DOMAIN_NAME_CERTIFICATE_CHAIN"]
|
go
| 5 | 0 | |
make.go
|
// +build ignore
// SILVER - Service Wrapper
//
// Copyright (c) 2014-2021 PaperCut Software http://www.papercut.com/
// Use of this source code is governed by an MIT or GPL Version 2 license.
// See the project's LICENSE file for more information.
//
// This Go make file builds Silver directly from a code checkout, bypassing
// the need to configure/setup a Go workspace.
//
// Run on the command line with:
// $ go run make.go
//
// Other options:
// Run tests:
// $ go run make.go test
//
// Concepts loosely based on concepts in Camlistore
// https://github.com/bradfitz/camlistore
//
package main
import (
"flag"
"fmt"
"os"
"os/exec"
"path/filepath"
"runtime"
"strings"
)
const (
rootNamespace = "github.com/papercutsoftware/silver"
)
var (
// The project root where this file is located
projectRoot string
// The output directory for built binaries (build/<goos> under the project root)
buildOutputDir string
)
func usage() {
fmt.Println("Usage: go run make.go [flagged args] [non-flagged args]")
fmt.Println("-goos=<operating system> target operating system for silver executable. Default is taken from runtime")
fmt.Println("-goarch=<architecture> target architecture for silver executable. Default is taken from runtime")
fmt.Println("Build action. Can be either 'all'(build all) or 'test'(test all). Default is 'all'")
os.Exit(1)
}
func main() {
goos := flag.String("goos", runtime.GOOS, "Specify target operating system for cross compilation")
goarch := flag.String("goarch", runtime.GOARCH, "Specify target architecture for cross compilation")
flag.Parse()
_ = os.Setenv("GOFLAGS", "-mod=vendor")
_ = os.Setenv("GOOS", *goos)
_ = os.Setenv("GOARCH", *goarch)
var err error
projectRoot, err = os.Getwd()
if err != nil {
panic(fmt.Sprintf("Failed to get current directory: %v\n", err))
}
buildOutputDir = filepath.Join(projectRoot, "build", *goos)
action := "all"
// Use the first non-flag argument, if any, as the build action.
if len(flag.Args()) > 0 {
action = flag.Args()[0]
}
switch action {
case "all":
buildAll()
case "test":
testAll()
default:
usage()
}
}
func buildAll() {
makeDir(buildOutputDir)
goos := os.Getenv("GOOS")
goarch := os.Getenv("GOARCH")
fmt.Printf("Building binaries for %s/%s ...\n", goos, goarch)
_ = runCmd("go", "build", "-ldflags", "-s -w", "-o", makeOutputPath(buildOutputDir, "updater"), rootNamespace+"/updater")
_ = runCmd("go", "build", "-ldflags", "-s -w", "-o", makeOutputPath(buildOutputDir, "service"), rootNamespace+"/service")
_ = runCmd("go", "build", "-tags", "nohttp", "-ldflags", "-s -w", "-o", makeOutputPath(buildOutputDir, "service-no-http"), rootNamespace+"/service")
if goos == "windows" {
_ = runCmd("go", "build", "-tags", "nohttp", "-ldflags", "-s -w -H=windowsgui", "-o", makeOutputPath(buildOutputDir, "service-no-window"), rootNamespace+"/service")
_ = runCmd("go", "build", "-ldflags", "-s -w -H=windowsgui", "-o", makeOutputPath(buildOutputDir, "updater-no-window"), rootNamespace+"/updater")
}
fmt.Printf("\nCOMPLETE. You'll find the files in:\n '%s'\n", buildOutputDir)
}
func testAll() {
_ = runCmd("go", "test", rootNamespace+"/...")
}
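// runCmd runs the given command, wiring its output to stdout/stderr, and returns any error.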
func runCmd(cmd string, arg ...string) error {
c := exec.Command(cmd, arg...)
c.Stdout = os.Stdout
c.Stderr = os.Stderr
if err := c.Run(); err != nil {
return fmt.Errorf("error running command %s: %v", cmd, err)
}
return nil
}
func makeDir(dir string) {
if err := os.MkdirAll(dir, 0755); err != nil {
panic(err)
}
}
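// makeOutputPath joins dir and name, appending .exe when targeting Windows.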
func makeOutputPath(dir, name string) string {
goos := os.Getenv("GOOS")
if goos == "windows" {
if !strings.HasSuffix(name, ".exe") {
name = name + ".exe"
}
}
return filepath.Join(dir, name)
}
|
[
"\"GOOS\"",
"\"GOARCH\"",
"\"GOOS\""
] |
[] |
[
"GOARCH",
"GOOS"
] |
[]
|
["GOARCH", "GOOS"]
|
go
| 2 | 0 | |
src/PyGuard/PyGuard.py
|
import aiohttp, codecs
from aiohttp import ClientSession
from codecs import decode, encode
BASE_URL = "http://api.guardiansystem.xyz/v1"
# CHORE: Add request builder and Exception handler
class GuardClient:
"""
PyGuard
-
A Python wrapper for Discord Guardian System API
"""
def __init__(self):
self.credentials = None
def login(self, API_Token: str, id: str):
"""
Log in to the API using your credentials.
You can apply for credentials at https://guardiansystem.xyz. \n
For security, it is recommended that login information is stored
in a `.env` file.
~@`parameters`:
`API_Token`: str (required) Your access token granted for your application
`id`: str (required) Your Application's Discord ID as string.
```
GuardClient.login(
id=os.getenv("CLIENT_ID"),
API_Token=os.getenv("API_TOKEN")
)
```
"""
strencode = encode(bytes(id + ":" + API_Token, "utf-8"), 'base64')
self.credentials = decode(strencode, "utf-8")
def credentials(self):
"""
Returns the encoded credentials (client ID + API token) generated by `.login()`
"""
return self.credentials
class Request:
def __init__(self):
self.route = None
self.headers = None
self.method = None
self.credentials = GuardClient.credentials()
def setRoute(self, route: str):
"""
Sets the route for the request. Must be an accepted Endpoint.
~@`Routes`:
- `/offenders`: Fetches users in the database \n
Methods: `GET`
- `/servers`: Fetches offending servers \n
Methods: `GET`
- `/links`: Fetches a known scam link by URL \n
Methods: `GET`
- `/reports`: Submits or Fetches a report & its status \n
Methods: `GET`, `POST`
- `/requests`: Submit or Fetch a request for your Data \n
Methods: `GET`, `POST`
"""
if route.startswith("/"):
if self.method == "GET":
self.route = BASE_URL + route
elif self.method == 'POST':
self.route = route
else: return
else:
if self.method == 'GET':
self.route = BASE_URL + "/" + route
elif self.method == "POST":
self.route = "/" + route
else: return
def setMethod(self, method: str):
"""
Sets the method for the request. The method must be either `GET` or `POST`.
See `.setRoute()` for accepted request methods per route.
"""
accepted = ["GET", "POST"]
if not method.upper() in accepted:
print("PYGUARD ERROR: Invalid method type: {} \nAccepted Methods: \n\"POST\" \n\"GET\"".format(method))
return
else:
self.method = method.upper()
def setHeaders(self):
"""
Sets the headers for the request, providing the credentials from `.login()` for authentication.
"""
self.headers = {
"Method": self.method,
"Host": BASE_URL,
"Path": self.route,
"Protocol": "HTTP/1.1",
"Content-type": "Application/JSON",
"Authorization": "Basic " + self.credentials
}
|
[] |
[] |
[
"API_TOKEN",
"CLIENT_ID"
] |
[]
|
["API_TOKEN", "CLIENT_ID"]
|
python
| 2 | 0 | |
cave/com.raytheon.uf.viz.app.launcher/src/com/raytheon/uf/viz/app/launcher/handlers/AppLauncherHandler.java
|
/**
* This software was developed and / or modified by Raytheon Company,
* pursuant to Contract DG133W-05-CQ-1067 with the US Government.
*
* U.S. EXPORT CONTROLLED TECHNICAL DATA
* This software product contains export-restricted data whose
* export/transfer/disclosure is restricted by U.S. law. Dissemination
* to non-U.S. persons whether in the United States or abroad requires
* an export license or other authorization.
*
* Contractor Name: Raytheon Company
* Contractor Address: 6825 Pine Street, Suite 340
* Mail Stop B8
* Omaha, NE 68106
* 402.291.0100
*
* See the AWIPS II Master Rights File ("Master Rights File.pdf") for
* further licensing information.
**/
package com.raytheon.uf.viz.app.launcher.handlers;
import java.io.File;
import java.net.MalformedURLException;
import java.net.URL;
import java.util.Map;
import org.eclipse.core.commands.AbstractHandler;
import org.eclipse.core.commands.ExecutionEvent;
import org.eclipse.core.commands.ExecutionException;
import org.eclipse.core.runtime.FileLocator;
import org.eclipse.core.runtime.Path;
import org.eclipse.swt.graphics.Rectangle;
import org.eclipse.swt.widgets.Shell;
import org.eclipse.ui.handlers.HandlerUtil;
import org.osgi.framework.Bundle;
import com.raytheon.uf.common.status.IUFStatusHandler;
import com.raytheon.uf.common.status.UFStatus;
import com.raytheon.uf.viz.app.launcher.bundle.Environment;
import com.raytheon.uf.viz.app.launcher.bundle.Launcher;
import com.raytheon.uf.viz.app.launcher.runner.AppRunner;
import com.raytheon.uf.viz.app.launcher.utilities.AppLauncherUtilities;
import com.raytheon.uf.viz.core.VizApp;
/**
* Menu/tool bar command handler that launches an external application.
*
* <pre>
*
* SOFTWARE HISTORY
*
* Date Ticket# Engineer Description
* ------------- -------- --------- --------------------------------------------
* Mar 10, 2009 2081 mfegan Initial creation.
* Apr 24, 2009 2088 mduff Added varargs to execute method.
* Oct 26, 2016 5969 randerso Add monitor bounds to command line arguments
* Code cleanup.
*
* </pre>
*
* @author mfegan
* @version 1.0
*/
public class AppLauncherHandler extends AbstractHandler {
private final transient IUFStatusHandler statusHandler = UFStatus
.getHandler(AbstractHandler.class);
private static final String LOCALIZATION_ROOT = "LOCALIZATION_ROOT";
private static final String HYDRO_APPS_DIR = "HYDRO_APPS_DIR";
private static final String HYDRO_APPS_DIR_LOC = "/awips2/edex/data/share/hydroapps";
private static final String PGSQL_DRIVER = "PGSQL_DRIVER_DIR";
private static final String PGSQL_DRIVER_LOC = "/lib/dependencies/org.postgres";
private static final String APPS_DIR = "apps_dir";
private static final String EDEX_HOME = "EDEX_HOME";
@Override
public Object execute(ExecutionEvent event) throws ExecutionException {
String bundleLocation = event.getParameter("bundleLocation");
return execute(HandlerUtil.getActiveShell(event), bundleLocation);
}
/**
* Executes the App Launcher for the specified bundle location. This
* override is intended to allow launching an application from another
* menu handler.
*
* @param shell parent shell
*
* @param bundleLocation
* relative path to the application bundle to launch.
* @param additional
* additional argument to add to the launch
*
* @return always returns null
*
* @throws ExecutionException
* if an error has occurred
*/
public Object execute(Shell shell, String bundleLocation, String... additional)
throws ExecutionException {
try {
Launcher launcher = AppLauncherUtilities
.importLauncherBundle(bundleLocation);
if (additional != null) {
for (int i = 0; i < additional.length; i++) {
if (additional[i] != null) {
launcher.getApplication().addArgument(additional[i]);
}
}
}
Rectangle b = shell.getMonitor().getBounds();
String boundsArg = String.format("-bounds %d,%d,%d,%d", b.x, b.y,
b.width, b.height);
launcher.getApplication().addArgument(boundsArg);
Environment environment = launcher.getSettings().getEnvironment();
/*
* need to add specific values to the environment -- these are
* internal to CAVE
*/
Map<String, String> sysEnv = System.getenv();
String hydroAppsDir = VizApp.getDataDir() + File.separator
+ "hydroapps";
environment.addValue(HYDRO_APPS_DIR, hydroAppsDir);
environment.addValue(APPS_DIR, hydroAppsDir);
String edexHome = sysEnv.get(EDEX_HOME) == null ? hydroAppsDir
: sysEnv.get(EDEX_HOME);
environment.addValue(EDEX_HOME, edexHome);
String pgDriverLoc = "";
for (Bundle bundle : com.raytheon.uf.viz.core.Activator.getDefault()
.getContext().getBundles()) {
if ("org.postgres".equals(bundle.getSymbolicName())) {
try {
URL url = FileLocator.find(bundle, new Path("."), null);
url = FileLocator.resolve(url);
File tmp = new File(url.getPath());
pgDriverLoc = tmp.getCanonicalPath();
} catch (MalformedURLException e) {
statusHandler
.warn("Unable to find Postgres plugin, using default of "
+ hydroAppsDir + "/"
+ PGSQL_DRIVER_LOC);
pgDriverLoc = hydroAppsDir + "/" + PGSQL_DRIVER_LOC;
}
break;
}
}
statusHandler.info(pgDriverLoc);
String driverPath = sysEnv.get(PGSQL_DRIVER) == null ? pgDriverLoc
: sysEnv.get(PGSQL_DRIVER);
environment.addValue(PGSQL_DRIVER, driverPath);
AppRunner runner = new AppRunner(launcher);
runner.execute();
} catch (Exception e) {
statusHandler.error("Error launching application from bundle: "
+ bundleLocation, e);
}
return null;
}
}
|
[] |
[] |
[] |
[]
|
[]
|
java
| 0 | 0 | |
entry.go
|
package vesper
import (
"fmt"
"os"
"path/filepath"
"strings"
)
// Extension allows for extending Vesper
type Extension interface {
Name() string
Init(*VM) error
}
// Version - this version of vesper
const Version = "vesper v0.1"
var defaultVM = &VM{
StackSize: defaultStackSize,
Symbols: defaultSymtab,
MacroMap: copyMacros(nil),
ConstantsMap: copyConstantMap(nil),
Constants: copyConstants(nil),
}
var loadPathSymbol = defaultVM.Intern("*load-path*")
// SetFlags - set various flags controlling the vm
func (vm *VM) SetFlags(v bool, d bool, i bool) {
vm.Flags = Flags{
Verbose: v,
Debug: d,
Interactive: i,
}
}
// DefineGlobal binds the value to the global name
func (vm *VM) DefineGlobal(name string, obj *Object) {
sym := vm.Intern(name)
if sym == nil {
fmt.Println("Cannot define a value for this symbol: " + name)
}
vm.defGlobal(sym, obj)
}
func (vm *VM) definePrimitive(name string, prim *Object) {
sym := vm.Intern(name)
vm.defGlobal(sym, prim)
}
// DefineFunction registers a primitive function to the specified global name
func (vm *VM) DefineFunction(name string, fun PrimitiveFunction, result *Object, args ...*Object) {
prim := Primitive(name, fun, result, args, nil, nil, nil)
vm.definePrimitive(name, prim)
}
// DefineFunctionRestArgs registers a primitive function with Rest arguments to the specified global name
func (vm *VM) DefineFunctionRestArgs(name string, fun PrimitiveFunction, result *Object, rest *Object, args ...*Object) {
prim := Primitive(name, fun, result, args, rest, []*Object{}, nil)
vm.definePrimitive(name, prim)
}
// DefineFunctionOptionalArgs registers a primitive function with optional arguments to the specified global name
func (vm *VM) DefineFunctionOptionalArgs(name string, fun PrimitiveFunction, result *Object, args []*Object, defaults ...*Object) {
prim := Primitive(name, fun, result, args, nil, defaults, nil)
vm.definePrimitive(name, prim)
}
// DefineFunctionKeyArgs registers a primitive function with keyword arguments to the specified global name
func (vm *VM) DefineFunctionKeyArgs(name string, fun PrimitiveFunction, result *Object, args []*Object, defaults []*Object, keys []*Object) {
prim := Primitive(name, fun, result, args, nil, defaults, keys)
vm.definePrimitive(name, prim)
}
// DefineMacro registers a primitive macro with the specified name.
func (vm *VM) DefineMacro(name string, fun PrimitiveFunction) {
sym := vm.Intern(name)
prim := Primitive(name, fun, AnyType, []*Object{AnyType}, nil, nil, nil)
vm.defMacro(sym, prim)
}
// GetKeywords - return a slice of Vesper primitive reserved words
func (vm *VM) GetKeywords() []*Object {
//keywords reserved for the base language that Vesper compiles
keywords := []*Object{
vm.Intern("quote"),
vm.Intern("fn"),
vm.Intern("if"),
vm.Intern("do"),
vm.Intern("def"),
vm.Intern("defn"),
vm.Intern("defmacro"),
vm.Intern("set!"),
vm.Intern("code"),
vm.Intern("use"),
}
return keywords
}
// Globals - return a slice of all defined global symbols
func (vm *VM) Globals() []*Object {
var syms []*Object
for _, sym := range vm.Symbols {
if sym.car != nil {
syms = append(syms, sym)
}
}
return syms
}
// GetGlobal - return the global value for the specified symbol, or nil if the symbol is not defined.
func GetGlobal(sym *Object) *Object {
if IsSymbol(sym) {
return sym.car
}
return nil
}
func (vm *VM) defGlobal(sym *Object, val *Object) {
sym.car = val
delete(vm.MacroMap, sym)
}
// IsDefined - return true if the there is a global value defined for the symbol
func IsDefined(sym *Object) bool {
return sym.car != nil
}
func undefGlobal(sym *Object) {
sym.car = nil
}
// Macros - return a slice of all defined macros
func (vm *VM) Macros() []*Object {
keys := make([]*Object, 0, len(vm.MacroMap))
for k := range vm.MacroMap {
keys = append(keys, k)
}
return keys
}
// GetMacro - return the macro for the symbol, or nil if not defined
func (vm *VM) GetMacro(sym *Object) *Macro {
mac, ok := vm.MacroMap[sym]
if !ok {
return nil
}
return mac
}
func (vm *VM) defMacro(sym *Object, val *Object) {
vm.MacroMap[sym] = NewMacro(sym, val)
}
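// putConstant interns a constant value and returns its index in the VM's constants pool.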
func (vm *VM) putConstant(val *Object) int {
idx, present := vm.ConstantsMap[val]
if !present {
idx = len(vm.Constants)
vm.Constants = append(vm.Constants, val)
vm.ConstantsMap[val] = idx
}
return idx
}
// Use is a synonym for load
func (vm *VM) Use(sym *Object) error {
return vm.Load(sym.text)
}
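// importCode executes a compiled thunk in the VM and returns its result.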
func (vm *VM) importCode(thunk *Object) (*Object, error) {
var args []*Object
result, err := vm.Execute(thunk.code, args)
if err != nil {
return nil, err
}
return result, nil
}
// FindModuleByName returns the file filename of a vesper module
func FindModuleByName(moduleName string) (string, error) {
loadPath := GetGlobal(loadPathSymbol)
if loadPath == nil {
loadPath = String(".")
}
path := strings.Split(StringValue(loadPath), ":")
name := moduleName
var lname string
if strings.HasSuffix(name, ".vesp") {
lname = strings.TrimSuffix(name, ".vesp") + ".vem"
} else {
lname = name + ".vem"
name = name + ".vsp"
}
for _, dirname := range path {
filename := filepath.Join(dirname, lname)
if IsFileReadable(filename) {
return filename, nil
}
filename = filepath.Join(dirname, name)
if IsFileReadable(filename) {
return filename, nil
}
}
return "", Error(IOErrorKey, "Module not found: ", moduleName)
}
// Load checks for a loadable module and loads it, if it exists
func (vm *VM) Load(name string) error {
file, err := FindModuleFile(name)
if err != nil {
return err
}
return vm.LoadFile(file)
}
// LoadFile loads and executes a file returning any error
func (vm *VM) LoadFile(file string) error {
if vm.Flags.Verbose {
println("; loadFile: " + file)
} else if vm.Flags.Interactive {
println("[loading " + file + "]")
}
fileText, err := SlurpFile(file)
if err != nil {
return err
}
exprs, err := vm.ReadAll(fileText, nil)
if err != nil {
return err
}
for exprs != EmptyList {
expr := Car(exprs)
_, err = vm.Eval(expr)
if err != nil {
return err
}
exprs = Cdr(exprs)
}
return nil
}
// Eval evaluates an expression
func (vm *VM) Eval(expr *Object) (*Object, error) {
if vm.Flags.Debug {
println("; eval: ", Write(expr))
}
expanded, err := vm.macroexpandObject(expr)
if err != nil {
return nil, err
}
if vm.Flags.Debug {
println("; expanded to: ", Write(expanded))
}
code, err := vm.Compile(expanded)
if err != nil {
return nil, err
}
if vm.Flags.Debug {
val := strings.Replace(Write(code), "\n", "\n; ", -1)
println("; compiled to:\n; ", val)
}
return vm.importCode(code)
}
// FindModuleFile finds a readable module file or errors
func FindModuleFile(name string) (string, error) {
i := strings.Index(name, ".")
if i < 0 {
file, err := FindModuleByName(name)
if err != nil {
return "", err
}
return file, nil
}
if !IsFileReadable(name) {
return "", Error(IOErrorKey, "Cannot read file: ", name)
}
return name, nil
}
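// compileObject macroexpands and compiles a single expression, returning its decompiled VM code as text.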
func (vm *VM) compileObject(expr *Object) (string, error) {
if vm.Flags.Debug {
println("; compile: ", Write(expr))
}
expanded, err := vm.macroexpandObject(expr)
if err != nil {
return "", err
}
if vm.Flags.Debug {
println("; expanded to: ", Write(expanded))
}
thunk, err := vm.Compile(expanded)
if err != nil {
return "", err
}
if vm.Flags.Debug {
println("; compiled to: ", Write(thunk))
}
return thunk.code.decompile(vm, true) + "\n", nil
}
// CompileFile compiles a file and returns a String object or an error
// caveats: when you compile a file, you actually run it. This is so we can handle imports and macros correctly.
func (vm *VM) CompileFile(name string) (*Object, error) {
file, err := FindModuleFile(name)
if err != nil {
return nil, err
}
if vm.Flags.Verbose {
println("; loadFile: " + file)
}
fileText, err := SlurpFile(file)
if err != nil {
return nil, err
}
exprs, err := vm.ReadAll(fileText, nil)
if err != nil {
return nil, err
}
result := ";\n; code generated from " + file + "\n;\n"
var lvm string
for exprs != EmptyList {
expr := Car(exprs)
lvm, err = vm.compileObject(expr)
if err != nil {
return nil, err
}
result += lvm
exprs = Cdr(exprs)
}
return String(result), nil
}
// AddVesperDirectory adds a directory to the load path
func (vm *VM) AddVesperDirectory(dirname string) {
loadPath := dirname
tmp := GetGlobal(loadPathSymbol)
if tmp != nil {
loadPath = dirname + ":" + StringValue(tmp)
}
vm.DefineGlobal(StringValue(loadPathSymbol), String(loadPath))
}
// Init initialise the base environment and extensions
func (vm *VM) Init(extns ...Extension) *VM {
vm.Extensions = extns
loadPath := os.Getenv("VESPER_PATH")
home := os.Getenv("HOME")
if loadPath == "" {
loadPath = ".:./lib"
homelib := filepath.Join(home, "lib/vesper")
_, err := os.Stat(homelib)
if err == nil {
loadPath += ":" + homelib
}
gopath := os.Getenv("GOPATH")
if gopath != "" {
golibdir := filepath.Join(gopath, "src/github.com/robotii/vesper/lib")
_, err := os.Stat(golibdir)
if err == nil {
loadPath += ":" + golibdir
}
}
}
vm.DefineGlobal(StringValue(loadPathSymbol), String(loadPath))
vm.InitPrimitives()
for _, ext := range vm.Extensions {
err := ext.Init(vm)
if err != nil {
Fatal("*** ", err)
}
}
return vm
}
// Run the given files in the vesper vm
func (vm *VM) Run(args ...string) {
for _, filename := range args {
err := vm.Load(filename)
if err != nil {
Fatal("*** ", err.Error())
}
}
}
// Init initialise the base environment and extensions
func Init() {
defaultVM.Init()
}
|
[
"\"VESPER_PATH\"",
"\"HOME\"",
"\"GOPATH\""
] |
[] |
[
"GOPATH",
"VESPER_PATH",
"HOME"
] |
[]
|
["GOPATH", "VESPER_PATH", "HOME"]
|
go
| 3 | 0 | |
holoviews/__init__.py
|
from __future__ import print_function, absolute_import
import os, pydoc
import numpy as np # noqa (API import)
import param
__version__ = param.Version(release=(1,9,2), fpath=__file__,
commit="$Format:%h$", reponame='holoviews')
from .core import archive, config # noqa (API import)
from .core.dimension import OrderedDict, Dimension # noqa (API import)
from .core.boundingregion import BoundingBox # noqa (API import)
from .core.options import (Options, Store, Cycle, # noqa (API import)
Palette, StoreOptions)
from .core.layout import * # noqa (API import)
from .core.element import * # noqa (API import)
from .core.overlay import * # noqa (API import)
from .core.tree import * # noqa (API import)
from .core.spaces import (HoloMap, Callable, DynamicMap, # noqa (API import)
GridSpace, GridMatrix)
from .operation import Operation # noqa (API import)
from .operation import ElementOperation # noqa (Deprecated API import)
from .element import * # noqa (API import)
from .element import __all__ as elements_list
from . import util # noqa (API import)
from .util import extension, renderer, output, opts # noqa (API import)
# Suppress warnings generated by NumPy in matplotlib
# Expected to be fixed in next matplotlib release
import warnings
warnings.filterwarnings("ignore",
message="elementwise comparison failed; returning scalar instead")
try:
import IPython # noqa (API import)
from .ipython import notebook_extension
extension = notebook_extension # noqa (name remapping)
except ImportError as e:
class notebook_extension(param.ParameterizedFunction):
def __call__(self, *args, **opts): # noqa (dummy signature)
raise Exception("IPython notebook not available: use hv.extension instead.")
# A single holoviews.rc file may be executed if found.
for rcfile in [os.environ.get("HOLOVIEWSRC", ''),
os.path.abspath(os.path.join(os.path.split(__file__)[0],
'..', 'holoviews.rc')),
"~/.holoviews.rc",
"~/.config/holoviews/holoviews.rc"]:
filename = os.path.expanduser(rcfile)
if os.path.isfile(filename):
with open(filename) as f:
code = compile(f.read(), filename, 'exec')
try:
exec(code)
except Exception as e:
print("Warning: Could not load %r [%r]" % (filename, str(e)))
break
def help(obj, visualization=True, ansi=True, backend=None,
recursive=False, pattern=None):
"""
Extended version of the built-in help that supports parameterized
functions and objects. A pattern (regular expression) may be used to
filter the output and if recursive is set to True, documentation for
the supplied object is shown. Note that the recursive option will
only work with an object instance and not a class.
If ansi is set to False, all ANSI color
codes are stripped out.
"""
backend = backend if backend else Store.current_backend
info = Store.info(obj, ansi=ansi, backend=backend, visualization=visualization,
recursive=recursive, pattern=pattern, elements=elements_list)
msg = ("\nTo view the visualization options applicable to this "
"object or class, use:\n\n"
" holoviews.help(obj, visualization=True)\n\n")
if info:
print((msg if visualization is False else '') + info)
else:
pydoc.help(obj)
|
[] |
[] |
[
"HOLOVIEWSRC"
] |
[]
|
["HOLOVIEWSRC"]
|
python
| 1 | 0 | |
extensions/cli/landsat8/src/test/java/org/locationtech/geowave/format/landsat8/IngestRunnerTest.java
|
/**
* Copyright (c) 2013-2020 Contributors to the Eclipse Foundation
*
* <p> See the NOTICE file distributed with this work for additional information regarding copyright
* ownership. All rights reserved. This program and the accompanying materials are made available
* under the terms of the Apache License, Version 2.0 which accompanies this distribution and is
* available at http://www.apache.org/licenses/LICENSE-2.0.txt
*/
package org.locationtech.geowave.format.landsat8;
import static org.junit.Assert.assertTrue;
import java.io.File;
import java.io.IOException;
import java.util.Arrays;
import org.apache.commons.lang.SystemUtils;
import org.junit.BeforeClass;
import org.junit.Test;
import org.locationtech.geowave.adapter.raster.plugin.gdal.InstallGdal;
import org.locationtech.geowave.core.cli.api.OperationParams;
import org.locationtech.geowave.core.cli.operations.config.options.ConfigOptions;
import org.locationtech.geowave.core.cli.parser.ManualOperationParams;
import org.locationtech.geowave.core.geotime.index.SpatialTemporalDimensionalityTypeProvider.Bias;
import org.locationtech.geowave.core.geotime.index.api.SpatialIndexBuilder;
import org.locationtech.geowave.core.geotime.index.api.SpatialTemporalIndexBuilder;
import org.locationtech.geowave.core.geotime.index.dimension.TemporalBinningStrategy.Unit;
import org.locationtech.geowave.core.store.CloseableIterator;
import org.locationtech.geowave.core.store.GeoWaveStoreFinder;
import org.locationtech.geowave.core.store.api.QueryBuilder;
import org.locationtech.geowave.core.store.cli.store.DataStorePluginOptions;
import org.locationtech.geowave.core.store.cli.store.StoreLoader;
import org.locationtech.geowave.core.store.index.IndexPluginOptions.PartitionStrategy;
import org.locationtech.geowave.core.store.index.IndexStore;
import org.locationtech.geowave.core.store.memory.MemoryStoreFactoryFamily;
import com.beust.jcommander.JCommander;
import com.beust.jcommander.ParameterException;
import it.geosolutions.jaiext.JAIExt;
public class IngestRunnerTest {
@BeforeClass
public static void setup() throws IOException {
// Skip this test if we're on a Mac
org.junit.Assume.assumeTrue(isNotMac());
GeoWaveStoreFinder.getRegisteredStoreFactoryFamilies().put(
"memory",
new MemoryStoreFactoryFamily());
InstallGdal.main(new String[] {System.getenv("GDAL_DIR")});
}
private static boolean isNotMac() {
return !SystemUtils.IS_OS_MAC;
}
@Test
public void testIngest() throws Exception {
JAIExt.initJAIEXT();
final Landsat8BasicCommandLineOptions analyzeOptions = new Landsat8BasicCommandLineOptions();
analyzeOptions.setNBestScenes(1);
analyzeOptions.setCqlFilter(
"BBOX(shape,-76.6,42.34,-76.4,42.54) and band='BQA' and sizeMB < 1");
analyzeOptions.setUseCachedScenes(true);
analyzeOptions.setWorkspaceDir(Tests.WORKSPACE_DIR);
final Landsat8DownloadCommandLineOptions downloadOptions =
new Landsat8DownloadCommandLineOptions();
downloadOptions.setOverwriteIfExists(false);
final Landsat8RasterIngestCommandLineOptions ingestOptions =
new Landsat8RasterIngestCommandLineOptions();
ingestOptions.setRetainImages(true);
ingestOptions.setCreatePyramid(true);
ingestOptions.setCreateHistogram(true);
ingestOptions.setScale(100);
final VectorOverrideCommandLineOptions vectorOverrideOptions =
new VectorOverrideCommandLineOptions();
vectorOverrideOptions.setVectorStore("memorystore2");
vectorOverrideOptions.setVectorIndex("spatialindex,spatempindex");
final IngestRunner runner =
new IngestRunner(
analyzeOptions,
downloadOptions,
ingestOptions,
vectorOverrideOptions,
Arrays.asList("memorystore", "spatialindex"));
final ManualOperationParams params = new ManualOperationParams();
params.getContext().put(
ConfigOptions.PROPERTIES_FILE_CONTEXT,
new File(
IngestRunnerTest.class.getClassLoader().getResource(
"geowave-config.properties").toURI()));
createIndices(params, "memorystore");
createIndices(params, "memorystore2");
runner.runInternal(params);
try (CloseableIterator<Object> results =
getStorePluginOptions(params, "memorystore").createDataStore().query(
QueryBuilder.newBuilder().build())) {
assertTrue("Store is not empty", results.hasNext());
}
// Not sure what assertions can be made about the index.
}
private DataStorePluginOptions getStorePluginOptions(
final OperationParams params,
final String storeName) {
final File configFile = (File) params.getContext().get(ConfigOptions.PROPERTIES_FILE_CONTEXT);
final StoreLoader inputStoreLoader = new StoreLoader(storeName);
if (!inputStoreLoader.loadFromConfig(configFile, new JCommander().getConsole())) {
throw new ParameterException("Cannot find store name: " + inputStoreLoader.getStoreName());
}
return inputStoreLoader.getDataStorePlugin();
}
private void createIndices(final OperationParams params, final String storeName) {
final IndexStore indexStore = getStorePluginOptions(params, storeName).createIndexStore();
// Create the spatial index
final SpatialIndexBuilder builder = new SpatialIndexBuilder();
builder.setName("spatialindex");
builder.setNumPartitions(1);
builder.setIncludeTimeInCommonIndexModel(false);
indexStore.addIndex(builder.createIndex());
// Create the spatial temporal index
final SpatialTemporalIndexBuilder st_builder = new SpatialTemporalIndexBuilder();
st_builder.setName("spatempindex");
st_builder.setBias(Bias.BALANCED);
st_builder.setMaxDuplicates(-1);
st_builder.setNumPartitions(1);
st_builder.setPartitionStrategy(PartitionStrategy.ROUND_ROBIN);
st_builder.setPeriodicity(Unit.DAY);
indexStore.addIndex(st_builder.createIndex());
}
}
|
[
"\"GDAL_DIR\""
] |
[] |
[
"GDAL_DIR"
] |
[]
|
["GDAL_DIR"]
|
java
| 1 | 0 | |
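The JUnit setup above passes System.getenv("GDAL_DIR") straight into InstallGdal.main, so an unset variable silently becomes a null argument. A small, hypothetical Go sketch of a more defensive variant is shown below, skipping the test when the variable is missing and failing fast when it is unusable; only the variable name comes from the original test.

// Hypothetical test guard for a required environment variable.
package gdal_test

import (
	"os"
	"testing"
)

func TestIngestRequiresGdalDir(t *testing.T) {
	dir := os.Getenv("GDAL_DIR")
	if dir == "" {
		t.Skip("GDAL_DIR not set; skipping GDAL-dependent test")
	}
	if _, err := os.Stat(dir); err != nil {
		t.Fatalf("GDAL_DIR %q is not usable: %v", dir, err)
	}
	// ... the actual ingest logic would run here ...
}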
libs/go/aealite/wallet/wallet.go
|
/* -*- coding: utf-8 -*-
* ------------------------------------------------------------------------------
*
* Copyright 2018-2019 Fetch.AI Limited
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* ------------------------------------------------------------------------------
*/
package wallet
import (
"log"
"os"
"github.com/joho/godotenv"
"github.com/rs/zerolog"
)
var logger zerolog.Logger = zerolog.New(zerolog.ConsoleWriter{
Out: os.Stdout,
NoColor: false,
TimeFormat: "15:04:05.000",
}).
With().Timestamp().
Str("package", "Wallet").
Logger()
type Wallet struct {
LedgerId string
Address string
PublicKey string
PrivateKey string
}
func (wallet *Wallet) InitFromEnv() error {
env_file := os.Args[1]
logger.Debug().Msgf("env_file: %s", env_file)
err := godotenv.Overload(env_file)
if err != nil {
logger.Error().Str("err", err.Error()).
Msg("Error loading env file")
return err
}
wallet.LedgerId = os.Getenv("AEA_LEDGER_ID")
if wallet.LedgerId == "" {
log.Fatal("No AEA_LEDGER_ID provided in env file.")
}
wallet.Address = os.Getenv("AEA_ADDRESS")
wallet.PublicKey = os.Getenv("AEA_PUBLIC_KEY")
wallet.PrivateKey = os.Getenv("AEA_PRIVATE_KEY")
if wallet.PrivateKey == "" {
log.Fatal("No AEA_PRIVATE_KEY provided in env file.")
}
public_key, err := PublicKeyFromPrivateKey(wallet.LedgerId, wallet.PrivateKey)
if err != nil {
log.Fatal("Could not derive public key.")
}
if (wallet.PublicKey != "") && (public_key != wallet.PublicKey) {
log.Fatal("Derived and provided public_key don't match.")
}
wallet.PublicKey = public_key
address, err := AddressFromPublicKey(wallet.LedgerId, wallet.PublicKey)
if err != nil {
log.Fatal("Could not derive address.")
}
if (wallet.Address != "") && (address != wallet.Address) {
log.Fatal("Derived and provided address don't match.")
}
wallet.Address = address
return nil
}
|
[
"\"AEA_LEDGER_ID\"",
"\"AEA_ADDRESS\"",
"\"AEA_PUBLIC_KEY\"",
"\"AEA_PRIVATE_KEY\""
] |
[] |
[
"AEA_ADDRESS",
"AEA_PUBLIC_KEY",
"AEA_PRIVATE_KEY",
"AEA_LEDGER_ID"
] |
[]
|
["AEA_ADDRESS", "AEA_PUBLIC_KEY", "AEA_PRIVATE_KEY", "AEA_LEDGER_ID"]
|
go
| 4 | 0 | |
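Wallet.InitFromEnv above loads the dotenv file named by os.Args[1], requires AEA_LEDGER_ID and AEA_PRIVATE_KEY, and re-derives the public key and address to cross-check any values that were provided. A minimal usage sketch follows; the import path is inferred from the file location and the surrounding command-line handling is an assumption.

// Hypothetical caller of Wallet.InitFromEnv.
package main

import (
	"fmt"
	"os"

	wallet "aealite/wallet" // assumed import path; adjust to the real module
)

func main() {
	// InitFromEnv reads os.Args[1] itself, so the caller only checks that it exists.
	if len(os.Args) < 2 {
		fmt.Fprintln(os.Stderr, "usage: agent <env-file>")
		os.Exit(1)
	}
	w := &wallet.Wallet{}
	if err := w.InitFromEnv(); err != nil {
		fmt.Fprintln(os.Stderr, "wallet initialisation failed:", err)
		os.Exit(1)
	}
	fmt.Println("ledger:", w.LedgerId, "address:", w.Address)
}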
cmd/backendapi.go
|
/*
Copyright 2015 Crunchy Data Solutions, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"database/sql"
"errors"
"fmt"
"github.com/ant0ine/go-json-rest/rest"
"github.com/crunchydata/crunchy-postgres-container-client/backend"
"log"
"net/http"
"os"
)
func main() {
fmt.Println("at top of backend main")
var err error
handler := rest.ResourceHandler{
PreRoutingMiddlewares: []rest.Middleware{
&rest.CorsMiddleware{
RejectNonCorsRequests: false,
OriginValidator: func(origin string, request *rest.Request) bool {
return true
},
AllowedMethods: []string{"DELETE", "GET", "POST", "PUT"},
AllowedHeaders: []string{
"Accept", "Content-Type", "X-Custom-Header", "Origin"},
AccessControlAllowCredentials: true,
AccessControlMaxAge: 3600,
},
},
EnableRelaxedContentType: true,
}
err = handler.SetRoutes(
&rest.Route{"POST", "/car/add", AddCar},
&rest.Route{"POST", "/car/update", UpdateCar},
&rest.Route{"GET", "/car/list", GetAllCars},
&rest.Route{"GET", "/car/:ID", GetCar},
&rest.Route{"POST", "/car/delete", DeleteCar},
&rest.Route{"GET", "/conn/list", GetConn},
)
if err != nil {
log.Fatal(err)
}
log.Fatal(http.ListenAndServe(":13002", &handler))
}
func GetCar(w rest.ResponseWriter, r *rest.Request) {
ID := r.PathParam("ID")
if ID == "" {
fmt.Println("GetCar: ID not found in request")
rest.Error(w, "ID not passed", http.StatusBadRequest)
return
}
fmt.Println("GetCar called with ID=" + ID)
dbConn, err := getConnection()
if err != nil {
fmt.Println("GetCar: error in connection" + err.Error())
rest.Error(w, err.Error(), http.StatusInternalServerError)
return
}
var car backend.Car
car, err = backend.GetCar(dbConn, ID)
if err != nil {
fmt.Println("GetCar: error" + err.Error())
rest.Error(w, err.Error(), http.StatusInternalServerError)
return
}
w.WriteJson(&car)
}
func GetAllCars(w rest.ResponseWriter, r *rest.Request) {
fmt.Println("GetAllCars:called")
dbConn, err := getConnection()
if err != nil {
fmt.Println("GetAllCars: error in connection" + err.Error())
rest.Error(w, err.Error(), http.StatusInternalServerError)
return
}
cars, err := backend.GetAllCars(dbConn)
if err != nil {
fmt.Println("GetAllCars: error " + err.Error())
rest.Error(w, err.Error(), http.StatusInternalServerError)
return
}
w.WriteJson(&cars)
}
func AddCar(w rest.ResponseWriter, r *rest.Request) {
car := backend.Car{}
err := r.DecodeJsonPayload(&car)
if err != nil {
fmt.Println("AddCar: error in decode" + err.Error())
rest.Error(w, err.Error(), http.StatusInternalServerError)
return
}
var dbConn *sql.DB
dbConn, err = getConnection()
if err != nil {
fmt.Println("AddCar: error in connection" + err.Error())
rest.Error(w, err.Error(), http.StatusInternalServerError)
return
}
var carid string
carid, err = backend.AddCar(dbConn, car)
if err != nil {
fmt.Println(err.Error())
rest.Error(w, err.Error(), http.StatusInternalServerError)
return
}
car.ID = carid
w.WriteJson(&car)
}
func UpdateCar(w rest.ResponseWriter, r *rest.Request) {
car := backend.Car{}
err := r.DecodeJsonPayload(&car)
if err != nil {
fmt.Println("UpdateCar: error in decode" + err.Error())
rest.Error(w, err.Error(), http.StatusInternalServerError)
return
}
var dbConn *sql.DB
dbConn, err = getConnection()
if err != nil {
fmt.Println("UpdateCar: error in connection" + err.Error())
rest.Error(w, err.Error(), http.StatusInternalServerError)
return
}
err = backend.UpdateCar(dbConn, car)
if err != nil {
		fmt.Println("UpdateCar: error in update " + err.Error())
rest.Error(w, err.Error(), http.StatusInternalServerError)
return
}
var status = "OK"
w.WriteJson(&status)
}
func DeleteCar(w rest.ResponseWriter, r *rest.Request) {
car := backend.Car{}
err := r.DecodeJsonPayload(&car)
if err != nil {
fmt.Println("DeleteCar: error in decode" + err.Error())
rest.Error(w, err.Error(), http.StatusInternalServerError)
return
}
var dbConn *sql.DB
dbConn, err = getConnection()
if err != nil {
fmt.Println("DeleteCar: error in connection" + err.Error())
rest.Error(w, err.Error(), http.StatusInternalServerError)
return
}
err = backend.DeleteCar(dbConn, car.ID)
if err != nil {
		fmt.Println("DeleteCar: error in delete " + err.Error())
rest.Error(w, err.Error(), http.StatusInternalServerError)
return
}
var status = "OK"
w.WriteJson(&status)
}
func getConnection() (*sql.DB, error) {
var dbConn *sql.DB
var user = os.Getenv("PG_USER")
if user == "" {
return dbConn, errors.New("PG_USER env var not set")
}
var host = os.Getenv("PG_HOST")
if host == "" {
return dbConn, errors.New("PG_HOST env var not set")
}
var password = os.Getenv("PG_PASSWORD")
if password == "" {
return dbConn, errors.New("PG_PASSWORD env var not set")
}
var database = os.Getenv("PG_DATABASE")
if database == "" {
return dbConn, errors.New("PG_DATABASE env var not set")
}
var port = "5432"
dbConn, err := sql.Open("postgres", "sslmode=disable user="+user+" host="+host+" port="+port+" dbname="+database+" password="+password)
return dbConn, err
}
type ConnInfo struct {
PG_USER string
PG_DATABASE string
PG_HOST string
PG_PASSWORD string
}
func GetConn(w rest.ResponseWriter, r *rest.Request) {
fmt.Println("GetConn called")
conn := ConnInfo{}
conn.PG_USER = os.Getenv("PG_USER")
conn.PG_DATABASE = os.Getenv("PG_DATABASE")
conn.PG_HOST = os.Getenv("PG_HOST")
conn.PG_PASSWORD = os.Getenv("PG_PASSWORD")
w.WriteJson(&conn)
}
|
[
"\"PG_USER\"",
"\"PG_HOST\"",
"\"PG_PASSWORD\"",
"\"PG_DATABASE\"",
"\"PG_USER\"",
"\"PG_DATABASE\"",
"\"PG_HOST\"",
"\"PG_PASSWORD\""
] |
[] |
[
"PG_USER",
"PG_HOST",
"PG_DATABASE",
"PG_PASSWORD"
] |
[]
|
["PG_USER", "PG_HOST", "PG_DATABASE", "PG_PASSWORD"]
|
go
| 4 | 0 | |
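getConnection above re-reads and re-validates PG_USER, PG_HOST, PG_PASSWORD and PG_DATABASE on every request. The sketch below shows one way to validate the same variables once at startup and assemble the connection string from the result; the requireEnv helper is hypothetical and not part of the original program.

// Illustrative startup-time validation of the PG_* variables used above.
package main

import (
	"fmt"
	"os"
)

// requireEnv returns the values of the given keys, or an error naming the
// first one that is unset.
func requireEnv(keys ...string) (map[string]string, error) {
	vals := make(map[string]string, len(keys))
	for _, k := range keys {
		v := os.Getenv(k)
		if v == "" {
			return nil, fmt.Errorf("%s env var not set", k)
		}
		vals[k] = v
	}
	return vals, nil
}

func main() {
	cfg, err := requireEnv("PG_USER", "PG_HOST", "PG_PASSWORD", "PG_DATABASE")
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	dsn := fmt.Sprintf("sslmode=disable user=%s host=%s port=5432 dbname=%s password=%s",
		cfg["PG_USER"], cfg["PG_HOST"], cfg["PG_DATABASE"], cfg["PG_PASSWORD"])
	fmt.Println("would connect with:", dsn)
}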
chc/api/ApiAssumption.py
|
# ------------------------------------------------------------------------------
# CodeHawk C Analyzer
# Author: Henny Sipma
# ------------------------------------------------------------------------------
# The MIT License (MIT)
#
# Copyright (c) 2017-2020 Kestrel Technology LLC
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ------------------------------------------------------------------------------
from typing import List, TYPE_CHECKING
if TYPE_CHECKING:
from chc.api.CFunctionApi import CFunctionApi
from chc.app.CFunction import CFunction
from chc.proof.CPOPredicate import CPOPredicate
class ApiAssumption(object):
def __init__(
self,
capi: "CFunctionApi",
id: int,
predicate: "CPOPredicate",
ppos: List[int],
spos: List[int],
isglobal: bool = False,
isfile: bool = False,
):
self.id = id
self.capi = capi # api/CFunctionAPI
self.cfun: "CFunction" = self.capi.cfun # app/CFunction
self.predicate = predicate # proof/CPOPredicate
self.ppos = ppos
self.spos = spos
self.isglobal = isglobal # assumption includes global variable
self.isfile = isfile
def __str__(self) -> str:
strppos = ""
strspos = ""
if len(self.ppos) > 0:
strppos = (
"\n --Dependent ppo's: ["
+ ",".join(str(i) for i in self.ppos)
+ "]"
)
if len(self.spos) > 0:
strspos = (
"\n --Dependent spo's: ["
+ ",".join(str(i) for i in self.spos)
+ "]"
)
if self.isglobal:
isglobal = " (global)"
else:
isglobal = ""
return str(self.id) + " " + str(self.predicate) + isglobal + strppos + strspos
|
[] |
[] |
[] |
[]
|
[]
|
python
| null | null | null |
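For comparison only, a rough Go analogue of the ApiAssumption record above: the id, the predicate and the dependent ppo/spo id lists, with a String method that mirrors the __str__ formatting. It is a sketch, not part of the CodeHawk analyzer.

// Sketch of an assumption record analogous to chc/api/ApiAssumption.py.
package main

import (
	"fmt"
	"strings"
)

type ApiAssumption struct {
	ID        int
	Predicate string
	PPOs      []int
	SPOs      []int
	IsGlobal  bool
}

func joinInts(xs []int) string {
	parts := make([]string, len(xs))
	for i, x := range xs {
		parts[i] = fmt.Sprint(x)
	}
	return strings.Join(parts, ",")
}

func (a ApiAssumption) String() string {
	s := fmt.Sprintf("%d %s", a.ID, a.Predicate)
	if a.IsGlobal {
		s += " (global)"
	}
	if len(a.PPOs) > 0 {
		s += "\n  --Dependent ppo's: [" + joinInts(a.PPOs) + "]"
	}
	if len(a.SPOs) > 0 {
		s += "\n  --Dependent spo's: [" + joinInts(a.SPOs) + "]"
	}
	return s
}

func main() {
	fmt.Println(ApiAssumption{ID: 3, Predicate: "not-null(arg)", PPOs: []int{1, 2}})
}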
pkg/objstore/cos/cos.go
|
// Copyright (c) The Thanos Authors.
// Licensed under the Apache License 2.0.
package cos
import (
"context"
"fmt"
"io"
"net/http"
"os"
"strings"
"testing"
"time"
"github.com/go-kit/kit/log"
"github.com/mozillazg/go-cos"
"github.com/pkg/errors"
"github.com/thanos-io/thanos/pkg/objstore"
"github.com/thanos-io/thanos/pkg/objstore/clientutil"
"github.com/thanos-io/thanos/pkg/runutil"
"gopkg.in/yaml.v2"
)
// DirDelim is the delimiter used to model a directory structure in an object store bucket.
const dirDelim = "/"
// Bucket implements the store.Bucket interface against cos-compatible (Tencent Object Storage) APIs.
type Bucket struct {
logger log.Logger
client *cos.Client
name string
}
// Config encapsulates the necessary config values to instantiate a cos client.
type Config struct {
Bucket string `yaml:"bucket"`
Region string `yaml:"region"`
AppId string `yaml:"app_id"`
SecretKey string `yaml:"secret_key"`
SecretId string `yaml:"secret_id"`
}
// Validate checks to see if mandatory cos config options are set.
func (conf *Config) validate() error {
if conf.Bucket == "" ||
conf.AppId == "" ||
conf.Region == "" ||
conf.SecretId == "" ||
conf.SecretKey == "" {
return errors.New("insufficient cos configuration information")
}
return nil
}
// NewBucket returns a new Bucket using the provided cos configuration.
func NewBucket(logger log.Logger, conf []byte, component string) (*Bucket, error) {
if logger == nil {
logger = log.NewNopLogger()
}
var config Config
if err := yaml.Unmarshal(conf, &config); err != nil {
return nil, errors.Wrap(err, "parsing cos configuration")
}
if err := config.validate(); err != nil {
return nil, errors.Wrap(err, "validate cos configuration")
}
bucketUrl := cos.NewBucketURL(config.Bucket, config.AppId, config.Region, true)
b, err := cos.NewBaseURL(bucketUrl.String())
if err != nil {
return nil, errors.Wrap(err, "initialize cos base url")
}
client := cos.NewClient(b, &http.Client{
Transport: &cos.AuthorizationTransport{
SecretID: config.SecretId,
SecretKey: config.SecretKey,
},
})
bkt := &Bucket{
logger: logger,
client: client,
name: config.Bucket,
}
return bkt, nil
}
// Name returns the bucket name for COS.
func (b *Bucket) Name() string {
return b.name
}
// Attributes returns information about the specified object.
func (b *Bucket) Attributes(ctx context.Context, name string) (objstore.ObjectAttributes, error) {
resp, err := b.client.Object.Head(ctx, name, nil)
if err != nil {
return objstore.ObjectAttributes{}, err
}
size, err := clientutil.ParseContentLength(resp.Header)
if err != nil {
return objstore.ObjectAttributes{}, err
}
	// Tencent COS returns the Last-Modified header in RFC1123 format.
// see api doc for details: https://intl.cloud.tencent.com/document/product/436/7729
mod, err := clientutil.ParseLastModified(resp.Header, time.RFC1123)
if err != nil {
return objstore.ObjectAttributes{}, err
}
return objstore.ObjectAttributes{
Size: size,
LastModified: mod,
}, nil
}
// Upload the contents of the reader as an object into the bucket.
func (b *Bucket) Upload(ctx context.Context, name string, r io.Reader) error {
if _, err := b.client.Object.Put(ctx, name, r, nil); err != nil {
return errors.Wrap(err, "upload cos object")
}
return nil
}
// Delete removes the object with the given name.
func (b *Bucket) Delete(ctx context.Context, name string) error {
if _, err := b.client.Object.Delete(ctx, name); err != nil {
return errors.Wrap(err, "delete cos object")
}
return nil
}
// Iter calls f for each entry in the given directory (not recursive). The argument to f is the full
// object name including the prefix of the inspected directory.
func (b *Bucket) Iter(ctx context.Context, dir string, f func(string) error, options ...objstore.IterOption) error {
if dir != "" {
dir = strings.TrimSuffix(dir, dirDelim) + dirDelim
}
for object := range b.listObjects(ctx, dir, options...) {
if object.err != nil {
return object.err
}
if object.key == "" {
continue
}
if err := f(object.key); err != nil {
return err
}
}
return nil
}
func (b *Bucket) getRange(ctx context.Context, name string, off, length int64) (io.ReadCloser, error) {
if len(name) == 0 {
		return nil, errors.New("given object name should not be empty")
}
opts := &cos.ObjectGetOptions{}
if length != -1 {
if err := setRange(opts, off, off+length-1); err != nil {
return nil, err
}
}
resp, err := b.client.Object.Get(ctx, name, opts)
if err != nil {
return nil, err
}
if _, err := resp.Body.Read(nil); err != nil {
runutil.ExhaustCloseWithLogOnErr(b.logger, resp.Body, "cos get range obj close")
return nil, err
}
return resp.Body, nil
}
// Get returns a reader for the given object name.
func (b *Bucket) Get(ctx context.Context, name string) (io.ReadCloser, error) {
return b.getRange(ctx, name, 0, -1)
}
// GetRange returns a new range reader for the given object name and range.
func (b *Bucket) GetRange(ctx context.Context, name string, off, length int64) (io.ReadCloser, error) {
return b.getRange(ctx, name, off, length)
}
// Exists checks if the given object exists in the bucket.
func (b *Bucket) Exists(ctx context.Context, name string) (bool, error) {
if _, err := b.client.Object.Head(ctx, name, nil); err != nil {
if b.IsObjNotFoundErr(err) {
return false, nil
}
return false, errors.Wrap(err, "head cos object")
}
return true, nil
}
// IsObjNotFoundErr returns true if error means that object is not found. Relevant to Get operations.
func (b *Bucket) IsObjNotFoundErr(err error) bool {
switch tmpErr := errors.Cause(err).(type) {
case *cos.ErrorResponse:
if tmpErr.Code == "NoSuchKey" ||
(tmpErr.Response != nil && tmpErr.Response.StatusCode == http.StatusNotFound) {
return true
}
return false
default:
return false
}
}
func (b *Bucket) Close() error { return nil }
type objectInfo struct {
key string
err error
}
func (b *Bucket) listObjects(ctx context.Context, objectPrefix string, options ...objstore.IterOption) <-chan objectInfo {
objectsCh := make(chan objectInfo, 1)
// If recursive iteration is enabled we should pass an empty delimiter.
delimiter := dirDelim
if objstore.ApplyIterOptions(options...).Recursive {
delimiter = ""
}
go func(objectsCh chan<- objectInfo) {
defer close(objectsCh)
var marker string
for {
result, _, err := b.client.Bucket.Get(ctx, &cos.BucketGetOptions{
Prefix: objectPrefix,
MaxKeys: 1000,
Marker: marker,
Delimiter: delimiter,
})
if err != nil {
select {
case objectsCh <- objectInfo{
err: err,
}:
case <-ctx.Done():
}
return
}
for _, object := range result.Contents {
select {
case objectsCh <- objectInfo{
key: object.Key,
}:
case <-ctx.Done():
return
}
}
// The result of CommonPrefixes contains the objects
// that have the same keys between Prefix and the key specified by delimiter.
for _, obj := range result.CommonPrefixes {
select {
case objectsCh <- objectInfo{
key: obj,
}:
case <-ctx.Done():
return
}
}
if !result.IsTruncated {
return
}
marker = result.NextMarker
}
}(objectsCh)
return objectsCh
}
func setRange(opts *cos.ObjectGetOptions, start, end int64) error {
if start == 0 && end < 0 {
opts.Range = fmt.Sprintf("bytes=%d", end)
} else if 0 < start && end == 0 {
opts.Range = fmt.Sprintf("bytes=%d-", start)
} else if 0 <= start && start <= end {
opts.Range = fmt.Sprintf("bytes=%d-%d", start, end)
} else {
return errors.Errorf("Invalid range specified: start=%d end=%d", start, end)
}
return nil
}
func configFromEnv() Config {
c := Config{
Bucket: os.Getenv("COS_BUCKET"),
AppId: os.Getenv("COS_APP_ID"),
Region: os.Getenv("COS_REGION"),
SecretId: os.Getenv("COS_SECRET_ID"),
SecretKey: os.Getenv("COS_SECRET_KEY"),
}
return c
}
// NewTestBucket creates test bkt client that before returning creates temporary bucket.
// In a close function it empties and deletes the bucket.
func NewTestBucket(t testing.TB) (objstore.Bucket, func(), error) {
c := configFromEnv()
if err := validateForTest(c); err != nil {
return nil, nil, err
}
if c.Bucket != "" {
if os.Getenv("THANOS_ALLOW_EXISTING_BUCKET_USE") == "" {
return nil, nil, errors.New("COS_BUCKET is defined. Normally this tests will create temporary bucket " +
"and delete it after test. Unset COS_BUCKET env variable to use default logic. If you really want to run " +
"tests against provided (NOT USED!) bucket, set THANOS_ALLOW_EXISTING_BUCKET_USE=true. WARNING: That bucket " +
"needs to be manually cleared. This means that it is only useful to run one test in a time. This is due " +
"to safety (accidentally pointing prod bucket for test) as well as COS not being fully strong consistent.")
}
bc, err := yaml.Marshal(c)
if err != nil {
return nil, nil, err
}
b, err := NewBucket(log.NewNopLogger(), bc, "thanos-e2e-test")
if err != nil {
return nil, nil, err
}
if err := b.Iter(context.Background(), "", func(f string) error {
return errors.Errorf("bucket %s is not empty", c.Bucket)
}); err != nil {
return nil, nil, errors.Wrapf(err, "cos check bucket %s", c.Bucket)
}
t.Log("WARNING. Reusing", c.Bucket, "COS bucket for COS tests. Manual cleanup afterwards is required")
return b, func() {}, nil
}
c.Bucket = objstore.CreateTemporaryTestBucketName(t)
bc, err := yaml.Marshal(c)
if err != nil {
return nil, nil, err
}
b, err := NewBucket(log.NewNopLogger(), bc, "thanos-e2e-test")
if err != nil {
return nil, nil, err
}
if _, err := b.client.Bucket.Put(context.Background(), nil); err != nil {
return nil, nil, err
}
t.Log("created temporary COS bucket for COS tests with name", c.Bucket)
return b, func() {
objstore.EmptyBucket(t, context.Background(), b)
if _, err := b.client.Bucket.Delete(context.Background()); err != nil {
t.Logf("deleting bucket %s failed: %s", c.Bucket, err)
}
}, nil
}
func validateForTest(conf Config) error {
if conf.AppId == "" ||
conf.Region == "" ||
conf.SecretId == "" ||
conf.SecretKey == "" {
return errors.New("insufficient cos configuration information")
}
return nil
}
|
[
"\"COS_BUCKET\"",
"\"COS_APP_ID\"",
"\"COS_REGION\"",
"\"COS_SECRET_ID\"",
"\"COS_SECRET_KEY\"",
"\"THANOS_ALLOW_EXISTING_BUCKET_USE\""
] |
[] |
[
"COS_SECRET_ID",
"COS_BUCKET",
"COS_APP_ID",
"COS_SECRET_KEY",
"THANOS_ALLOW_EXISTING_BUCKET_USE",
"COS_REGION"
] |
[]
|
["COS_SECRET_ID", "COS_BUCKET", "COS_APP_ID", "COS_SECRET_KEY", "THANOS_ALLOW_EXISTING_BUCKET_USE", "COS_REGION"]
|
go
| 6 | 0 | |
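configFromEnv and NewTestBucket above show the intended wiring: the COS_* environment variables are packed into a Config, marshalled to YAML and handed to NewBucket. The sketch below walks the same path outside a test; it relies only on the exported names shown above and assumes the usual thanos import path.

// Illustrative construction of a COS bucket client from COS_* variables.
package main

import (
	"fmt"
	"os"

	"github.com/go-kit/kit/log"
	"github.com/thanos-io/thanos/pkg/objstore/cos"
	"gopkg.in/yaml.v2"
)

func main() {
	conf := cos.Config{
		Bucket:    os.Getenv("COS_BUCKET"),
		AppId:     os.Getenv("COS_APP_ID"),
		Region:    os.Getenv("COS_REGION"),
		SecretId:  os.Getenv("COS_SECRET_ID"),
		SecretKey: os.Getenv("COS_SECRET_KEY"),
	}
	raw, err := yaml.Marshal(conf)
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	bkt, err := cos.NewBucket(log.NewNopLogger(), raw, "example")
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	fmt.Println("bucket client ready for:", bkt.Name())
}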
gateway-server/app.py
|
import logging
from flask import Flask, request, render_template, flash, jsonify, session, redirect, url_for
from utils.helpers import validate_number, to_twilio_number, return_facilities, return_beds  # return_beds assumed to live alongside return_facilities
from clients.slack import Slack
from clients.twilio import Twilio
from clients.unomi import Unomi
from clients.two_one_one import TwoOneOne
from clients.aunt_bertha import AuntBertha
from datetime import datetime
from utils.user import User
import sqlite3
import os
from flask_login import LoginManager, login_user, logout_user, current_user, login_required
# TODO: Set up application logging
app = Flask(__name__)
# app.secret_key = os.environ["SECRET_KEY"] or b'_5#y2L"F4Q8z\n\xec]/' # or not working!?
app.secret_key = b'_5#y2L"F4Q8z\n\xec]/'
# app.config['TESTING'] = False
# app.config['LOGIN_DISABLED'] = False
slack_client = Slack()
twilio_client = Twilio()
unomi_client = Unomi()
too_client = TwoOneOne()
aunt_bertha_client = AuntBertha()
@app.errorhandler(500)
def internal_error(error):
return jsonify(error)
# Flask-Login Manager
login_manager = LoginManager()
login_manager.init_app(app)
login_manager.login_view = "login"
@app.route('/')
def index():
return render_template('index.html')
@app.route('/terms')
def terms():
return render_template('terms.html')
@app.route('/submit', methods=['POST'])
def web_submit():
"""
Handles phone number submission coming from the website
"""
# Look for phone number from request form or request value:
number = request.values.get('From', None)
if not number:
try:
number = request.form["number"]
app.logger.error("Using web form submission.")
except KeyError as e:
app.logger.error("Invalid submission.")
else:
app.logger.error("Using SMS submission.")
number = validate_number(number)
body = request.values.get('Body', None)
channel, phone_number = slack_client.start_engagement(number)
profile = unomi_client.create_profile(profile_id=channel,
properties={'phoneNumber': phone_number})
assert(profile['itemId'] == channel) # Sanity check
unomi_client.track_event(channel, 'engagementStarted', {})
if body: # Case when this is a SMS submission
slack_client.forward_twilio_message(channel, body)
unomi_client.track_inbound_message(channel, body)
return render_template('confirmation.html')
@app.route('/message', methods=['POST'])
def message():
"""
Callback for Twilio. Messages posted to this route will forward the message
to the Slack channel associated with the senders phone number.
"""
body = request.values.get('Body', None)
from_zip = request.values.get('FromZip', None)
from_city = request.values.get('FromCity', None)
from_state = request.values.get('FromState', None)
number = to_twilio_number(request.values.get('From', None))
channel = unomi_client.channel_from_phone_number(number)
profile = unomi_client.profile_search(channel)
app.logger.error("SLACK CHANNEL?: {0}".format(slack_client.does_channel_exist(channel)))
if not slack_client.does_channel_exist(channel):
app.logger.error("Performing Web Submit: {0} {1}".format(channel, body))
web_submit() # Creates a profile
else:
app.logger.error("Channel Body: {0} {1}".format(channel, body))
slack_client.forward_twilio_message(channel, body)
unomi_client.track_inbound_message(channel, body)
if "city" not in profile["properties"].keys():
if from_city or from_state or from_zip:
unomi_client.update_profile(
profile_id=channel,
properties={
"city": from_city,
"state": from_state,
"zipcode": from_zip
}
)
body = ":world_map: Approximate Location: {0}, {1} {2}".format(from_city, from_state, from_zip)
response = slack_client.forward_twilio_message(channel, body)
return "200"
@app.route('/text', methods=['POST'])
def text():
"""
Callback for Slack. /send commands in Slack will trigger a post to this
route with parameters as defined here:
https://api.slack.com/slash-commands#app_command_handling
"""
channel_name = request.values.get('channel_name', None)
body = request.values.get('text', None)
channel_id = request.values.get('channel_id', None)
user_name = request.values.get('user_name', None)
app.logger.debug("Request: {0}".format(request.values))
channel_name = slack_client.group_name_from_group_id(channel_id)
number = unomi_client.phone_number_from_channel(channel_name)
if number:
text = "@" + user_name + ": " + body
try:
twilio_client.text(number, body)
except Exception as e:
return jsonify(e)
app.logger.debug("Slack user: {0}".format(user_name))
unomi_client.track_outbound_message(channel_name, body, user_name)
return jsonify(
response_type='in_channel',
text="Message sent",
)
else:
return 400
@app.route('/assessed', methods=['POST'])
def assessed():
"""
Callback for Slack, /assessed commands in Slack will trigger a post to this
route with parameters.
"""
body = request.values.get('text', None)
channel_id = request.values.get('channel_id', None)
user_name = request.values.get('user_name', None)
channel_name = slack_client.group_name_from_group_id(channel_id)
profile = unomi_client.profile_search(channel_name)
#profile["properties"]["assessed"] = datetime.today().strftime("%Y-%m-%d")
profile["properties"]["hadAssessment"] = 'yes'
unomi_client.update_profile(channel_name, profile["properties"])
return jsonify(
response_type='in_channel',
text="Updated saved.",
)
@app.route('/treated', methods=['POST'])
def treated():
"""
Callback for Slack, /treated commands in Slack will trigger a post to this
route with parameters.
"""
body = request.values.get('text', None)
channel_id = request.values.get('channel_id', None)
user_name = request.values.get('user_name', None)
channel_name = slack_client.group_name_from_group_id(channel_id)
profile = unomi_client.profile_search(channel_name)
#profile["properties"]["treated"] = datetime.today().strftime("%Y-%m-%d")
profile["properties"]["inTreatment"] = 'yes'
unomi_client.update_profile(channel_name, profile["properties"])
return jsonify(
response_type='in_channel',
text="Updated saved.",
)
@app.route('/need', methods=['POST'])
def need():
"""
Callback for Slack, /need commands in Slack will trigger a post to this
route with parameters.
The usage for /need is:
/need [name]
ex: /need bed
"""
app.logger.info(request.values)
body = request.values.get('text', None)
channel_id = request.values.get('channel_id', None)
user_name = request.values.get('user_name', None)
text = body.strip()
need = {
"name": text,
"timeStamp" : datetime.now().strftime("%Y-%m-%dT%H:%M:%SZ")
}
channel_name = slack_client.group_name_from_group_id(channel_id)
app.logger.info(channel_name)
profile = unomi_client.profile_search(channel_name)
# Update profile property with this need
try:
app.logger.info(profile)
needs = profile["properties"]["needs"]
        needs.append(need)
    except (KeyError, TypeError, AttributeError):  # no needs recorded yet
needs = [need]
profile["properties"]["needs"] = needs
unomi_client.update_profile(channel_name, profile["properties"])
return jsonify(
response_type='in_channel',
text="Need saved: {0}".format(text),
)
@app.route('/demographics', methods=['POST'])
def demographics():
"""
Callback for Slack, /demographic commands in Slack will trigger a post to this
route with parameters.
The usage for /demographic is:
/demographic [key] [value]
ex: /demographic gender male
"""
body = request.values.get('text', None)
channel_id = request.values.get('channel_id', None)
user_name = request.values.get('user_name', None)
text = body.strip().split()
key = text[0]
value = text[1]
demographic = {
key: value
}
channel_name = slack_client.group_name_from_group_id(channel_id)
profile = unomi_client.profile_search(channel_name)
# Update profile property with this need
try:
demographics = profile["properties"]["demographics"]
demographics[key] = value
except KeyError:
demographics = {key: value}
text = "Demographic saved: {0}={1}".format(key, value)
profile["properties"]["demographics"] = demographics
unomi_client.update_profile(channel_name, profile["properties"])
return jsonify(
response_type='in_channel',
text=text
)
@app.route('/event', methods=['POST'])
def event():
"""
Callback for Slack, /event commands in Slack will trigger a post to this
route with parameters.
The usage for /event is:
/event [name]
ex: /event bed
"""
print("SLACK REQUEST: ", request.values)
channel_name = request.values.get('channel_name', None)
body = request.values.get('text', None)
channel_id = request.values.get('channel_id', None)
user_name = request.values.get('user_name', None)
event = body.strip()
profile = unomi_client.profile_search(channel_name)
unomi_client.track_event(channel_name, 'userGenerated', {'value': event}, user_name)
return jsonify(
response_type='in_channel',
text="Event saved: {0}".format(event))
@app.route('/stage', methods=['POST'])
def stage():
"""
Callback for Slack, /stage commands in Slack will trigger a post to this
route with parameters.
The usage for /stage is:
/stage [stage] [notes]
ex: /stage preparation meeting scheduled with physician
"""
print("SLACK REQUEST: ", request.values)
channel_name = request.values.get('channel_name', None)
body = request.values.get('text', None)
channel_id = request.values.get('channel_id', None)
user_name = request.values.get('user_name', None)
stage = body.split()[0]
notes = ' '.join(body.split()[1:])
profile = unomi_client.profile_search(channel_name)
unomi_client.track_event(channel_name, 'stageChange', {'value': stage, 'notes': notes}, user_name)
profile = unomi_client.profile_search(channel_name)
profile["properties"]["stage"] = {"name":stage,"notes":notes}
unomi_client.update_profile(channel_name, profile["properties"])
return jsonify(
response_type='in_channel',
text="Event saved")
@app.route('/facilities', methods=['POST'])
def facilities():
zipcode = request.form["text"]
data= return_facilities(zipcode=zipcode)
attachments = []
for i in range(len(data)):
attachment = {
"title": "Facility",
"fields": [{
"value": "Address: {0}".format(data[i])
}]
}
attachments.append(attachment)
return jsonify(
response_type='in_channel',
text="Facilities",
attachments=attachments
)
@app.route('/beds', methods=['POST'])
def beds():
"""
Callback for Slack, /beds commands in Slack will trigger a post to this
route with parameters.
The usage for /beds is:
/beds [county] [gender] [age]
ex: /beds philadelphia male 21
"""
channel_name = request.values.get('channel_name', None)
user_name = request.values.get('user_name', None)
row = request.form["text"].split()
county, gender, age = None, None, None
try:
county = row[0]
gender = row[1]
age = row[2]
except IndexError:
# OK, only set if needed
pass
beds = return_beds(county, gender, age)
attachments = []
for bed in beds:
attachment = {
"title": bed["name"],
"fields": [{
"value": "Phone: {0}".format(bed["phone"])
}]
}
attachments.append(attachment)
if len(attachments) == 0: # No beds found
event_info = {'county': county, 'age': age, 'gender':gender }
unomi_client.track_event(channel_name, 'noBeds', event_info, user_name)
text = "No beds found, this event has been logged."
else:
text = "Open Beds in {county}".format(county=county)
return jsonify(
response_type='in_channel',
text=text,
attachments=attachments
)
@app.route('/aunt_bertha', methods=['POST'])
def aunt_bertha():
"""
Callback for Slack, /auntbertha commands in Slack will trigger a post to this
route with parameters.
    The usage for /auntbertha is:
    /auntbertha [zipcode] [keywords]
    ex: /auntbertha 19107 women recovery
"""
app.logger.debug("SLACK REQUEST: ", request.values)
channel_name = request.values.get('channel_name', None)
body = request.values.get('text', None)
channel_id = request.values.get('channel_id', None)
user_name = request.values.get('user_name', None)
text = body.strip().split()
zipcode = text[0]
keywords = " ".join(text[1:])
profile = unomi_client.profile_search(channel_name)
unomi_client.track_event(channel_name, 'auntBerthaLookup', {'keywords': keywords, 'zipcode':zipcode}, user_name)
attachments = aunt_bertha_client.search(keywords, zipcode)
return jsonify(
response_type='in_channel',
text="Aunt Bertha Results",
attachments=attachments)
@app.route('/211', methods=['POST'])
def two_one_one():
"""
Callback for Slack, /211 commands in Slack will trigger a post to this
route with parameters.
The usage for /211 is:
/211 [keyword] [zipcode]
ex: /211 shelter 19011
"""
print("SLACK REQUEST: ", request.values)
channel_name = request.values.get('channel_name', None)
body = request.values.get('text', None)
channel_id = request.values.get('channel_id', None)
user_name = request.values.get('user_name', None)
text = body.strip().split()
keyword = text[0]
zipcode = text[1]
profile = unomi_client.profile_search(channel_name)
unomi_client.track_event(channel_name, 'twoOneOneLookup', {'value': "{0} {1}".format(keyword, zipcode)}, user_name)
attachments = too_client.search(keyword, zipcode)
return jsonify(
response_type='in_channel',
text="211 Results",
attachments=attachments)
"""
Admin User Interface
"""
@app.route('/profiles', methods=['GET'])
@login_required
def profiles_index():
print("attempting")
profiles = unomi_client.list_profiles()
assessments = 0
treatments = 0
for profile in profiles:
try:
if profile["properties"]["hadAssessment"] == 'yes':
assessments += 1
if profile["properties"]["inTreatment"] == 'yes':
treatments += 1
except:
pass
return render_template('profiles/index.html',
profiles=profiles,
profiles_count=len(profiles),
treatments=treatments,
treatment_rate=round(treatments/len(profiles)*100,1),
assessments=assessments,
assessment_rate=round(assessments/len(profiles)*100,1))
@app.route('/profiles/<profile_id>', methods=['GET'])
@login_required
def profiles_show(profile_id):
profile = unomi_client.profile_search(profile_id)
events = unomi_client.list_events(profile_id)
crss_messages = {} #{ ('Michael Ghen', '+12154781286'): 17, ... }
for event in events:
try:
user_name = event['source']['itemId']
crs_name, phone_number = slack_client.get_phone_number_by_user_name(user_name)
app.logger.debug("{0}, {1}".format(crs_name, phone_number))
key = (crs_name, phone_number)
if key in crss_messages.keys():
crss_messages[key] += 1
else:
crss_messages[key] = 1
# Case when there is an inbound message, no CRS involved
except TypeError as e:
app.logger.warning(e)
pass # Go to the next event
all_crs_messages = []
for key, value in crss_messages.items():
all_crs_messages.append([key[0], key[1], value])
crss_messages = all_crs_messages
return render_template('profiles/show.html', profile=profile, events=events, crss_messages=crss_messages)
@app.route('/profiles/needs', methods=['GET'])
@login_required
def profiles_needs_index():
profiles = unomi_client.list_profiles()
needs = []
for profile in profiles:
try:
for need in profile["properties"]["needs"]:
needs.append(
(profile["itemId"], need, "Philadelphia")
)
except KeyError:
# No needs
pass
return render_template('profiles/needs/index.html', needs=needs)
@app.route('/profiles/needs/data', methods=['GET'])
@login_required
def profiles_needs_data():
profiles = unomi_client.list_profiles()
needs = {}
county_needs = {}
counties = []
for profile in profiles:
if 'needs' in profile["properties"].keys():
for need in profile["properties"]["needs"]:
if need["name"] in needs.keys():
needs[need["name"]] += 1
else:
needs[need["name"]] = 1
county = profile["properties"]["county"]
if need["name"] in county_needs.keys():
if county in county_needs[need["name"]].keys():
county_needs[need["name"]][county] += 1
else:
county_needs[need["name"]][county] = 1
counties.append(county)
else:
county_needs[need["name"]] = {}
pie_data = []
for key, value in needs.items():
pie_data.append({'name': key, 'y': value})
bar_series = [] # {name:, data:}
for key, value in county_needs.items():
values = list(value.values())
bar_series.append({'name': key, 'data': values})
return jsonify( {'pie_data': pie_data, 'bar_series': bar_series, 'counties': list(set(counties))} )
# user managment/routes
@login_manager.user_loader
def load_user(user_id):
return User.get(user_id)
@app.route('/login', methods=['GET', 'POST'])
def login():
if request.method == 'POST':
user = User.validate(request.form["username"], request.form["password"])
if user is None:
flash('Invalid username or password')
return redirect(url_for('login'))
login_user(user)
flash('Logged in successfully.')
next = request.args.get('next')
# is_safe_url should check if the url is safe for redirects.
# See http://flask.pocoo.org/snippets/62/ for an example.
# if not is_safe_url(next):
# return abort(400)
return redirect(next or url_for('index'))
else:
if current_user.is_authenticated:
return redirect(url_for('index'))
return render_template('login.html')
@app.route('/logout')
def logout():
logout_user()
return redirect(url_for('index'))
|
[] |
[] |
[
"SECRET_KEY"
] |
[]
|
["SECRET_KEY"]
|
python
| 1 | 0 | |
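The commented-out line near the top of the Flask app, app.secret_key = os.environ["SECRET_KEY"] or b'...', fails when the variable is unset because os.environ[...] raises KeyError before the or-fallback can apply; os.environ.get("SECRET_KEY", default) is the usual Python fix. The Go sketch below shows the equivalent env-with-default pattern via os.LookupEnv; the default value is only a placeholder.

// Illustrative "environment variable with fallback" helper.
package main

import (
	"fmt"
	"os"
)

func envOr(key, fallback string) string {
	// LookupEnv distinguishes an unset variable from an empty one.
	if v, ok := os.LookupEnv(key); ok && v != "" {
		return v
	}
	return fallback
}

func main() {
	secret := envOr("SECRET_KEY", "dev-only-secret") // placeholder default
	fmt.Println("secret key length:", len(secret))
}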
src/azure-cli/azure/cli/command_modules/acs/custom.py
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from __future__ import print_function
import binascii
import datetime
import errno
import json
import os
import os.path
import platform
import random
import re
import ssl
import stat
import string
import subprocess
import sys
import tempfile
import threading
import time
import uuid
import webbrowser
from six.moves.urllib.request import urlopen # pylint: disable=import-error
from six.moves.urllib.error import URLError # pylint: disable=import-error
from ._helpers import _populate_api_server_access_profile, _set_load_balancer_sku, _set_vm_set_type
# pylint: disable=import-error
import yaml
import dateutil.parser
from dateutil.relativedelta import relativedelta
from knack.log import get_logger
from knack.util import CLIError
from msrestazure.azure_exceptions import CloudError
import requests
# pylint: disable=no-name-in-module,import-error
from azure.cli.command_modules.acs import acs_client, proxy
from azure.cli.command_modules.acs._params import regions_in_preview, regions_in_prod
from azure.cli.core.api import get_config_dir
from azure.cli.core._profile import Profile
from azure.cli.core.commands.client_factory import get_mgmt_service_client, get_subscription_id
from azure.cli.core.keys import is_valid_ssh_rsa_public_key
from azure.cli.core.util import in_cloud_console, shell_safe_json_parse, truncate_text, sdk_no_wait
from azure.cli.core.commands import LongRunningOperation
from azure.graphrbac.models import (ApplicationCreateParameters,
ApplicationUpdateParameters,
PasswordCredential,
KeyCredential,
ServicePrincipalCreateParameters,
GetObjectsParameters,
ResourceAccess, RequiredResourceAccess)
from azure.mgmt.containerservice.models import ContainerServiceOrchestratorTypes
from azure.mgmt.containerservice.v2019_08_01.models import ContainerServiceNetworkProfile
from azure.mgmt.containerservice.v2019_08_01.models import ContainerServiceLinuxProfile
from azure.mgmt.containerservice.v2019_08_01.models import ManagedClusterServicePrincipalProfile
from azure.mgmt.containerservice.v2019_08_01.models import ContainerServiceSshConfiguration
from azure.mgmt.containerservice.v2019_08_01.models import ContainerServiceSshPublicKey
from azure.mgmt.containerservice.v2019_08_01.models import ContainerServiceStorageProfileTypes
from azure.mgmt.containerservice.v2019_08_01.models import ManagedCluster
from azure.mgmt.containerservice.v2019_08_01.models import ManagedClusterAADProfile
from azure.mgmt.containerservice.v2019_08_01.models import ManagedClusterAddonProfile
from azure.mgmt.containerservice.v2019_08_01.models import ManagedClusterAgentPoolProfile
from azure.mgmt.containerservice.v2019_08_01.models import ManagedClusterLoadBalancerProfile
from azure.mgmt.containerservice.v2019_08_01.models import ManagedClusterLoadBalancerProfileManagedOutboundIPs
from azure.mgmt.containerservice.v2019_08_01.models import ManagedClusterLoadBalancerProfileOutboundIPPrefixes
from azure.mgmt.containerservice.v2019_08_01.models import ManagedClusterLoadBalancerProfileOutboundIPs
from azure.mgmt.containerservice.v2019_08_01.models import AgentPool
from azure.mgmt.containerservice.v2019_08_01.models import ResourceReference
from azure.mgmt.containerservice.v2019_09_30_preview.models import OpenShiftManagedClusterAgentPoolProfile
from azure.mgmt.containerservice.v2019_09_30_preview.models import OpenShiftAgentPoolProfileRole
from azure.mgmt.containerservice.v2019_09_30_preview.models import OpenShiftManagedClusterIdentityProvider
from azure.mgmt.containerservice.v2019_09_30_preview.models import OpenShiftManagedClusterAADIdentityProvider
from azure.mgmt.containerservice.v2019_09_30_preview.models import OpenShiftManagedCluster
from azure.mgmt.containerservice.v2019_09_30_preview.models import OpenShiftRouterProfile
from azure.mgmt.containerservice.v2019_09_30_preview.models import OpenShiftManagedClusterAuthProfile
from azure.mgmt.containerservice.v2019_09_30_preview.models import NetworkProfile
from azure.mgmt.containerservice.v2019_09_30_preview.models import OpenShiftManagedClusterMonitorProfile
from ._client_factory import cf_container_services
from ._client_factory import cf_resource_groups
from ._client_factory import get_auth_management_client
from ._client_factory import get_graph_rbac_management_client
from ._client_factory import cf_resources
from ._client_factory import get_resource_by_name
from ._client_factory import cf_container_registry_service
logger = get_logger(__name__)
# pylint:disable=too-many-lines,unused-argument
def which(binary):
path_var = os.getenv('PATH')
if platform.system() == 'Windows':
binary = binary + '.exe'
parts = path_var.split(';')
else:
parts = path_var.split(':')
for part in parts:
bin_path = os.path.join(part, binary)
if os.path.exists(bin_path) and os.path.isfile(bin_path) and os.access(bin_path, os.X_OK):
return bin_path
return None
def wait_then_open(url):
"""
Waits for a bit then opens a URL. Useful for waiting for a proxy to come up, and then open the URL.
"""
for _ in range(1, 10):
try:
urlopen(url, context=_ssl_context())
except URLError:
time.sleep(1)
break
webbrowser.open_new_tab(url)
def wait_then_open_async(url):
"""
Spawns a thread that waits for a bit then opens a URL.
"""
t = threading.Thread(target=wait_then_open, args=({url}))
t.daemon = True
t.start()
def acs_browse(cmd, client, resource_group_name, name, disable_browser=False, ssh_key_file=None):
"""
Opens a browser to the web interface for the cluster orchestrator
:param name: Name of the target Azure container service instance.
:type name: String
:param resource_group_name: Name of Azure container service's resource group.
:type resource_group_name: String
    :param disable_browser: If true, don't launch a web browser after establishing the proxy
:type disable_browser: bool
:param ssh_key_file: If set a path to an SSH key to use, only applies to DCOS
:type ssh_key_file: string
"""
acs_info = _get_acs_info(cmd.cli_ctx, name, resource_group_name)
_acs_browse_internal(cmd, client, acs_info, resource_group_name, name, disable_browser, ssh_key_file)
def _acs_browse_internal(cmd, client, acs_info, resource_group_name, name, disable_browser, ssh_key_file):
orchestrator_type = acs_info.orchestrator_profile.orchestrator_type # pylint: disable=no-member
if str(orchestrator_type).lower() == 'kubernetes' or \
orchestrator_type == ContainerServiceOrchestratorTypes.kubernetes or \
(acs_info.custom_profile and acs_info.custom_profile.orchestrator == 'kubernetes'): # pylint: disable=no-member
return k8s_browse(cmd, client, name, resource_group_name, disable_browser, ssh_key_file=ssh_key_file)
if str(orchestrator_type).lower() == 'dcos' or orchestrator_type == ContainerServiceOrchestratorTypes.dcos:
return _dcos_browse_internal(acs_info, disable_browser, ssh_key_file)
raise CLIError('Unsupported orchestrator type {} for browse'.format(orchestrator_type))
def k8s_browse(cmd, client, name, resource_group_name, disable_browser=False, ssh_key_file=None):
"""
Launch a proxy and browse the Kubernetes web UI.
    :param disable_browser: If true, don't launch a web browser after establishing the proxy
:type disable_browser: bool
"""
acs_info = _get_acs_info(cmd.cli_ctx, name, resource_group_name)
_k8s_browse_internal(name, acs_info, disable_browser, ssh_key_file)
def _k8s_browse_internal(name, acs_info, disable_browser, ssh_key_file):
if not which('kubectl'):
raise CLIError('Can not find kubectl executable in PATH')
browse_path = os.path.join(get_config_dir(), 'acsBrowseConfig.yaml')
if os.path.exists(browse_path):
os.remove(browse_path)
_k8s_get_credentials_internal(name, acs_info, browse_path, ssh_key_file, False)
logger.warning('Proxy running on 127.0.0.1:8001/ui')
logger.warning('Press CTRL+C to close the tunnel...')
if not disable_browser:
wait_then_open_async('http://127.0.0.1:8001/ui')
subprocess.call(["kubectl", "--kubeconfig", browse_path, "proxy"])
def dcos_browse(cmd, client, name, resource_group_name, disable_browser=False, ssh_key_file=None):
"""
Creates an SSH tunnel to the Azure container service, and opens the Mesosphere DC/OS dashboard in the browser.
    :param name: Name of the target Azure container service instance.
:type name: String
:param resource_group_name: Name of Azure container service's resource group.
:type resource_group_name: String
    :param disable_browser: If true, don't launch a web browser after establishing the proxy
:type disable_browser: bool
:param ssh_key_file: Path to the SSH key to use
:type ssh_key_file: string
"""
acs_info = _get_acs_info(cmd.cli_ctx, name, resource_group_name)
_dcos_browse_internal(acs_info, disable_browser, ssh_key_file)
def _dcos_browse_internal(acs_info, disable_browser, ssh_key_file):
if not os.path.isfile(ssh_key_file):
raise CLIError('Private key file {} does not exist'.format(ssh_key_file))
acs = acs_client.ACSClient()
if not acs.connect(_get_host_name(acs_info), _get_username(acs_info),
key_filename=ssh_key_file):
raise CLIError('Error connecting to ACS: {}'.format(_get_host_name(acs_info)))
octarine_bin = '/opt/mesosphere/bin/octarine'
if not acs.file_exists(octarine_bin):
raise CLIError('Proxy server ({}) does not exist on the cluster.'.format(octarine_bin))
proxy_id = _rand_str(16)
proxy_cmd = '{} {}'.format(octarine_bin, proxy_id)
acs.run(proxy_cmd, background=True)
# Parse the output to get the remote PORT
proxy_client_cmd = '{} --client --port {}'.format(octarine_bin, proxy_id)
stdout, _ = acs.run(proxy_client_cmd)
remote_port = int(stdout.read().decode().strip())
local_port = acs.get_available_local_port()
# Set the proxy
proxy.set_http_proxy('127.0.0.1', local_port)
logger.warning('Proxy running on 127.0.0.1:%s', local_port)
logger.warning('Press CTRL+C to close the tunnel...')
if not disable_browser:
wait_then_open_async('http://127.0.0.1')
try:
acs.create_tunnel(
remote_host='127.0.0.1',
remote_port=remote_port,
local_port=local_port)
finally:
proxy.disable_http_proxy()
def acs_install_cli(cmd, client, resource_group_name, name, install_location=None, client_version=None):
acs_info = _get_acs_info(cmd.cli_ctx, name, resource_group_name)
orchestrator_type = acs_info.orchestrator_profile.orchestrator_type # pylint: disable=no-member
kwargs = {'install_location': install_location}
if client_version:
kwargs['client_version'] = client_version
if orchestrator_type == 'kubernetes':
return k8s_install_cli(**kwargs)
if orchestrator_type == 'dcos':
return dcos_install_cli(**kwargs)
raise CLIError('Unsupported orchestrator type {} for install-cli'.format(orchestrator_type))
def _ssl_context():
if sys.version_info < (3, 4) or (in_cloud_console() and platform.system() == 'Windows'):
try:
return ssl.SSLContext(ssl.PROTOCOL_TLS) # added in python 2.7.13 and 3.6
except AttributeError:
return ssl.SSLContext(ssl.PROTOCOL_TLSv1)
return ssl.create_default_context()
def _urlretrieve(url, filename):
req = urlopen(url, context=_ssl_context())
with open(filename, "wb") as f:
f.write(req.read())
def dcos_install_cli(cmd, install_location=None, client_version='1.8'):
"""
Downloads the dcos command line from Mesosphere
"""
system = platform.system()
if not install_location:
raise CLIError(
"No install location specified and it could not be determined from the current platform '{}'".format(
system))
base_url = 'https://downloads.dcos.io/binaries/cli/{}/x86-64/dcos-{}/{}'
if system == 'Windows':
file_url = base_url.format('windows', client_version, 'dcos.exe')
elif system == 'Linux':
# TODO Support ARM CPU here
file_url = base_url.format('linux', client_version, 'dcos')
elif system == 'Darwin':
file_url = base_url.format('darwin', client_version, 'dcos')
else:
        raise CLIError('Unsupported platform "{}"; cannot download the DC/OS CLI.'.format(system))
logger.warning('Downloading client to %s', install_location)
try:
_urlretrieve(file_url, install_location)
os.chmod(install_location,
os.stat(install_location).st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)
except IOError as err:
raise CLIError('Connection error while attempting to download client ({})'.format(err))
def k8s_install_cli(cmd, client_version='latest', install_location=None):
"""Install kubectl, a command-line interface for Kubernetes clusters."""
source_url = "https://storage.googleapis.com/kubernetes-release/release"
cloud_name = cmd.cli_ctx.cloud.name
if cloud_name.lower() == 'azurechinacloud':
source_url = 'https://mirror.azure.cn/kubernetes/kubectl'
if client_version == 'latest':
context = _ssl_context()
version = urlopen(source_url + '/stable.txt', context=context).read()
client_version = version.decode('UTF-8').strip()
else:
client_version = "v%s" % client_version
file_url = ''
system = platform.system()
base_url = source_url + '/{}/bin/{}/amd64/{}'
# ensure installation directory exists
install_dir, cli = os.path.dirname(install_location), os.path.basename(install_location)
if not os.path.exists(install_dir):
os.makedirs(install_dir)
if system == 'Windows':
file_url = base_url.format(client_version, 'windows', 'kubectl.exe')
elif system == 'Linux':
# TODO: Support ARM CPU here
file_url = base_url.format(client_version, 'linux', 'kubectl')
elif system == 'Darwin':
file_url = base_url.format(client_version, 'darwin', 'kubectl')
else:
        raise CLIError('Unsupported platform "{}"; cannot download kubectl.'.format(system))
logger.warning('Downloading client to "%s" from "%s"', install_location, file_url)
try:
_urlretrieve(file_url, install_location)
os.chmod(install_location,
os.stat(install_location).st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)
except IOError as ex:
raise CLIError('Connection error while attempting to download client ({})'.format(ex))
if system == 'Windows': # be verbose, as the install_location likely not in Windows's search PATHs
env_paths = os.environ['PATH'].split(';')
found = next((x for x in env_paths if x.lower().rstrip('\\') == install_dir.lower()), None)
if not found:
# pylint: disable=logging-format-interpolation
logger.warning('Please add "{0}" to your search PATH so the `{1}` can be found. 2 options: \n'
' 1. Run "set PATH=%PATH%;{0}" or "$env:path += \'{0}\'" for PowerShell. '
'This is good for the current command session.\n'
' 2. Update system PATH environment variable by following '
'"Control Panel->System->Advanced->Environment Variables", and re-open the command window. '
'You only need to do it once'.format(install_dir, cli))
else:
logger.warning('Please ensure that %s is in your search PATH, so the `%s` command can be found.',
install_dir, cli)
def k8s_install_connector(cmd, client, name, resource_group_name, connector_name='aci-connector',
location=None, service_principal=None, client_secret=None,
chart_url=None, os_type='Linux', image_tag=None, aci_resource_group=None):
_k8s_install_or_upgrade_connector("install", cmd, client, name, resource_group_name, connector_name,
location, service_principal, client_secret, chart_url, os_type,
image_tag, aci_resource_group)
def k8s_upgrade_connector(cmd, client, name, resource_group_name, connector_name='aci-connector',
location=None, service_principal=None, client_secret=None,
chart_url=None, os_type='Linux', image_tag=None, aci_resource_group=None):
_k8s_install_or_upgrade_connector("upgrade", cmd, client, name, resource_group_name, connector_name,
location, service_principal, client_secret, chart_url, os_type,
image_tag, aci_resource_group)
def _k8s_install_or_upgrade_connector(helm_cmd, cmd, client, name, resource_group_name, connector_name,
location, service_principal, client_secret, chart_url, os_type,
image_tag, aci_resource_group):
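    # Steps: verify Helm is on PATH, require --client-secret whenever a service
    # principal is given, resolve and validate the ACI region, pull the AKS user
    # kubeconfig into a temp file, then install or upgrade the connector chart
    # once per requested OS (Linux, Windows, or both).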
from subprocess import PIPE, Popen
instance = client.get(resource_group_name, name)
helm_not_installed = 'Helm not detected, please verify if it is installed.'
url_chart = chart_url
if image_tag is None:
image_tag = 'latest'
# Check if Helm is installed locally
try:
Popen(["helm"], stdout=PIPE, stderr=PIPE)
except OSError:
raise CLIError(helm_not_installed)
# If SPN is specified, the secret should also be specified
if service_principal is not None and client_secret is None:
raise CLIError('--client-secret must be specified when --service-principal is specified')
# Validate if the RG exists
rg_location = _get_rg_location(cmd.cli_ctx, aci_resource_group or resource_group_name)
# Auto assign the location
if location is None:
location = rg_location
norm_location = location.replace(' ', '').lower()
    # Validate that the location is one of the regions where ACI is available
_validate_aci_location(norm_location)
    # Get the credentials from an AKS instance
_, browse_path = tempfile.mkstemp()
aks_get_credentials(cmd, client, resource_group_name, name, admin=False, path=browse_path)
subscription_id = get_subscription_id(cmd.cli_ctx)
# Get the TenantID
profile = Profile(cli_ctx=cmd.cli_ctx)
_, _, tenant_id = profile.get_login_credentials()
# Check if we want the linux connector
if os_type.lower() in ['linux', 'both']:
_helm_install_or_upgrade_aci_connector(helm_cmd, image_tag, url_chart, connector_name, service_principal,
client_secret, subscription_id, tenant_id, aci_resource_group,
norm_location, 'Linux', instance.enable_rbac, instance.fqdn)
# Check if we want the windows connector
if os_type.lower() in ['windows', 'both']:
_helm_install_or_upgrade_aci_connector(helm_cmd, image_tag, url_chart, connector_name, service_principal,
client_secret, subscription_id, tenant_id, aci_resource_group,
norm_location, 'Windows', instance.enable_rbac, instance.fqdn)
def _helm_install_or_upgrade_aci_connector(helm_cmd, image_tag, url_chart, connector_name, service_principal,
client_secret, subscription_id, tenant_id, aci_resource_group,
norm_location, os_type, use_rbac, masterFqdn):
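    # The chart is deployed with `helm install <chart> --name <release> --set <values>`
    # (or `helm upgrade` when upgrading). <values> is the comma-separated key=value
    # string assembled below; illustrative example only:
    #   env.nodeName=virtual-kubelet-aci-connector-linux-westus2,env.nodeTaint=azure.com/aci,rbac.install=true,...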
rbac_install = "true" if use_rbac else "false"
node_taint = 'azure.com/aci'
helm_release_name = connector_name.lower() + '-' + os_type.lower() + '-' + norm_location
node_name = 'virtual-kubelet-' + helm_release_name
k8s_master = 'https://{}'.format(masterFqdn)
logger.warning("Deploying the ACI connector for '%s' using Helm", os_type)
try:
values = 'env.nodeName={},env.nodeTaint={},env.nodeOsType={},image.tag={},rbac.install={}'.format(
node_name, node_taint, os_type, image_tag, rbac_install)
if service_principal:
values += ",env.azureClientId=" + service_principal
if client_secret:
values += ",env.azureClientKey=" + client_secret
if subscription_id:
values += ",env.azureSubscriptionId=" + subscription_id
if tenant_id:
values += ",env.azureTenantId=" + tenant_id
if aci_resource_group:
values += ",env.aciResourceGroup=" + aci_resource_group
if norm_location:
values += ",env.aciRegion=" + norm_location
# Currently, we need to set the master FQDN.
# This is temporary and we should remove it when possible
values += ",env.masterUri=" + k8s_master
if helm_cmd == "install":
subprocess.call(["helm", "install", url_chart, "--name", helm_release_name, "--set", values])
elif helm_cmd == "upgrade":
subprocess.call(["helm", "upgrade", helm_release_name, url_chart, "--set", values])
except subprocess.CalledProcessError as err:
raise CLIError('Could not deploy the ACI connector Chart: {}'.format(err))
def k8s_uninstall_connector(cmd, client, name, resource_group_name, connector_name='aci-connector',
location=None, graceful=False, os_type='Linux'):
from subprocess import PIPE, Popen
helm_not_installed = "Error : Helm not detected, please verify if it is installed."
# Check if Helm is installed locally
try:
Popen(["helm"], stdout=PIPE, stderr=PIPE)
except OSError:
raise CLIError(helm_not_installed)
    # Get the credentials from an AKS instance
_, browse_path = tempfile.mkstemp()
aks_get_credentials(cmd, client, resource_group_name, name, admin=False, path=browse_path)
# Validate if the RG exists
rg_location = _get_rg_location(cmd.cli_ctx, resource_group_name)
# Auto assign the location
if location is None:
location = rg_location
norm_location = location.replace(' ', '').lower()
if os_type.lower() in ['linux', 'both']:
helm_release_name = connector_name.lower() + '-linux-' + norm_location
node_name = 'virtual-kubelet-' + helm_release_name
_undeploy_connector(graceful, node_name, helm_release_name)
if os_type.lower() in ['windows', 'both']:
helm_release_name = connector_name.lower() + '-windows-' + norm_location
node_name = 'virtual-kubelet-' + helm_release_name
_undeploy_connector(graceful, node_name, helm_release_name)
def _undeploy_connector(graceful, node_name, helm_release_name):
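    # When --graceful is requested the node is first drained with kubectl (which
    # must be on PATH); the Helm release is then purged and the corresponding
    # virtual-kubelet node object is deleted from the cluster.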
if graceful:
logger.warning('Graceful option selected, will try to drain the node first')
from subprocess import PIPE, Popen
kubectl_not_installed = 'Kubectl not detected, please verify if it is installed.'
try:
Popen(["kubectl"], stdout=PIPE, stderr=PIPE)
except OSError:
raise CLIError(kubectl_not_installed)
try:
drain_node = subprocess.check_output(
['kubectl', 'drain', node_name, '--force', '--delete-local-data'],
universal_newlines=True)
if not drain_node:
raise CLIError('Could not find the node, make sure you' +
' are using the correct --os-type')
except subprocess.CalledProcessError as err:
raise CLIError('Could not find the node, make sure you are using the correct' +
' --connector-name, --location and --os-type options: {}'.format(err))
logger.warning("Undeploying the '%s' using Helm", helm_release_name)
try:
subprocess.call(['helm', 'del', helm_release_name, '--purge'])
except subprocess.CalledProcessError as err:
raise CLIError('Could not undeploy the ACI connector Chart: {}'.format(err))
try:
subprocess.check_output(
['kubectl', 'delete', 'node', node_name],
universal_newlines=True)
except subprocess.CalledProcessError as err:
raise CLIError('Could not delete the node, make sure you are using the correct' +
' --connector-name, --location and --os-type options: {}'.format(err))
def _build_service_principal(rbac_client, cli_ctx, name, url, client_secret):
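    # Creates the AAD application, then retries service principal creation up to
    # 10 times with an increasing delay (new applications can take a moment to
    # replicate in AAD). Returns the app id on success, or False if all attempts fail.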
# use get_progress_controller
hook = cli_ctx.get_progress_controller(True)
    hook.add(message='Creating service principal', value=0, total_val=1.0)
logger.info('Creating service principal')
# always create application with 5 years expiration
start_date = datetime.datetime.utcnow()
end_date = start_date + relativedelta(years=5)
result = create_application(rbac_client.applications, name, url, [url], password=client_secret,
start_date=start_date, end_date=end_date)
service_principal = result.app_id # pylint: disable=no-member
for x in range(0, 10):
hook.add(message='Creating service principal', value=0.1 * x, total_val=1.0)
try:
create_service_principal(cli_ctx, service_principal, rbac_client=rbac_client)
break
# TODO figure out what exception AAD throws here sometimes.
except Exception as ex: # pylint: disable=broad-except
logger.info(ex)
time.sleep(2 + 2 * x)
else:
return False
hook.add(message='Finished service principal creation', value=1.0, total_val=1.0)
logger.info('Finished service principal creation')
return service_principal
def _add_role_assignment(cli_ctx, role, service_principal, delay=2, scope=None):
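    # Retries the role assignment up to 10 times with a growing delay; an
    # "already exists" CloudError counts as success, and False is returned if
    # every attempt fails.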
# AAD can have delays in propagating data, so sleep and retry
hook = cli_ctx.get_progress_controller(True)
hook.add(message='Waiting for AAD role to propagate', value=0, total_val=1.0)
logger.info('Waiting for AAD role to propagate')
for x in range(0, 10):
hook.add(message='Waiting for AAD role to propagate', value=0.1 * x, total_val=1.0)
try:
# TODO: break this out into a shared utility library
create_role_assignment(cli_ctx, role, service_principal, scope=scope)
break
except CloudError as ex:
if ex.message == 'The role assignment already exists.':
break
logger.info(ex.message)
except: # pylint: disable=bare-except
pass
time.sleep(delay + delay * x)
else:
return False
hook.add(message='AAD role propagation done', value=1.0, total_val=1.0)
logger.info('AAD role propagation done')
return True
def delete_role_assignments(cli_ctx, ids=None, assignee=None, role=None, resource_group_name=None,
scope=None, include_inherited=False, yes=None):
factory = get_auth_management_client(cli_ctx, scope)
assignments_client = factory.role_assignments
definitions_client = factory.role_definitions
ids = ids or []
if ids:
if assignee or role or resource_group_name or scope or include_inherited:
raise CLIError('When assignment ids are used, other parameter values are not required')
for i in ids:
assignments_client.delete_by_id(i)
return
    if not any([ids, assignee, role, resource_group_name, scope, yes]):
from knack.prompting import prompt_y_n
msg = 'This will delete all role assignments under the subscription. Are you sure?'
if not prompt_y_n(msg, default="n"):
return
scope = _build_role_scope(resource_group_name, scope,
assignments_client.config.subscription_id)
assignments = _search_role_assignments(cli_ctx, assignments_client, definitions_client,
scope, assignee, role, include_inherited,
include_groups=False)
if assignments:
for a in assignments:
assignments_client.delete_by_id(a.id)
def _delete_role_assignments(cli_ctx, role, service_principal, delay=2, scope=None):
# AAD can have delays in propagating data, so sleep and retry
hook = cli_ctx.get_progress_controller(True)
hook.add(message='Waiting for AAD role to delete', value=0, total_val=1.0)
logger.info('Waiting for AAD role to delete')
for x in range(0, 10):
hook.add(message='Waiting for AAD role to delete', value=0.1 * x, total_val=1.0)
try:
delete_role_assignments(cli_ctx,
role=role,
assignee=service_principal,
scope=scope)
break
except CLIError as ex:
raise ex
except CloudError as ex:
logger.info(ex)
time.sleep(delay + delay * x)
else:
return False
hook.add(message='AAD role deletion done', value=1.0, total_val=1.0)
logger.info('AAD role deletion done')
return True
def _search_role_assignments(cli_ctx, assignments_client, definitions_client,
scope, assignee, role, include_inherited, include_groups):
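    # Listing strategy: an explicit scope wins (atScope() filter); otherwise
    # assignments are listed per assignee (assignedTo()/principalId filters) or
    # subscription-wide, and then narrowed in-memory by scope, role id and
    # assignee object id.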
assignee_object_id = None
if assignee:
assignee_object_id = _resolve_object_id(cli_ctx, assignee)
# always use "scope" if provided, so we can get assignments beyond subscription e.g. management groups
if scope:
assignments = list(assignments_client.list_for_scope(
scope=scope, filter='atScope()'))
elif assignee_object_id:
if include_groups:
f = "assignedTo('{}')".format(assignee_object_id)
else:
f = "principalId eq '{}'".format(assignee_object_id)
assignments = list(assignments_client.list(filter=f))
else:
assignments = list(assignments_client.list())
if assignments:
assignments = [a for a in assignments if (
not scope or
include_inherited and re.match(_get_role_property(a, 'scope'), scope, re.I) or
_get_role_property(a, 'scope').lower() == scope.lower()
)]
if role:
role_id = _resolve_role_id(role, scope, definitions_client)
assignments = [i for i in assignments if _get_role_property(
i, 'role_definition_id') == role_id]
if assignee_object_id:
assignments = [i for i in assignments if _get_role_property(
i, 'principal_id') == assignee_object_id]
return assignments
def _get_role_property(obj, property_name):
if isinstance(obj, dict):
return obj[property_name]
return getattr(obj, property_name)
def _get_default_dns_prefix(name, resource_group_name, subscription_id):
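    # Illustrative example (not from real data): name='My_Cluster',
    # resource group='prod-rg', subscription='f0e1d2c3-...' yields
    # 'MyCluster-prod-rg-f0e1d2'.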
# Use subscription id to provide uniqueness and prevent DNS name clashes
name_part = re.sub('[^A-Za-z0-9-]', '', name)[0:10]
if not name_part[0].isalpha():
name_part = (str('a') + name_part)[0:10]
resource_group_part = re.sub('[^A-Za-z0-9-]', '', resource_group_name)[0:16]
return '{}-{}-{}'.format(name_part, resource_group_part, subscription_id[0:6])
def list_acs_locations(cmd, client):
return {
"productionRegions": regions_in_prod,
"previewRegions": regions_in_preview
}
def _generate_windows_profile(windows, admin_username, admin_password):
if windows:
if not admin_password:
raise CLIError('--admin-password is required.')
if len(admin_password) < 6:
raise CLIError('--admin-password must be at least 6 characters')
windows_profile = {
"adminUsername": admin_username,
"adminPassword": admin_password,
}
return windows_profile
return None
def _generate_master_pool_profile(api_version, master_profile, master_count, dns_name_prefix,
master_vm_size, master_osdisk_size, master_vnet_subnet_id,
master_first_consecutive_static_ip, master_storage_profile):
master_pool_profile = {}
default_master_pool_profile = {
"count": int(master_count),
"dnsPrefix": dns_name_prefix + 'mgmt',
}
if api_version == "2017-07-01":
default_master_pool_profile = _update_dict(default_master_pool_profile, {
"count": int(master_count),
"dnsPrefix": dns_name_prefix + 'mgmt',
"vmSize": master_vm_size,
"osDiskSizeGB": int(master_osdisk_size),
"vnetSubnetID": master_vnet_subnet_id,
"firstConsecutiveStaticIP": master_first_consecutive_static_ip,
"storageProfile": master_storage_profile,
})
if not master_profile:
master_pool_profile = default_master_pool_profile
else:
master_pool_profile = _update_dict(default_master_pool_profile, master_profile)
return master_pool_profile
def _generate_agent_pool_profiles(api_version, agent_profiles, agent_count, dns_name_prefix,
agent_vm_size, os_type, agent_osdisk_size, agent_vnet_subnet_id,
agent_ports, agent_storage_profile):
agent_pool_profiles = []
default_agent_pool_profile = {
"count": int(agent_count),
"vmSize": agent_vm_size,
"osType": os_type,
"dnsPrefix": dns_name_prefix + 'agent',
}
if api_version == "2017-07-01":
default_agent_pool_profile = _update_dict(default_agent_pool_profile, {
"count": int(agent_count),
"vmSize": agent_vm_size,
"osDiskSizeGB": int(agent_osdisk_size),
"osType": os_type,
"dnsPrefix": dns_name_prefix + 'agent',
"vnetSubnetID": agent_vnet_subnet_id,
"ports": agent_ports,
"storageProfile": agent_storage_profile,
})
if agent_profiles is None:
agent_pool_profiles.append(_update_dict(default_agent_pool_profile, {"name": "agentpool0"}))
else:
# override agentPoolProfiles by using the passed in agent_profiles
for idx, ap in enumerate(agent_profiles):
# if the user specified dnsPrefix, we honor that
# otherwise, we use the idx to avoid duplicate dns name
a = _update_dict({"dnsPrefix": dns_name_prefix + 'agent' + str(idx)}, ap)
agent_pool_profiles.append(_update_dict(default_agent_pool_profile, a))
return agent_pool_profiles
def _generate_outputs(name, orchestrator_type, admin_username):
# define outputs
outputs = {
"masterFQDN": {
"type": "string",
"value": "[reference(concat('Microsoft.ContainerService/containerServices/', '{}')).masterProfile.fqdn]".format(name) # pylint: disable=line-too-long
},
"sshMaster0": {
"type": "string",
"value": "[concat('ssh ', '{0}', '@', reference(concat('Microsoft.ContainerService/containerServices/', '{1}')).masterProfile.fqdn, ' -A -p 22')]".format(admin_username, name) # pylint: disable=line-too-long
},
}
if orchestrator_type.lower() != "kubernetes":
outputs["agentFQDN"] = {
"type": "string",
"value": "[reference(concat('Microsoft.ContainerService/containerServices/', '{}')).agentPoolProfiles[0].fqdn]".format(name) # pylint: disable=line-too-long
}
# override sshMaster0 for non-kubernetes scenarios
outputs["sshMaster0"] = {
"type": "string",
"value": "[concat('ssh ', '{0}', '@', reference(concat('Microsoft.ContainerService/containerServices/', '{1}')).masterProfile.fqdn, ' -A -p 2200')]".format(admin_username, name) # pylint: disable=line-too-long
}
return outputs
def _generate_properties(api_version, orchestrator_type, orchestrator_version, master_pool_profile,
agent_pool_profiles, ssh_key_value, admin_username, windows_profile):
properties = {
"orchestratorProfile": {
"orchestratorType": orchestrator_type,
},
"masterProfile": master_pool_profile,
"agentPoolProfiles": agent_pool_profiles,
"linuxProfile": {
"ssh": {
"publicKeys": [
{
"keyData": ssh_key_value
}
]
},
"adminUsername": admin_username
},
}
if api_version == "2017-07-01":
properties["orchestratorProfile"]["orchestratorVersion"] = orchestrator_version
if windows_profile is not None:
properties["windowsProfile"] = windows_profile
return properties
# pylint: disable=too-many-locals
def acs_create(cmd, client, resource_group_name, deployment_name, name, ssh_key_value, dns_name_prefix=None,
location=None, admin_username="azureuser", api_version=None, master_profile=None,
master_vm_size="Standard_D2_v2", master_osdisk_size=0, master_count=1, master_vnet_subnet_id="",
master_first_consecutive_static_ip="10.240.255.5", master_storage_profile="",
agent_profiles=None, agent_vm_size="Standard_D2_v2", agent_osdisk_size=0,
agent_count=3, agent_vnet_subnet_id="", agent_ports=None, agent_storage_profile="",
orchestrator_type="DCOS", orchestrator_version="", service_principal=None, client_secret=None, tags=None,
windows=False, admin_password="", generate_ssh_keys=False, # pylint: disable=unused-argument
validate=False, no_wait=False):
"""Create a new Acs.
:param resource_group_name: The name of the resource group. The name
is case insensitive.
:type resource_group_name: str
:param deployment_name: The name of the deployment.
:type deployment_name: str
:param dns_name_prefix: Sets the Domain name prefix for the cluster.
The concatenation of the domain name and the regionalized DNS zone
make up the fully qualified domain name associated with the public
IP address.
:type dns_name_prefix: str
:param name: Resource name for the container service.
:type name: str
:param ssh_key_value: Configure all linux machines with the SSH RSA
public key string. Your key should include three parts, for example
    'ssh-rsa AAAAB...snip...UcyupgH azureuser@linuxvm'
:type ssh_key_value: str
:param content_version: If included it must match the ContentVersion
in the template.
:type content_version: str
:param admin_username: User name for the Linux Virtual Machines.
:type admin_username: str
:param api_version: ACS API version to use
:type api_version: str
:param master_profile: MasterProfile used to describe master pool
:type master_profile: dict
:param master_vm_size: The size of master pool Virtual Machine
:type master_vm_size: str
:param master_osdisk_size: The osDisk size in GB of master pool Virtual Machine
:type master_osdisk_size: int
:param master_count: The number of masters for the cluster.
:type master_count: int
:param master_vnet_subnet_id: The vnet subnet id for master pool
:type master_vnet_subnet_id: str
:param master_storage_profile: The storage profile used for master pool.
Possible value could be StorageAccount, ManagedDisk.
:type master_storage_profile: str
:param agent_profiles: AgentPoolProfiles used to describe agent pools
:type agent_profiles: dict
:param agent_vm_size: The size of the Virtual Machine.
:type agent_vm_size: str
:param agent_osdisk_size: The osDisk size in GB of agent pool Virtual Machine
:type agent_osdisk_size: int
:param agent_vnet_subnet_id: The vnet subnet id for master pool
:type agent_vnet_subnet_id: str
:param agent_ports: the ports exposed on the agent pool
:type agent_ports: list
:param agent_storage_profile: The storage profile used for agent pool.
Possible value could be StorageAccount, ManagedDisk.
:type agent_storage_profile: str
:param location: Location for VM resources.
:type location: str
:param orchestrator_type: The type of orchestrator used to manage the
applications on the cluster.
:type orchestrator_type: str or :class:`orchestratorType
<Default.models.orchestratorType>`
:param tags: Tags object.
:type tags: object
:param windows: If true, the cluster will be built for running Windows container.
:type windows: bool
    :param admin_password: The administration password for Windows nodes. Only available if --windows=true
:type admin_password: str
:param bool raw: returns the direct response alongside the
deserialized response
:rtype:
:class:`AzureOperationPoller<msrestazure.azure_operation.AzureOperationPoller>`
instance that returns :class:`DeploymentExtended
<Default.models.DeploymentExtended>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
if ssh_key_value is not None and not is_valid_ssh_rsa_public_key(ssh_key_value):
raise CLIError('Provided ssh key ({}) is invalid or non-existent'.format(ssh_key_value))
subscription_id = get_subscription_id(cmd.cli_ctx)
if not dns_name_prefix:
dns_name_prefix = _get_default_dns_prefix(name, resource_group_name, subscription_id)
rg_location = _get_rg_location(cmd.cli_ctx, resource_group_name)
if location is None:
location = rg_location
# if api-version is not specified, or specified in a version not supported
# override based on location
if api_version is None or api_version not in ["2017-01-31", "2017-07-01"]:
if location in regions_in_preview:
api_version = "2017-07-01" # 2017-07-01 supported in the preview locations
else:
api_version = "2017-01-31" # 2017-01-31 applied to other locations
if orchestrator_type.lower() == 'kubernetes':
principal_obj = _ensure_service_principal(cmd.cli_ctx, service_principal, client_secret, subscription_id,
dns_name_prefix, location, name)
client_secret = principal_obj.get("client_secret")
service_principal = principal_obj.get("service_principal")
elif windows:
raise CLIError('--windows is only supported for Kubernetes clusters')
# set location if void
if not location:
location = '[resourceGroup().location]'
# set os_type
os_type = 'Linux'
if windows:
os_type = 'Windows'
# set agent_ports if void
if not agent_ports:
agent_ports = []
# get windows_profile
windows_profile = _generate_windows_profile(windows, admin_username, admin_password)
# The resources.properties fields should match with ContainerServices' api model
master_pool_profile = _generate_master_pool_profile(api_version, master_profile, master_count, dns_name_prefix,
master_vm_size, master_osdisk_size, master_vnet_subnet_id,
master_first_consecutive_static_ip, master_storage_profile)
agent_pool_profiles = _generate_agent_pool_profiles(api_version, agent_profiles, agent_count, dns_name_prefix,
agent_vm_size, os_type, agent_osdisk_size, agent_vnet_subnet_id,
agent_ports, agent_storage_profile)
outputs = _generate_outputs(name, orchestrator_type, admin_username)
properties = _generate_properties(api_version, orchestrator_type, orchestrator_version, master_pool_profile,
agent_pool_profiles, ssh_key_value, admin_username, windows_profile)
resource = {
"apiVersion": api_version,
"location": location,
"type": "Microsoft.ContainerService/containerServices",
"name": name,
"tags": tags,
"properties": properties,
}
template = {
"$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#",
"contentVersion": "1.0.0.0",
"resources": [
resource,
],
"outputs": outputs,
}
params = {}
if service_principal is not None and client_secret is not None:
properties["servicePrincipalProfile"] = {
"clientId": service_principal,
"secret": "[parameters('clientSecret')]",
}
template["parameters"] = {
"clientSecret": {
"type": "secureString",
"metadata": {
"description": "The client secret for the service principal"
}
}
}
params = {
"clientSecret": {
"value": client_secret
}
}
# Due to SPN replication latency, we do a few retries here
max_retry = 30
retry_exception = Exception(None)
for _ in range(0, max_retry):
try:
return _invoke_deployment(cmd.cli_ctx, resource_group_name, deployment_name,
template, params, validate, no_wait)
except CloudError as ex:
retry_exception = ex
if 'is not valid according to the validation procedure' in ex.message or \
'The credentials in ServicePrincipalProfile were invalid' in ex.message or \
'not found in Active Directory tenant' in ex.message:
time.sleep(3)
else:
raise ex
raise retry_exception
def store_acs_service_principal(subscription_id, client_secret, service_principal,
file_name='acsServicePrincipal.json'):
obj = {}
if client_secret:
obj['client_secret'] = client_secret
if service_principal:
obj['service_principal'] = service_principal
config_path = os.path.join(get_config_dir(), file_name)
full_config = load_service_principals(config_path=config_path)
if not full_config:
full_config = {}
full_config[subscription_id] = obj
with os.fdopen(os.open(config_path, os.O_RDWR | os.O_CREAT | os.O_TRUNC, 0o600),
'w+') as spFile:
json.dump(full_config, spFile)
def load_acs_service_principal(subscription_id, file_name='acsServicePrincipal.json'):
config_path = os.path.join(get_config_dir(), file_name)
config = load_service_principals(config_path)
if not config:
return None
return config.get(subscription_id)
def load_service_principals(config_path):
if not os.path.exists(config_path):
return None
fd = os.open(config_path, os.O_RDONLY)
try:
with os.fdopen(fd) as f:
return shell_safe_json_parse(f.read())
except: # pylint: disable=bare-except
return None
def _invoke_deployment(cli_ctx, resource_group_name, deployment_name, template, parameters, validate, no_wait,
subscription_id=None):
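    # Builds an incremental-mode ARM deployment from the in-memory template and
    # parameters; with validation requested the template is only checked (and
    # logged at INFO level), otherwise create_or_update is issued, honoring --no-wait.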
from azure.cli.core.profiles import ResourceType, get_sdk
DeploymentProperties = get_sdk(cli_ctx, ResourceType.MGMT_RESOURCE_RESOURCES, 'DeploymentProperties', mod='models')
properties = DeploymentProperties(template=template, parameters=parameters, mode='incremental')
smc = get_mgmt_service_client(cli_ctx, ResourceType.MGMT_RESOURCE_RESOURCES,
subscription_id=subscription_id).deployments
if validate:
logger.info('==== BEGIN TEMPLATE ====')
logger.info(json.dumps(template, indent=2))
logger.info('==== END TEMPLATE ====')
return smc.validate(resource_group_name, deployment_name, properties)
return sdk_no_wait(no_wait, smc.create_or_update, resource_group_name, deployment_name, properties)
def k8s_get_credentials(cmd, client, name, resource_group_name,
path=os.path.join(os.path.expanduser('~'), '.kube', 'config'),
ssh_key_file=None,
overwrite_existing=False):
"""Download and install kubectl credentials from the cluster master
:param name: The name of the cluster.
:type name: str
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param path: Where to install the kubectl config file
:type path: str
:param ssh_key_file: Path to an SSH key file to use
:type ssh_key_file: str
"""
acs_info = _get_acs_info(cmd.cli_ctx, name, resource_group_name)
_k8s_get_credentials_internal(name, acs_info, path, ssh_key_file, overwrite_existing)
def _k8s_get_credentials_internal(name, acs_info, path, ssh_key_file, overwrite_existing):
if ssh_key_file is not None and not os.path.isfile(ssh_key_file):
raise CLIError('Private key file {} does not exist'.format(ssh_key_file))
dns_prefix = acs_info.master_profile.dns_prefix # pylint: disable=no-member
location = acs_info.location # pylint: disable=no-member
user = acs_info.linux_profile.admin_username # pylint: disable=no-member
_mkdir_p(os.path.dirname(path))
path_candidate = path
ix = 0
while os.path.exists(path_candidate):
ix += 1
path_candidate = '{}-{}-{}'.format(path, name, ix)
# TODO: this only works for public cloud, need other casing for national clouds
acs_client.secure_copy(user, '{}.{}.cloudapp.azure.com'.format(dns_prefix, location),
'.kube/config', path_candidate, key_filename=ssh_key_file)
# merge things
if path_candidate != path:
try:
merge_kubernetes_configurations(path, path_candidate, overwrite_existing)
except yaml.YAMLError as exc:
logger.warning('Failed to merge credentials to kube config file: %s', exc)
logger.warning('The credentials have been saved to %s', path_candidate)
def _handle_merge(existing, addition, key, replace):
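    # Merges one kubeconfig section (clusters/users/contexts): new entries are
    # appended; on a name collision the old entry is dropped when `replace` is
    # set or the entries are identical, otherwise the user is prompted and a
    # CLIError is raised if they decline (or no TTY is available).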
if not addition.get(key, False):
return
if existing[key] is None:
existing[key] = addition[key]
return
for i in addition[key]:
for j in existing[key]:
if not i.get('name', False) or not j.get('name', False):
continue
if i['name'] == j['name']:
if replace or i == j:
existing[key].remove(j)
else:
from knack.prompting import prompt_y_n, NoTTYException
msg = 'A different object named {} already exists in your kubeconfig file.\nOverwrite?'
overwrite = False
try:
overwrite = prompt_y_n(msg.format(i['name']))
except NoTTYException:
pass
if overwrite:
existing[key].remove(j)
else:
msg = 'A different object named {} already exists in {} in your kubeconfig file.'
raise CLIError(msg.format(i['name'], key))
existing[key].append(i)
def load_kubernetes_configuration(filename):
try:
with open(filename) as stream:
return yaml.safe_load(stream)
except (IOError, OSError) as ex:
if getattr(ex, 'errno', 0) == errno.ENOENT:
raise CLIError('{} does not exist'.format(filename))
raise
except (yaml.parser.ParserError, UnicodeDecodeError) as ex:
raise CLIError('Error parsing {} ({})'.format(filename, str(ex)))
def merge_kubernetes_configurations(existing_file, addition_file, replace, context_name=None):
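    # Loads both kubeconfig files, optionally renames the new context, suffixes
    # admin contexts with '-admin' so they do not clobber user contexts, merges
    # clusters/users/contexts into the existing file, updates current-context,
    # and warns when the resulting file is not chmod 600 on non-Windows hosts.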
existing = load_kubernetes_configuration(existing_file)
addition = load_kubernetes_configuration(addition_file)
    if addition is None:
        raise CLIError('failed to load additional configuration from {}'.format(addition_file))
    if context_name is not None:
        addition['contexts'][0]['name'] = context_name
        addition['contexts'][0]['context']['cluster'] = context_name
        addition['clusters'][0]['name'] = context_name
        addition['current-context'] = context_name
    # rename the admin context so it doesn't overwrite the user context
    for ctx in addition.get('contexts', []):
        try:
            if ctx['context']['user'].startswith('clusterAdmin'):
                admin_name = ctx['name'] + '-admin'
                addition['current-context'] = ctx['name'] = admin_name
                break
        except (KeyError, TypeError):
            continue
    if existing is None:
        existing = addition
else:
_handle_merge(existing, addition, 'clusters', replace)
_handle_merge(existing, addition, 'users', replace)
_handle_merge(existing, addition, 'contexts', replace)
existing['current-context'] = addition['current-context']
# check that ~/.kube/config is only read- and writable by its owner
if platform.system() != 'Windows':
existing_file_perms = "{:o}".format(stat.S_IMODE(os.lstat(existing_file).st_mode))
if not existing_file_perms.endswith('600'):
logger.warning('%s has permissions "%s".\nIt should be readable and writable only by its owner.',
existing_file, existing_file_perms)
with open(existing_file, 'w+') as stream:
yaml.safe_dump(existing, stream, default_flow_style=False)
current_context = addition.get('current-context', 'UNKNOWN')
msg = 'Merged "{}" as current context in {}'.format(current_context, existing_file)
print(msg)
def _get_host_name(acs_info):
"""
Gets the FQDN from the acs_info object.
:param acs_info: ContainerService object from Azure REST API
:type acs_info: ContainerService
"""
if acs_info is None:
raise CLIError('Missing acs_info')
if acs_info.master_profile is None:
raise CLIError('Missing master_profile')
if acs_info.master_profile.fqdn is None:
raise CLIError('Missing fqdn')
return acs_info.master_profile.fqdn
def _get_username(acs_info):
"""
Gets the admin user name from the Linux profile of the ContainerService object.
:param acs_info: ContainerService object from Azure REST API
:type acs_info: ContainerService
"""
if acs_info.linux_profile is not None:
return acs_info.linux_profile.admin_username
return None
def _get_acs_info(cli_ctx, name, resource_group_name):
"""
Gets the ContainerService object from Azure REST API.
:param name: ACS resource name
:type name: String
:param resource_group_name: Resource group name
:type resource_group_name: String
"""
container_services = cf_container_services(cli_ctx, None)
return container_services.get(resource_group_name, name)
def _rand_str(n):
"""
Gets a random string
"""
choices = string.ascii_lowercase + string.digits
return ''.join(random.SystemRandom().choice(choices) for _ in range(n))
def _mkdir_p(path):
# http://stackoverflow.com/a/600612
try:
os.makedirs(path)
except OSError as exc: # Python >2.5
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
def update_acs(cmd, client, resource_group_name, container_service_name, new_agent_count):
instance = client.get(resource_group_name, container_service_name)
instance.agent_pool_profiles[0].count = new_agent_count # pylint: disable=no-member
# null out the service principal because otherwise validation complains
if instance.orchestrator_profile.orchestrator_type == ContainerServiceOrchestratorTypes.kubernetes:
instance.service_principal_profile = None
# null out the windows profile so that validation doesn't complain about not having the admin password
instance.windows_profile = None
return client.create_or_update(resource_group_name, container_service_name, instance)
def list_container_services(cmd, client, resource_group_name=None):
''' List Container Services. '''
svc_list = client.list_by_resource_group(resource_group_name=resource_group_name) \
if resource_group_name else client.list()
return list(svc_list)
def show_service_principal(client, identifier):
object_id = _resolve_service_principal(client, identifier)
return client.get(object_id)
def _resolve_service_principal(client, identifier):
# todo: confirm with graph team that a service principal name must be unique
result = list(client.list(filter="servicePrincipalNames/any(c:c eq '{}')".format(identifier)))
if result:
return result[0].object_id
try:
uuid.UUID(identifier)
return identifier # assume an object id
except ValueError:
raise CLIError("service principal '{}' doesn't exist".format(identifier))
def create_application(client, display_name, homepage, identifier_uris,
available_to_other_tenants=False, password=None, reply_urls=None,
key_value=None, key_type=None, key_usage=None, start_date=None,
end_date=None, required_resource_accesses=None):
from azure.graphrbac.models import GraphErrorException
password_creds, key_creds = _build_application_creds(password, key_value, key_type,
key_usage, start_date, end_date)
app_create_param = ApplicationCreateParameters(available_to_other_tenants=available_to_other_tenants,
display_name=display_name,
identifier_uris=identifier_uris,
homepage=homepage,
reply_urls=reply_urls,
key_credentials=key_creds,
password_credentials=password_creds,
required_resource_access=required_resource_accesses)
try:
return client.create(app_create_param)
except GraphErrorException as ex:
if 'insufficient privileges' in str(ex).lower():
link = 'https://docs.microsoft.com/azure/azure-resource-manager/resource-group-create-service-principal-portal' # pylint: disable=line-too-long
raise CLIError("Directory permission is needed for the current user to register the application. "
"For how to configure, please refer '{}'. Original error: {}".format(link, ex))
raise
def update_application(client, object_id, display_name, homepage, identifier_uris,
available_to_other_tenants=False, password=None, reply_urls=None,
key_value=None, key_type=None, key_usage=None, start_date=None,
end_date=None, required_resource_accesses=None):
from azure.graphrbac.models import GraphErrorException
password_creds, key_creds = _build_application_creds(password, key_value, key_type,
key_usage, start_date, end_date)
try:
if key_creds:
client.update_key_credentials(object_id, key_creds)
if password_creds:
client.update_password_credentials(object_id, password_creds)
if reply_urls:
client.patch(object_id, ApplicationUpdateParameters(reply_urls=reply_urls))
return
except GraphErrorException as ex:
if 'insufficient privileges' in str(ex).lower():
link = 'https://docs.microsoft.com/azure/azure-resource-manager/resource-group-create-service-principal-portal' # pylint: disable=line-too-long
raise CLIError("Directory permission is needed for the current user to register the application. "
"For how to configure, please refer '{}'. Original error: {}".format(link, ex))
raise
def _build_application_creds(password=None, key_value=None, key_type=None,
key_usage=None, start_date=None, end_date=None):
if password and key_value:
raise CLIError('specify either --password or --key-value, but not both.')
if not start_date:
start_date = datetime.datetime.utcnow()
elif isinstance(start_date, str):
start_date = dateutil.parser.parse(start_date)
if not end_date:
end_date = start_date + relativedelta(years=1)
elif isinstance(end_date, str):
end_date = dateutil.parser.parse(end_date)
key_type = key_type or 'AsymmetricX509Cert'
key_usage = key_usage or 'Verify'
password_creds = None
key_creds = None
if password:
password_creds = [PasswordCredential(start_date=start_date, end_date=end_date,
key_id=str(uuid.uuid4()), value=password)]
elif key_value:
key_creds = [KeyCredential(start_date=start_date, end_date=end_date, value=key_value,
key_id=str(uuid.uuid4()), usage=key_usage, type=key_type)]
return (password_creds, key_creds)
def create_service_principal(cli_ctx, identifier, resolve_app=True, rbac_client=None):
if rbac_client is None:
rbac_client = get_graph_rbac_management_client(cli_ctx)
if resolve_app:
try:
uuid.UUID(identifier)
result = list(rbac_client.applications.list(filter="appId eq '{}'".format(identifier)))
except ValueError:
result = list(rbac_client.applications.list(
filter="identifierUris/any(s:s eq '{}')".format(identifier)))
if not result: # assume we get an object id
result = [rbac_client.applications.get(identifier)]
app_id = result[0].app_id
else:
app_id = identifier
return rbac_client.service_principals.create(ServicePrincipalCreateParameters(app_id=app_id, account_enabled=True))
def create_role_assignment(cli_ctx, role, assignee, resource_group_name=None, scope=None):
return _create_role_assignment(cli_ctx, role, assignee, resource_group_name, scope)
def _create_role_assignment(cli_ctx, role, assignee, resource_group_name=None, scope=None, resolve_assignee=True):
from azure.cli.core.profiles import ResourceType, get_sdk
factory = get_auth_management_client(cli_ctx, scope)
assignments_client = factory.role_assignments
definitions_client = factory.role_definitions
scope = _build_role_scope(resource_group_name, scope, assignments_client.config.subscription_id)
role_id = _resolve_role_id(role, scope, definitions_client)
object_id = _resolve_object_id(cli_ctx, assignee) if resolve_assignee else assignee
RoleAssignmentCreateParameters = get_sdk(cli_ctx, ResourceType.MGMT_AUTHORIZATION,
'RoleAssignmentCreateParameters', mod='models',
operation_group='role_assignments')
parameters = RoleAssignmentCreateParameters(role_definition_id=role_id, principal_id=object_id)
assignment_name = uuid.uuid4()
custom_headers = None
return assignments_client.create(scope, assignment_name, parameters, custom_headers=custom_headers)
def _build_role_scope(resource_group_name, scope, subscription_id):
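    # Produces the ARM scope string, e.g. (illustrative ids):
    #   '/subscriptions/0000...'                       when neither scope nor RG is given
    #   '/subscriptions/0000.../resourceGroups/my-rg'  when only an RG is given
    # A supplied scope is used verbatim, and combining it with an RG is an error.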
subscription_scope = '/subscriptions/' + subscription_id
if scope:
if resource_group_name:
err = 'Resource group "{}" is redundant because scope is supplied'
raise CLIError(err.format(resource_group_name))
elif resource_group_name:
scope = subscription_scope + '/resourceGroups/' + resource_group_name
else:
scope = subscription_scope
return scope
def _resolve_role_id(role, scope, definitions_client):
role_id = None
try:
uuid.UUID(role)
role_id = role
except ValueError:
pass
if not role_id: # retrieve role id
role_defs = list(definitions_client.list(scope, "roleName eq '{}'".format(role)))
if not role_defs:
raise CLIError("Role '{}' doesn't exist.".format(role))
if len(role_defs) > 1:
ids = [r.id for r in role_defs]
err = "More than one role matches the given name '{}'. Please pick a value from '{}'"
raise CLIError(err.format(role, ids))
role_id = role_defs[0].id
return role_id
def _resolve_object_id(cli_ctx, assignee):
client = get_graph_rbac_management_client(cli_ctx)
result = None
if assignee.find('@') >= 0: # looks like a user principal name
result = list(client.users.list(filter="userPrincipalName eq '{}'".format(assignee)))
if not result:
result = list(client.service_principals.list(
filter="servicePrincipalNames/any(c:c eq '{}')".format(assignee)))
if not result: # assume an object id, let us verify it
result = _get_object_stubs(client, [assignee])
# 2+ matches should never happen, so we only check 'no match' here
if not result:
raise CLIError("No matches in graph database for '{}'".format(assignee))
return result[0].object_id
def _get_object_stubs(graph_client, assignees):
params = GetObjectsParameters(include_directory_object_references=True,
object_ids=assignees)
return list(graph_client.objects.get_objects_by_object_ids(params))
def _update_dict(dict1, dict2):
cp = dict1.copy()
cp.update(dict2)
return cp
def subnet_role_assignment_exists(cli_ctx, scope):
network_contributor_role_id = "4d97b98b-1d4f-4787-a291-c67834d212e7"
factory = get_auth_management_client(cli_ctx, scope)
assignments_client = factory.role_assignments
for i in assignments_client.list_for_scope(scope=scope, filter='atScope()'):
if i.scope == scope and i.role_definition_id.endswith(network_contributor_role_id):
return True
return False
# pylint: disable=too-many-statements
def aks_browse(cmd, client, resource_group_name, name, disable_browser=False,
listen_address='127.0.0.1', listen_port='8001'):
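    # Browse flow: require kubectl, confirm the kube-dashboard addon is enabled,
    # write user credentials to a temp kubeconfig, locate the dashboard pod and
    # its container port, then run `kubectl port-forward` to expose it locally
    # (in Cloud Console a proxied URL is surfaced instead of a local browser).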
if not which('kubectl'):
raise CLIError('Can not find kubectl executable in PATH')
# verify the kube-dashboard addon was not disabled
instance = client.get(resource_group_name, name)
addon_profiles = instance.addon_profiles or {}
addon_profile = addon_profiles.get("kubeDashboard", ManagedClusterAddonProfile(enabled=True))
if not addon_profile.enabled:
raise CLIError('The kube-dashboard addon was disabled for this managed cluster.\n'
'To use "az aks browse" first enable the add-on\n'
'by running "az aks enable-addons --addons kube-dashboard".')
_, browse_path = tempfile.mkstemp()
aks_get_credentials(cmd, client, resource_group_name, name, admin=False, path=browse_path)
# find the dashboard pod's name
try:
dashboard_pod = subprocess.check_output(
["kubectl", "get", "pods", "--kubeconfig", browse_path, "--namespace", "kube-system",
"--output", "name", "--selector", "k8s-app=kubernetes-dashboard"],
universal_newlines=True)
except subprocess.CalledProcessError as err:
raise CLIError('Could not find dashboard pod: {}'.format(err))
if dashboard_pod:
# remove any "pods/" or "pod/" prefix from the name
dashboard_pod = str(dashboard_pod).split('/')[-1].strip()
else:
raise CLIError("Couldn't find the Kubernetes dashboard pod.")
# find the port
try:
dashboard_port = subprocess.check_output(
["kubectl", "get", "pods", "--kubeconfig", browse_path, "--namespace", "kube-system",
"--selector", "k8s-app=kubernetes-dashboard",
"--output", "jsonpath='{.items[0].spec.containers[0].ports[0].containerPort}'"]
)
# output format: b"'{port}'"
dashboard_port = int((dashboard_port.decode('utf-8').replace("'", "")))
except subprocess.CalledProcessError as err:
raise CLIError('Could not find dashboard port: {}'.format(err))
# use https if dashboard container is using https
if dashboard_port == 8443:
protocol = 'https'
else:
protocol = 'http'
proxy_url = '{0}://{1}:{2}/'.format(protocol, listen_address, listen_port)
# launch kubectl port-forward locally to access the remote dashboard
if in_cloud_console():
# TODO: better error handling here.
response = requests.post('http://localhost:8888/openport/{0}'.format(listen_port))
result = json.loads(response.text)
term_id = os.environ.get('ACC_TERM_ID')
if term_id:
response = requests.post('http://localhost:8888/openLink/{}'.format(term_id),
json={"url": result['url']})
logger.warning('To view the console, please open %s in a new tab', result['url'])
else:
logger.warning('Proxy running on %s', proxy_url)
logger.warning('Press CTRL+C to close the tunnel...')
if not disable_browser:
wait_then_open_async(proxy_url)
try:
try:
subprocess.check_output(["kubectl", "--kubeconfig", browse_path, "--namespace", "kube-system",
"port-forward", "--address", listen_address, dashboard_pod,
"{0}:{1}".format(listen_port, dashboard_port)], stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as err:
                if b'unknown flag: --address' in err.output:
if listen_address != '127.0.0.1':
logger.warning('"--address" is only supported in kubectl v1.13 and later.')
logger.warning('The "--listen-address" argument will be ignored.')
subprocess.call(["kubectl", "--kubeconfig", browse_path, "--namespace", "kube-system",
"port-forward", dashboard_pod, "{0}:{1}".format(listen_port, dashboard_port)])
except KeyboardInterrupt:
# Let command processing finish gracefully after the user presses [Ctrl+C]
pass
finally:
if in_cloud_console():
requests.post('http://localhost:8888/closeport/8001')
def _trim_nodepoolname(nodepool_name):
if not nodepool_name:
return "nodepool1"
return nodepool_name[:12]
def _validate_ssh_key(no_ssh_key, ssh_key_value):
if not no_ssh_key:
try:
if not ssh_key_value or not is_valid_ssh_rsa_public_key(ssh_key_value):
raise ValueError()
except (TypeError, ValueError):
shortened_key = truncate_text(ssh_key_value)
raise CLIError('Provided ssh key ({}) is invalid or non-existent'.format(shortened_key))
# pylint: disable=too-many-statements,too-many-branches
def aks_create(cmd, client, resource_group_name, name, ssh_key_value, # pylint: disable=too-many-locals
dns_name_prefix=None,
location=None,
admin_username="azureuser",
kubernetes_version='',
node_vm_size="Standard_DS2_v2",
node_osdisk_size=0,
node_count=3,
nodepool_name="nodepool1",
service_principal=None, client_secret=None,
no_ssh_key=False,
disable_rbac=None,
enable_rbac=None,
vm_set_type=None,
skip_subnet_role_assignment=False,
enable_cluster_autoscaler=False,
network_plugin=None,
network_policy=None,
pod_cidr=None,
service_cidr=None,
dns_service_ip=None,
docker_bridge_address=None,
load_balancer_sku=None,
load_balancer_managed_outbound_ip_count=None,
load_balancer_outbound_ips=None,
load_balancer_outbound_ip_prefixes=None,
enable_addons=None,
workspace_resource_id=None,
vnet_subnet_id=None,
max_pods=0,
min_count=None,
max_count=None,
aad_client_app_id=None,
aad_server_app_id=None,
aad_server_app_secret=None,
aad_tenant_id=None,
tags=None,
zones=None,
generate_ssh_keys=False, # pylint: disable=unused-argument
api_server_authorized_ip_ranges=None,
attach_acr=None,
no_wait=False):
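    # Create flow: validate the SSH key, fill in DNS prefix/location defaults,
    # build the agent pool / linux / service-principal / network / addon / AAD
    # profiles, assemble a ManagedCluster model, then call create_or_update with
    # up to 30 retries to absorb service principal replication delays.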
_validate_ssh_key(no_ssh_key, ssh_key_value)
subscription_id = get_subscription_id(cmd.cli_ctx)
if not dns_name_prefix:
dns_name_prefix = _get_default_dns_prefix(name, resource_group_name, subscription_id)
rg_location = _get_rg_location(cmd.cli_ctx, resource_group_name)
if location is None:
location = rg_location
vm_set_type = _set_vm_set_type(vm_set_type, kubernetes_version)
load_balancer_sku = _set_load_balancer_sku(load_balancer_sku, kubernetes_version)
if api_server_authorized_ip_ranges and load_balancer_sku == "basic":
raise CLIError('--api-server-authorized-ip-ranges can only be used with standard load balancer')
agent_pool_profile = ManagedClusterAgentPoolProfile(
name=_trim_nodepoolname(nodepool_name), # Must be 12 chars or less before ACS RP adds to it
count=int(node_count),
vm_size=node_vm_size,
os_type="Linux",
storage_profile=ContainerServiceStorageProfileTypes.managed_disks,
vnet_subnet_id=vnet_subnet_id,
availability_zones=zones,
max_pods=int(max_pods) if max_pods else None,
type=vm_set_type
)
if node_osdisk_size:
agent_pool_profile.os_disk_size_gb = int(node_osdisk_size)
_check_cluster_autoscaler_flag(enable_cluster_autoscaler, min_count, max_count, node_count, agent_pool_profile)
linux_profile = None
# LinuxProfile is just used for SSH access to VMs, so omit it if --no-ssh-key was specified.
if not no_ssh_key:
ssh_config = ContainerServiceSshConfiguration(
public_keys=[ContainerServiceSshPublicKey(key_data=ssh_key_value)])
linux_profile = ContainerServiceLinuxProfile(admin_username=admin_username, ssh=ssh_config)
principal_obj = _ensure_aks_service_principal(cmd.cli_ctx,
service_principal=service_principal, client_secret=client_secret,
subscription_id=subscription_id, dns_name_prefix=dns_name_prefix,
location=location, name=name)
service_principal_profile = ManagedClusterServicePrincipalProfile(
client_id=principal_obj.get("service_principal"),
secret=principal_obj.get("client_secret"),
key_vault_secret_ref=None)
if (vnet_subnet_id and not skip_subnet_role_assignment and
not subnet_role_assignment_exists(cmd.cli_ctx, vnet_subnet_id)):
scope = vnet_subnet_id
if not _add_role_assignment(cmd.cli_ctx, 'Network Contributor',
service_principal_profile.client_id, scope=scope):
logger.warning('Could not create a role assignment for subnet. '
'Are you an Owner on this subscription?')
load_balancer_profile = _get_load_balancer_profile(
load_balancer_managed_outbound_ip_count,
load_balancer_outbound_ips,
load_balancer_outbound_ip_prefixes)
if attach_acr:
_ensure_aks_acr(cmd.cli_ctx,
client_id=service_principal_profile.client_id,
acr_name_or_id=attach_acr,
subscription_id=subscription_id)
network_profile = None
if any([network_plugin, pod_cidr, service_cidr, dns_service_ip, docker_bridge_address, network_policy]):
if not network_plugin:
raise CLIError('Please explicitly specify the network plugin type')
if pod_cidr and network_plugin == "azure":
raise CLIError('Please use kubenet as the network plugin type when pod_cidr is specified')
network_profile = ContainerServiceNetworkProfile(
network_plugin=network_plugin,
pod_cidr=pod_cidr,
service_cidr=service_cidr,
dns_service_ip=dns_service_ip,
docker_bridge_cidr=docker_bridge_address,
network_policy=network_policy,
load_balancer_sku=load_balancer_sku.lower(),
load_balancer_profile=load_balancer_profile,
)
else:
if load_balancer_sku.lower() == "standard" or load_balancer_profile:
network_profile = ContainerServiceNetworkProfile(
network_plugin="kubenet",
load_balancer_sku=load_balancer_sku.lower(),
load_balancer_profile=load_balancer_profile,
)
addon_profiles = _handle_addons_args(
cmd,
enable_addons,
subscription_id,
resource_group_name,
{},
workspace_resource_id
)
monitoring = False
if 'omsagent' in addon_profiles:
monitoring = True
_ensure_container_insights_for_monitoring(cmd, addon_profiles['omsagent'])
aad_profile = None
if any([aad_client_app_id, aad_server_app_id, aad_server_app_secret, aad_tenant_id]):
if aad_tenant_id is None:
profile = Profile(cli_ctx=cmd.cli_ctx)
_, _, aad_tenant_id = profile.get_login_credentials()
aad_profile = ManagedClusterAADProfile(
client_app_id=aad_client_app_id,
server_app_id=aad_server_app_id,
server_app_secret=aad_server_app_secret,
tenant_id=aad_tenant_id
)
api_server_access_profile = None
if api_server_authorized_ip_ranges:
api_server_access_profile = _populate_api_server_access_profile(api_server_authorized_ip_ranges)
# Check that both --disable-rbac and --enable-rbac weren't provided
if all([disable_rbac, enable_rbac]):
raise CLIError('specify either "--disable-rbac" or "--enable-rbac", not both.')
mc = ManagedCluster(
location=location,
tags=tags,
dns_prefix=dns_name_prefix,
kubernetes_version=kubernetes_version,
enable_rbac=not disable_rbac,
agent_pool_profiles=[agent_pool_profile],
linux_profile=linux_profile,
service_principal_profile=service_principal_profile,
network_profile=network_profile,
addon_profiles=addon_profiles,
aad_profile=aad_profile,
api_server_access_profile=api_server_access_profile
)
# Due to SPN replication latency, we do a few retries here
max_retry = 30
retry_exception = Exception(None)
for _ in range(0, max_retry):
try:
result = sdk_no_wait(no_wait,
client.create_or_update,
resource_group_name=resource_group_name,
resource_name=name, parameters=mc)
# add cluster spn with Monitoring Metrics Publisher role assignment to the cluster resource
# mdm metrics supported only in azure public cloud so add the role assignment only in this cloud
cloud_name = cmd.cli_ctx.cloud.name
if cloud_name.lower() == 'azurecloud' and monitoring:
from msrestazure.tools import resource_id
cluster_resource_id = resource_id(
subscription=subscription_id,
resource_group=resource_group_name,
namespace='Microsoft.ContainerService', type='managedClusters',
name=name
)
if not _add_role_assignment(cmd.cli_ctx, 'Monitoring Metrics Publisher',
service_principal_profile.client_id, scope=cluster_resource_id):
logger.warning('Could not create a role assignment for monitoring addon. '
'Are you an Owner on this subscription?')
return result
except CloudError as ex:
retry_exception = ex
if 'not found in Active Directory tenant' in ex.message:
time.sleep(3)
else:
raise ex
raise retry_exception
def aks_disable_addons(cmd, client, resource_group_name, name, addons, no_wait=False):
instance = client.get(resource_group_name, name)
subscription_id = get_subscription_id(cmd.cli_ctx)
instance = _update_addons(
cmd,
instance,
subscription_id,
resource_group_name,
addons,
enable=False,
no_wait=no_wait
)
# send the managed cluster representation to update the addon profiles
return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, name, instance)
def aks_enable_addons(cmd, client, resource_group_name, name, addons, workspace_resource_id=None,
subnet_name=None, no_wait=False):
instance = client.get(resource_group_name, name)
subscription_id = get_subscription_id(cmd.cli_ctx)
service_principal_client_id = instance.service_principal_profile.client_id
instance = _update_addons(cmd, instance, subscription_id, resource_group_name, addons, enable=True,
workspace_resource_id=workspace_resource_id, subnet_name=subnet_name, no_wait=no_wait)
if 'omsagent' in instance.addon_profiles:
_ensure_container_insights_for_monitoring(cmd, instance.addon_profiles['omsagent'])
cloud_name = cmd.cli_ctx.cloud.name
# mdm metrics supported only in Azure Public cloud so add the role assignment only in this cloud
if cloud_name.lower() == 'azurecloud':
from msrestazure.tools import resource_id
cluster_resource_id = resource_id(
subscription=subscription_id,
resource_group=resource_group_name,
namespace='Microsoft.ContainerService', type='managedClusters',
name=name
)
if not _add_role_assignment(cmd.cli_ctx, 'Monitoring Metrics Publisher',
service_principal_client_id, scope=cluster_resource_id):
logger.warning('Could not create a role assignment for Monitoring addon. '
'Are you an Owner on this subscription?')
# send the managed cluster representation to update the addon profiles
return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, name, instance)
def aks_get_versions(cmd, client, location):
return client.list_orchestrators(location, resource_type='managedClusters')
def aks_get_credentials(cmd, client, resource_group_name, name, admin=False,
path=os.path.join(os.path.expanduser('~'), '.kube', 'config'),
overwrite_existing=False, context_name=None):
credentialResults = None
if admin:
credentialResults = client.list_cluster_admin_credentials(resource_group_name, name)
else:
credentialResults = client.list_cluster_user_credentials(resource_group_name, name)
if not credentialResults:
raise CLIError("No Kubernetes credentials found.")
try:
kubeconfig = credentialResults.kubeconfigs[0].value.decode(encoding='UTF-8')
_print_or_merge_credentials(path, kubeconfig, overwrite_existing, context_name)
except (IndexError, ValueError):
raise CLIError("Fail to find kubeconfig file.")
ADDONS = {
'http_application_routing': 'httpApplicationRouting',
'monitoring': 'omsagent',
'virtual-node': 'aciConnector',
'kube-dashboard': 'kubeDashboard'
}
def aks_list(cmd, client, resource_group_name=None):
if resource_group_name:
managed_clusters = client.list_by_resource_group(resource_group_name)
else:
managed_clusters = client.list()
return _remove_nulls(list(managed_clusters))
def aks_show(cmd, client, resource_group_name, name):
mc = client.get(resource_group_name, name)
return _remove_nulls([mc])[0]
def aks_update_credentials(cmd, client, resource_group_name, name,
reset_service_principal=False,
reset_aad=False,
service_principal=None,
client_secret=None,
aad_server_app_id=None,
aad_server_app_secret=None,
aad_client_app_id=None,
aad_tenant_id=None,
no_wait=False):
if bool(reset_service_principal) == bool(reset_aad):
        raise CLIError('usage error: --reset-service-principal | --reset-aad')
if reset_service_principal:
if service_principal is None or client_secret is None:
raise CLIError('usage error: --reset-service-principal --service-principal ID --client-secret SECRET')
return sdk_no_wait(no_wait,
client.reset_service_principal_profile,
resource_group_name,
name, service_principal, client_secret)
if not all([aad_client_app_id, aad_server_app_id, aad_server_app_secret]):
raise CLIError('usage error: --reset-aad --aad-client-app-id ID --aad-server-app-id ID '
'--aad-server-app-secret SECRET [--aad-tenant-id ID]')
parameters = {
'clientAppID': aad_client_app_id,
'serverAppID': aad_server_app_id,
'serverAppSecret': aad_server_app_secret,
'tenantID': aad_tenant_id
}
return sdk_no_wait(no_wait,
client.reset_aad_profile,
resource_group_name,
name, parameters)
def aks_scale(cmd, client, resource_group_name, name, node_count, nodepool_name="", no_wait=False):
instance = client.get(resource_group_name, name)
    if len(instance.agent_pool_profiles) > 1 and nodepool_name == "":
        raise CLIError('There is more than one node pool in the cluster. '
                       'Please specify the node pool name or use the "az aks nodepool" command to scale a node pool.')
if node_count == 0:
raise CLIError("Can't scale down to 0 nodes.")
for agent_profile in instance.agent_pool_profiles:
if agent_profile.name == nodepool_name or (nodepool_name == "" and len(instance.agent_pool_profiles) == 1):
agent_profile.count = int(node_count) # pylint: disable=no-member
# null out the SP and AAD profile because otherwise validation complains
instance.service_principal_profile = None
instance.aad_profile = None
return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, name, instance)
raise CLIError('The nodepool "{}" was not found.'.format(nodepool_name))
# pylint: disable=inconsistent-return-statements
def aks_update(cmd, client, resource_group_name, name,
enable_cluster_autoscaler=False,
disable_cluster_autoscaler=False,
update_cluster_autoscaler=False,
min_count=None, max_count=None,
load_balancer_managed_outbound_ip_count=None,
load_balancer_outbound_ips=None,
load_balancer_outbound_ip_prefixes=None,
attach_acr=None,
detach_acr=None,
api_server_authorized_ip_ranges=None,
no_wait=False):
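    # the three autoscaler switches are mutually exclusive; summing the booleans counts
    # how many were supplied, and any load balancer argument marks an LB profile update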
update_autoscaler = enable_cluster_autoscaler + disable_cluster_autoscaler + update_cluster_autoscaler
update_lb_profile = load_balancer_managed_outbound_ip_count is not None or \
load_balancer_outbound_ips is not None or load_balancer_outbound_ip_prefixes is not None
if (update_autoscaler != 1 and not update_lb_profile and
not attach_acr and
not detach_acr and
api_server_authorized_ip_ranges is None):
        raise CLIError('Please specify one or more of "--enable-cluster-autoscaler" or '
                       '"--disable-cluster-autoscaler" or '
                       '"--update-cluster-autoscaler" or '
                       '"--load-balancer-managed-outbound-ip-count" or '
                       '"--load-balancer-outbound-ips" or '
                       '"--load-balancer-outbound-ip-prefixes" or '
                       '"--attach-acr" or "--detach-acr" or '
                       '"--api-server-authorized-ip-ranges".')
instance = client.get(resource_group_name, name)
# For multi-agent pool, use the az aks nodepool command
if update_autoscaler > 0 and len(instance.agent_pool_profiles) > 1:
        raise CLIError('There is more than one node pool in the cluster. Please use the "az aks nodepool" command '
                       'to update autoscaler settings per node pool')
node_count = instance.agent_pool_profiles[0].count
_validate_autoscaler_update_counts(min_count, max_count, node_count, enable_cluster_autoscaler or
update_cluster_autoscaler)
if enable_cluster_autoscaler:
if instance.agent_pool_profiles[0].enable_auto_scaling:
            logger.warning('Cluster autoscaler is already enabled for this node pool.\n'
                           'Please run "az aks update --update-cluster-autoscaler" '
                           'if you want to update min-count or max-count.')
return None
instance.agent_pool_profiles[0].min_count = int(min_count)
instance.agent_pool_profiles[0].max_count = int(max_count)
instance.agent_pool_profiles[0].enable_auto_scaling = True
if update_cluster_autoscaler:
if not instance.agent_pool_profiles[0].enable_auto_scaling:
            raise CLIError('Cluster autoscaler is not enabled for this node pool.\n'
                           'Run "az aks nodepool update --enable-cluster-autoscaler" '
                           'to enable the cluster autoscaler with min-count and max-count.')
instance.agent_pool_profiles[0].min_count = int(min_count)
instance.agent_pool_profiles[0].max_count = int(max_count)
if disable_cluster_autoscaler:
if not instance.agent_pool_profiles[0].enable_auto_scaling:
logger.warning('Cluster autoscaler is already disabled for this node pool.')
return None
instance.agent_pool_profiles[0].enable_auto_scaling = False
instance.agent_pool_profiles[0].min_count = None
instance.agent_pool_profiles[0].max_count = None
subscription_id = get_subscription_id(cmd.cli_ctx)
client_id = instance.service_principal_profile.client_id
if not client_id:
raise CLIError('Cannot get the AKS cluster\'s service principal.')
if attach_acr:
_ensure_aks_acr(cmd.cli_ctx,
client_id=client_id,
acr_name_or_id=attach_acr,
subscription_id=subscription_id)
if detach_acr:
_ensure_aks_acr(cmd.cli_ctx,
client_id=client_id,
acr_name_or_id=detach_acr,
subscription_id=subscription_id,
detach=True)
load_balancer_profile = _get_load_balancer_profile(
load_balancer_managed_outbound_ip_count,
load_balancer_outbound_ips,
load_balancer_outbound_ip_prefixes)
if load_balancer_profile:
instance.network_profile.load_balancer_profile = load_balancer_profile
# empty string is valid as it disables ip whitelisting
if api_server_authorized_ip_ranges is not None:
instance.api_server_access_profile = \
_populate_api_server_access_profile(api_server_authorized_ip_ranges, instance)
return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, name, instance)
# pylint: disable=unused-argument,inconsistent-return-statements
def aks_upgrade(cmd, client, resource_group_name, name, kubernetes_version, control_plane_only=False,
no_wait=False, **kwargs):
instance = client.get(resource_group_name, name)
if instance.kubernetes_version == kubernetes_version:
if instance.provisioning_state == "Succeeded":
logger.warning("The cluster is already on version %s and is not in a failed state. No operations "
"will occur when upgrading to the same version if the cluster is not in a failed state.",
instance.kubernetes_version)
elif instance.provisioning_state == "Failed":
logger.warning("Cluster currently in failed state. Proceeding with upgrade to existing version %s to "
"attempt resolution of failed cluster state.", instance.kubernetes_version)
from knack.prompting import prompt_y_n
upgrade_all = False
instance.kubernetes_version = kubernetes_version
vmas_cluster = False
for agent_profile in instance.agent_pool_profiles:
if agent_profile.type.lower() == "availabilityset":
vmas_cluster = True
break
# for legacy clusters, we always upgrade node pools with CCP.
if instance.max_agent_pools < 8 or vmas_cluster:
if control_plane_only:
msg = ("Legacy clusters do not support control plane only upgrade. All node pools will be "
"upgraded to {} as well. Continue?").format(instance.kubernetes_version)
if not prompt_y_n(msg, default="n"):
return None
upgrade_all = True
else:
if not control_plane_only:
msg = ("Since control-plane-only argument is not specified, this will upgrade the control plane "
"AND all nodepools to version {}. Continue?").format(instance.kubernetes_version)
if not prompt_y_n(msg, default="n"):
return None
upgrade_all = True
else:
msg = ("Since control-plane-only argument is specified, this will upgrade only the control plane to {}. "
"Node pool will not change. Continue?").format(instance.kubernetes_version)
if not prompt_y_n(msg, default="n"):
return None
if upgrade_all:
for agent_profile in instance.agent_pool_profiles:
agent_profile.orchestrator_version = kubernetes_version
# null out the SP and AAD profile because otherwise validation complains
instance.service_principal_profile = None
instance.aad_profile = None
return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, name, instance)
DEV_SPACES_EXTENSION_NAME = 'dev-spaces'
DEV_SPACES_EXTENSION_MODULE = 'azext_dev_spaces.custom'
def aks_use_dev_spaces(cmd, client, name, resource_group_name, update=False, space_name=None, prompt=False):
"""
Use Azure Dev Spaces with a managed Kubernetes cluster.
:param name: Name of the managed cluster.
:type name: String
    :param resource_group_name: Name of resource group. You can configure the default group \
    using 'az configure --defaults group=<name>'.
:type resource_group_name: String
:param update: Update to the latest Azure Dev Spaces client components.
:type update: bool
:param space_name: Name of the new or existing dev space to select. Defaults to an interactive selection experience.
:type space_name: String
:param prompt: Do not prompt for confirmation. Requires --space.
:type prompt: bool
"""
if _get_or_add_extension(cmd, DEV_SPACES_EXTENSION_NAME, DEV_SPACES_EXTENSION_MODULE, update):
azext_custom = _get_azext_module(DEV_SPACES_EXTENSION_NAME, DEV_SPACES_EXTENSION_MODULE)
try:
azext_custom.ads_use_dev_spaces(name, resource_group_name, update, space_name, prompt)
except TypeError:
raise CLIError("Use '--update' option to get the latest Azure Dev Spaces client components.")
except AttributeError as ae:
raise CLIError(ae)
def aks_remove_dev_spaces(cmd, client, name, resource_group_name, prompt=False):
"""
Remove Azure Dev Spaces from a managed Kubernetes cluster.
:param name: Name of the managed cluster.
:type name: String
    :param resource_group_name: Name of resource group. You can configure the default group \
    using 'az configure --defaults group=<name>'.
:type resource_group_name: String
:param prompt: Do not prompt for confirmation.
:type prompt: bool
"""
if _get_or_add_extension(cmd, DEV_SPACES_EXTENSION_NAME, DEV_SPACES_EXTENSION_MODULE):
azext_custom = _get_azext_module(DEV_SPACES_EXTENSION_NAME, DEV_SPACES_EXTENSION_MODULE)
try:
azext_custom.ads_remove_dev_spaces(name, resource_group_name, prompt)
except AttributeError as ae:
raise CLIError(ae)
def aks_rotate_certs(cmd, client, resource_group_name, name, no_wait=True):
return sdk_no_wait(no_wait, client.rotate_cluster_certificates, resource_group_name, name)
def _update_addons(cmd, instance, subscription_id, resource_group_name, addons, enable, workspace_resource_id=None,
subnet_name=None, no_wait=False):
# parse the comma-separated addons argument
addon_args = addons.split(',')
addon_profiles = instance.addon_profiles or {}
if 'kube-dashboard' in addon_args and 'kubeDashboard' not in addon_profiles:
addon_profiles['kubeDashboard'] = ManagedClusterAddonProfile(enabled=True)
os_type = 'Linux'
# for each addons argument
for addon_arg in addon_args:
addon = ADDONS[addon_arg]
if addon == 'aciConnector':
# only linux is supported for now, in the future this will be a user flag
addon += os_type
# addon name is case insensitive
addon = next((x for x in addon_profiles.keys() if x.lower() == addon.lower()), addon)
if enable:
# add new addons or update existing ones and enable them
addon_profile = addon_profiles.get(addon, ManagedClusterAddonProfile(enabled=False))
# special config handling for certain addons
if addon == 'omsagent':
if addon_profile.enabled:
                    raise CLIError('The monitoring addon is already enabled for this managed cluster.\n'
                                   'To change monitoring configuration, run "az aks disable-addons -a monitoring" '
                                   'before enabling it again.')
if not workspace_resource_id:
workspace_resource_id = _ensure_default_log_analytics_workspace_for_monitoring(
cmd,
subscription_id,
resource_group_name)
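                # normalize the workspace resource ID: strip whitespace, ensure a leading '/'
                # and drop any trailing '/'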
workspace_resource_id = workspace_resource_id.strip()
if not workspace_resource_id.startswith('/'):
workspace_resource_id = '/' + workspace_resource_id
if workspace_resource_id.endswith('/'):
workspace_resource_id = workspace_resource_id.rstrip('/')
addon_profile.config = {'logAnalyticsWorkspaceResourceID': workspace_resource_id}
elif addon.lower() == ('aciConnector' + os_type).lower():
if addon_profile.enabled:
                    raise CLIError('The virtual-node addon is already enabled for this managed cluster.\n'
                                   'To change virtual-node configuration, run '
                                   '"az aks disable-addons -a virtual-node -g {resource_group_name}" '
                                   'before enabling it again.'.format(resource_group_name=resource_group_name))
if not subnet_name:
raise CLIError('The aci-connector addon requires setting a subnet name.')
addon_profile.config = {'SubnetName': subnet_name}
addon_profiles[addon] = addon_profile
else:
if addon not in addon_profiles:
raise CLIError("The addon {} is not installed.".format(addon))
addon_profiles[addon].config = None
addon_profiles[addon].enabled = enable
instance.addon_profiles = addon_profiles
# null out the SP and AAD profile because otherwise validation complains
instance.service_principal_profile = None
instance.aad_profile = None
return instance
def _get_azext_module(extension_name, module_name):
try:
# Adding the installed extension in the path
from azure.cli.core.extension.operations import add_extension_to_path
add_extension_to_path(extension_name)
# Import the extension module
from importlib import import_module
azext_custom = import_module(module_name)
return azext_custom
except ImportError as ie:
raise CLIError(ie)
def _handle_addons_args(cmd, addons_str, subscription_id, resource_group_name, addon_profiles=None,
workspace_resource_id=None):
if not addon_profiles:
addon_profiles = {}
addons = addons_str.split(',') if addons_str else []
if 'http_application_routing' in addons:
addon_profiles['httpApplicationRouting'] = ManagedClusterAddonProfile(enabled=True)
addons.remove('http_application_routing')
if 'kube-dashboard' in addons:
addon_profiles['kubeDashboard'] = ManagedClusterAddonProfile(enabled=True)
addons.remove('kube-dashboard')
# TODO: can we help the user find a workspace resource ID?
if 'monitoring' in addons:
if not workspace_resource_id:
# use default workspace if exists else create default workspace
workspace_resource_id = _ensure_default_log_analytics_workspace_for_monitoring(
cmd, subscription_id, resource_group_name)
workspace_resource_id = workspace_resource_id.strip()
if not workspace_resource_id.startswith('/'):
workspace_resource_id = '/' + workspace_resource_id
if workspace_resource_id.endswith('/'):
workspace_resource_id = workspace_resource_id.rstrip('/')
addon_profiles['omsagent'] = ManagedClusterAddonProfile(
enabled=True, config={'logAnalyticsWorkspaceResourceID': workspace_resource_id})
addons.remove('monitoring')
# error out if '--enable-addons=monitoring' isn't set but workspace_resource_id is
elif workspace_resource_id:
raise CLIError('"--workspace-resource-id" requires "--enable-addons monitoring".')
# error out if any (unrecognized) addons remain
if addons:
raise CLIError('"{}" {} not recognized by the --enable-addons argument.'.format(
",".join(addons), "are" if len(addons) > 1 else "is"))
return addon_profiles
def _install_dev_spaces_extension(cmd, extension_name):
try:
from azure.cli.core.extension import operations
operations.add_extension(cmd=cmd, extension_name=extension_name)
    except Exception: # noqa pylint: disable=broad-except
return False
return True
def _update_dev_spaces_extension(cmd, extension_name, extension_module):
from azure.cli.core.extension import ExtensionNotInstalledException
try:
from azure.cli.core.extension import operations
operations.update_extension(cmd=cmd, extension_name=extension_name)
operations.reload_extension(extension_name=extension_name)
except CLIError as err:
logger.info(err)
except ExtensionNotInstalledException as err:
logger.debug(err)
return False
except ModuleNotFoundError as err:
logger.debug(err)
logger.error("Error occurred attempting to load the extension module. Use --debug for more information.")
return False
return True
def _get_or_add_extension(cmd, extension_name, extension_module, update=False):
from azure.cli.core.extension import (ExtensionNotInstalledException, get_extension)
try:
get_extension(extension_name)
if update:
return _update_dev_spaces_extension(cmd, extension_name, extension_module)
except ExtensionNotInstalledException:
return _install_dev_spaces_extension(cmd, extension_name)
return True
def _ensure_default_log_analytics_workspace_for_monitoring(cmd, subscription_id, resource_group_name):
# mapping for azure public cloud
# log analytics workspaces cannot be created in WCUS region due to capacity limits
# so mapped to EUS per discussion with log analytics team
AzureCloudLocationToOmsRegionCodeMap = {
"australiasoutheast": "ASE",
"australiaeast": "EAU",
"australiacentral": "CAU",
"canadacentral": "CCA",
"centralindia": "CIN",
"centralus": "CUS",
"eastasia": "EA",
"eastus": "EUS",
"eastus2": "EUS2",
"eastus2euap": "EAP",
"francecentral": "PAR",
"japaneast": "EJP",
"koreacentral": "SE",
"northeurope": "NEU",
"southcentralus": "SCUS",
"southeastasia": "SEA",
"uksouth": "SUK",
"usgovvirginia": "USGV",
"westcentralus": "EUS",
"westeurope": "WEU",
"westus": "WUS",
"westus2": "WUS2"
}
AzureCloudRegionToOmsRegionMap = {
"australiacentral": "australiacentral",
"australiacentral2": "australiacentral",
"australiaeast": "australiaeast",
"australiasoutheast": "australiasoutheast",
"brazilsouth": "southcentralus",
"canadacentral": "canadacentral",
"canadaeast": "canadacentral",
"centralus": "centralus",
"centralindia": "centralindia",
"eastasia": "eastasia",
"eastus": "eastus",
"eastus2": "eastus2",
"francecentral": "francecentral",
"francesouth": "francecentral",
"japaneast": "japaneast",
"japanwest": "japaneast",
"koreacentral": "koreacentral",
"koreasouth": "koreacentral",
"northcentralus": "eastus",
"northeurope": "northeurope",
"southafricanorth": "westeurope",
"southafricawest": "westeurope",
"southcentralus": "southcentralus",
"southeastasia": "southeastasia",
"southindia": "centralindia",
"uksouth": "uksouth",
"ukwest": "uksouth",
"westcentralus": "eastus",
"westeurope": "westeurope",
"westindia": "centralindia",
"westus": "westus",
"westus2": "westus2"
}
# mapping for azure china cloud
# currently log analytics supported only China East 2 region
AzureChinaLocationToOmsRegionCodeMap = {
"chinaeast": "EAST2",
"chinaeast2": "EAST2",
"chinanorth": "EAST2",
"chinanorth2": "EAST2"
}
AzureChinaRegionToOmsRegionMap = {
"chinaeast": "chinaeast2",
"chinaeast2": "chinaeast2",
"chinanorth": "chinaeast2",
"chinanorth2": "chinaeast2"
}
    # mapping for azure us government cloud
AzureFairfaxLocationToOmsRegionCodeMap = {
"usgovvirginia": "USGV"
}
AzureFairfaxRegionToOmsRegionMap = {
"usgovvirginia": "usgovvirginia"
}
rg_location = _get_rg_location(cmd.cli_ctx, resource_group_name)
cloud_name = cmd.cli_ctx.cloud.name
workspace_region = "eastus"
workspace_region_code = "EUS"
# sanity check that locations and clouds match.
if ((cloud_name.lower() == 'azurecloud' and AzureChinaRegionToOmsRegionMap.get(rg_location, False)) or
(cloud_name.lower() == 'azurecloud' and AzureFairfaxRegionToOmsRegionMap.get(rg_location, False))):
raise CLIError('Wrong cloud (azurecloud) setting for region {}, please use "az cloud set ..."'
.format(rg_location))
if ((cloud_name.lower() == 'azurechinacloud' and AzureCloudRegionToOmsRegionMap.get(rg_location, False)) or
(cloud_name.lower() == 'azurechinacloud' and AzureFairfaxRegionToOmsRegionMap.get(rg_location, False))):
raise CLIError('Wrong cloud (azurechinacloud) setting for region {}, please use "az cloud set ..."'
.format(rg_location))
if ((cloud_name.lower() == 'azureusgovernment' and AzureCloudRegionToOmsRegionMap.get(rg_location, False)) or
(cloud_name.lower() == 'azureusgovernment' and AzureChinaRegionToOmsRegionMap.get(rg_location, False))):
raise CLIError('Wrong cloud (azureusgovernment) setting for region {}, please use "az cloud set ..."'
.format(rg_location))
if cloud_name.lower() == 'azurecloud':
workspace_region = AzureCloudRegionToOmsRegionMap.get(rg_location, "eastus")
workspace_region_code = AzureCloudLocationToOmsRegionCodeMap.get(workspace_region, "EUS")
elif cloud_name.lower() == 'azurechinacloud':
workspace_region = AzureChinaRegionToOmsRegionMap.get(rg_location, "chinaeast2")
workspace_region_code = AzureChinaLocationToOmsRegionCodeMap.get(workspace_region, "EAST2")
elif cloud_name.lower() == 'azureusgovernment':
workspace_region = AzureFairfaxRegionToOmsRegionMap.get(rg_location, "usgovvirginia")
workspace_region_code = AzureFairfaxLocationToOmsRegionCodeMap.get(workspace_region, "USGV")
else:
logger.error("AKS Monitoring addon not supported in cloud : %s", cloud_name)
default_workspace_resource_group = 'DefaultResourceGroup-' + workspace_region_code
default_workspace_name = 'DefaultWorkspace-{0}-{1}'.format(subscription_id, workspace_region_code)
default_workspace_resource_id = '/subscriptions/{0}/resourceGroups/{1}/providers/Microsoft.OperationalInsights' \
'/workspaces/{2}'.format(subscription_id, default_workspace_resource_group, default_workspace_name)
resource_groups = cf_resource_groups(cmd.cli_ctx, subscription_id)
resources = cf_resources(cmd.cli_ctx, subscription_id)
# check if default RG exists
if resource_groups.check_existence(default_workspace_resource_group):
try:
resource = resources.get_by_id(default_workspace_resource_id, '2015-11-01-preview')
return resource.id
except CloudError as ex:
if ex.status_code != 404:
raise ex
else:
resource_groups.create_or_update(default_workspace_resource_group, {'location': workspace_region})
default_workspace_params = {
'location': workspace_region,
'properties': {
'sku': {
'name': 'standalone'
}
}
}
async_poller = resources.create_or_update_by_id(default_workspace_resource_id, '2015-11-01-preview',
default_workspace_params)
ws_resource_id = ''
while True:
result = async_poller.result(15)
if async_poller.done():
ws_resource_id = result.id
break
return ws_resource_id
def _ensure_container_insights_for_monitoring(cmd, addon):
# Workaround for this addon key which has been seen lowercased in the wild.
if 'loganalyticsworkspaceresourceid' in addon.config:
addon.config['logAnalyticsWorkspaceResourceID'] = addon.config.pop('loganalyticsworkspaceresourceid')
workspace_resource_id = addon.config['logAnalyticsWorkspaceResourceID']
workspace_resource_id = workspace_resource_id.strip()
if not workspace_resource_id.startswith('/'):
workspace_resource_id = '/' + workspace_resource_id
if workspace_resource_id.endswith('/'):
workspace_resource_id = workspace_resource_id.rstrip('/')
# extract subscription ID and resource group from workspace_resource_id URL
try:
subscription_id = workspace_resource_id.split('/')[2]
resource_group = workspace_resource_id.split('/')[4]
except IndexError:
raise CLIError('Could not locate resource group in workspace-resource-id URL.')
# region of workspace can be different from region of RG so find the location of the workspace_resource_id
resources = cf_resources(cmd.cli_ctx, subscription_id)
try:
resource = resources.get_by_id(workspace_resource_id, '2015-11-01-preview')
location = resource.location
except CloudError as ex:
raise ex
unix_time_in_millis = int(
(datetime.datetime.utcnow() - datetime.datetime.utcfromtimestamp(0)).total_seconds() * 1000.0)
solution_deployment_name = 'ContainerInsights-{}'.format(unix_time_in_millis)
# pylint: disable=line-too-long
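    # ARM template that installs the ContainerInsights solution into the Log Analytics
    # workspace via a nested deployment scoped to the workspace's subscription and resource group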
template = {
"$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#",
"contentVersion": "1.0.0.0",
"parameters": {
"workspaceResourceId": {
"type": "string",
"metadata": {
"description": "Azure Monitor Log Analytics Resource ID"
}
},
"workspaceRegion": {
"type": "string",
"metadata": {
"description": "Azure Monitor Log Analytics workspace region"
}
},
"solutionDeploymentName": {
"type": "string",
"metadata": {
"description": "Name of the solution deployment"
}
}
},
"resources": [
{
"type": "Microsoft.Resources/deployments",
"name": "[parameters('solutionDeploymentName')]",
"apiVersion": "2017-05-10",
"subscriptionId": "[split(parameters('workspaceResourceId'),'/')[2]]",
"resourceGroup": "[split(parameters('workspaceResourceId'),'/')[4]]",
"properties": {
"mode": "Incremental",
"template": {
"$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#",
"contentVersion": "1.0.0.0",
"parameters": {},
"variables": {},
"resources": [
{
"apiVersion": "2015-11-01-preview",
"type": "Microsoft.OperationsManagement/solutions",
"location": "[parameters('workspaceRegion')]",
"name": "[Concat('ContainerInsights', '(', split(parameters('workspaceResourceId'),'/')[8], ')')]",
"properties": {
"workspaceResourceId": "[parameters('workspaceResourceId')]"
},
"plan": {
"name": "[Concat('ContainerInsights', '(', split(parameters('workspaceResourceId'),'/')[8], ')')]",
"product": "[Concat('OMSGallery/', 'ContainerInsights')]",
"promotionCode": "",
"publisher": "Microsoft"
}
}
]
},
"parameters": {}
}
}
]
}
params = {
"workspaceResourceId": {
"value": workspace_resource_id
},
"workspaceRegion": {
"value": location
},
"solutionDeploymentName": {
"value": solution_deployment_name
}
}
deployment_name = 'aks-monitoring-{}'.format(unix_time_in_millis)
# publish the Container Insights solution to the Log Analytics workspace
return _invoke_deployment(cmd.cli_ctx, resource_group, deployment_name, template, params,
validate=False, no_wait=False, subscription_id=subscription_id)
def _ensure_aks_acr(cli_ctx,
client_id,
acr_name_or_id,
subscription_id,
detach=False):
from msrestazure.tools import is_valid_resource_id, parse_resource_id
# Check if the ACR exists by resource ID.
if is_valid_resource_id(acr_name_or_id):
try:
parsed_registry = parse_resource_id(acr_name_or_id)
acr_client = cf_container_registry_service(cli_ctx, subscription_id=parsed_registry['subscription'])
registry = acr_client.registries.get(parsed_registry['resource_group'], parsed_registry['name'])
except CloudError as ex:
raise CLIError(ex.message)
_ensure_aks_acr_role_assignment(cli_ctx, client_id, registry.id, detach)
return
    # Check if the ACR exists by name across all resource groups.
registry_name = acr_name_or_id
registry_resource = 'Microsoft.ContainerRegistry/registries'
try:
registry = get_resource_by_name(cli_ctx, registry_name, registry_resource)
except CloudError as ex:
if 'was not found' in ex.message:
raise CLIError("ACR {} not found. Have you provided the right ACR name?".format(registry_name))
raise CLIError(ex.message)
_ensure_aks_acr_role_assignment(cli_ctx, client_id, registry.id, detach)
return
def aks_agentpool_show(cmd, client, resource_group_name, cluster_name, nodepool_name):
instance = client.get(resource_group_name, cluster_name, nodepool_name)
return instance
def aks_agentpool_list(cmd, client, resource_group_name, cluster_name):
return client.list(resource_group_name, cluster_name)
def aks_agentpool_add(cmd, client, resource_group_name, cluster_name, nodepool_name,
kubernetes_version=None,
zones=None,
node_vm_size=None,
node_osdisk_size=0,
node_count=3,
vnet_subnet_id=None,
max_pods=0,
os_type="Linux",
min_count=None,
max_count=None,
enable_cluster_autoscaler=False,
node_taints=None,
no_wait=False):
instances = client.list(resource_group_name, cluster_name)
for agentpool_profile in instances:
if agentpool_profile.name == nodepool_name:
raise CLIError("Node pool {} already exists, please try a different name, "
"use 'aks nodepool list' to get current list of node pool".format(nodepool_name))
taints_array = []
if node_taints is not None:
for taint in node_taints.split(','):
try:
taint = taint.strip()
taints_array.append(taint)
except ValueError:
raise CLIError('Taint does not match allowed values. Expect value such as "special=true:NoSchedule".')
if node_vm_size is None:
if os_type.lower() == "windows":
raise CLIError('Windows nodepool is not supported')
node_vm_size = "Standard_DS2_v2"
agent_pool = AgentPool(
name=nodepool_name,
count=int(node_count),
vm_size=node_vm_size,
os_type=os_type,
storage_profile=ContainerServiceStorageProfileTypes.managed_disks,
vnet_subnet_id=vnet_subnet_id,
agent_pool_type="VirtualMachineScaleSets",
max_pods=int(max_pods) if max_pods else None,
orchestrator_version=kubernetes_version,
availability_zones=zones,
node_taints=taints_array
)
_check_cluster_autoscaler_flag(enable_cluster_autoscaler, min_count, max_count, node_count, agent_pool)
if node_osdisk_size:
agent_pool.os_disk_size_gb = int(node_osdisk_size)
return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, cluster_name, nodepool_name, agent_pool)
def aks_agentpool_scale(cmd, client, resource_group_name, cluster_name,
nodepool_name,
node_count=3,
no_wait=False):
instance = client.get(resource_group_name, cluster_name, nodepool_name)
new_node_count = int(node_count)
if new_node_count == 0:
raise CLIError("Can't scale down to 0 nodes.")
if new_node_count == instance.count:
raise CLIError("The new node count is the same as the current node count.")
instance.count = new_node_count # pylint: disable=no-member
return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, cluster_name, nodepool_name, instance)
def aks_agentpool_upgrade(cmd, client, resource_group_name, cluster_name,
kubernetes_version,
nodepool_name,
no_wait=False):
instance = client.get(resource_group_name, cluster_name, nodepool_name)
instance.orchestrator_version = kubernetes_version
return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, cluster_name, nodepool_name, instance)
def aks_agentpool_update(cmd, client, resource_group_name, cluster_name, nodepool_name,
enable_cluster_autoscaler=False,
disable_cluster_autoscaler=False,
update_cluster_autoscaler=False,
min_count=None, max_count=None,
no_wait=False):
update_flags = enable_cluster_autoscaler + disable_cluster_autoscaler + update_cluster_autoscaler
if update_flags != 1:
raise CLIError('Please specify "--enable-cluster-autoscaler" or '
'"--disable-cluster-autoscaler" or '
'"--update-cluster-autoscaler"')
instance = client.get(resource_group_name, cluster_name, nodepool_name)
node_count = instance.count
_validate_autoscaler_update_counts(min_count, max_count, node_count, enable_cluster_autoscaler or
update_cluster_autoscaler)
if enable_cluster_autoscaler:
if instance.enable_auto_scaling:
logger.warning('Autoscaler is already enabled for this node pool.\n'
'Please run "az aks nodepool update --update-cluster-autoscaler" '
'if you want to update min-count or max-count.')
return None
instance.min_count = int(min_count)
instance.max_count = int(max_count)
instance.enable_auto_scaling = True
if update_cluster_autoscaler:
if not instance.enable_auto_scaling:
            raise CLIError('Autoscaler is not enabled for this node pool.\n'
                           'Run "az aks nodepool update --enable-cluster-autoscaler" '
                           'to enable the cluster autoscaler with min-count and max-count.')
instance.min_count = int(min_count)
instance.max_count = int(max_count)
if disable_cluster_autoscaler:
if not instance.enable_auto_scaling:
logger.warning('Autoscaler is already disabled for this node pool.')
return None
instance.enable_auto_scaling = False
instance.min_count = None
instance.max_count = None
return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, cluster_name, nodepool_name, instance)
def aks_agentpool_delete(cmd, client, resource_group_name, cluster_name,
nodepool_name,
no_wait=False):
agentpool_exists = False
instances = client.list(resource_group_name, cluster_name)
for agentpool_profile in instances:
if agentpool_profile.name.lower() == nodepool_name.lower():
agentpool_exists = True
break
if not agentpool_exists:
raise CLIError("Node pool {} doesnt exist, "
"use 'aks nodepool list' to get current node pool list".format(nodepool_name))
return sdk_no_wait(no_wait, client.delete, resource_group_name, cluster_name, nodepool_name)
def _ensure_aks_acr_role_assignment(cli_ctx,
client_id,
registry_id,
detach=False):
if detach:
if not _delete_role_assignments(cli_ctx,
'acrpull',
client_id,
scope=registry_id):
raise CLIError('Could not delete role assignments for ACR. '
'Are you an Owner on this subscription?')
return
if not _add_role_assignment(cli_ctx,
'acrpull',
client_id,
scope=registry_id):
raise CLIError('Could not create a role assignment for ACR. '
'Are you an Owner on this subscription?')
return
def _ensure_aks_service_principal(cli_ctx,
service_principal=None,
client_secret=None,
subscription_id=None,
dns_name_prefix=None,
location=None,
name=None):
file_name_aks = 'aksServicePrincipal.json'
# TODO: This really needs to be unit tested.
rbac_client = get_graph_rbac_management_client(cli_ctx)
if not service_principal:
# --service-principal not specified, try to load it from local disk
principal_obj = load_acs_service_principal(subscription_id, file_name=file_name_aks)
if principal_obj:
service_principal = principal_obj.get('service_principal')
client_secret = principal_obj.get('client_secret')
else:
# Nothing to load, make one.
if not client_secret:
client_secret = _create_client_secret()
salt = binascii.b2a_hex(os.urandom(3)).decode('utf-8')
url = 'https://{}.{}.{}.cloudapp.azure.com'.format(salt, dns_name_prefix, location)
service_principal = _build_service_principal(rbac_client, cli_ctx, name, url, client_secret)
if not service_principal:
raise CLIError('Could not create a service principal with the right permissions. '
'Are you an Owner on this project?')
logger.info('Created a service principal: %s', service_principal)
# We don't need to add role assignment for this created SPN
else:
        # --service-principal specified, validate --client-secret was too
if not client_secret:
raise CLIError('--client-secret is required if --service-principal is specified')
store_acs_service_principal(subscription_id, client_secret, service_principal, file_name=file_name_aks)
return load_acs_service_principal(subscription_id, file_name=file_name_aks)
def _ensure_osa_aad(cli_ctx,
aad_client_app_id=None,
aad_client_app_secret=None,
aad_tenant_id=None,
identifier=None,
name=None, create=False,
customer_admin_group_id=None):
rbac_client = get_graph_rbac_management_client(cli_ctx)
if create:
        # This reply_url is temporarily set since Azure needs one to create the AAD application.
app_id_name = 'https://{}'.format(name)
if not aad_client_app_secret:
aad_client_app_secret = _create_client_secret()
# Delegate Sign In and Read User Profile permissions on Windows Azure Active Directory API
resource_access = ResourceAccess(id="311a71cc-e848-46a1-bdf8-97ff7156d8e6",
additional_properties=None, type="Scope")
# Read directory permissions on Windows Azure Active Directory API
directory_access = ResourceAccess(id="5778995a-e1bf-45b8-affa-663a9f3f4d04",
additional_properties=None, type="Role")
required_osa_aad_access = RequiredResourceAccess(resource_access=[resource_access, directory_access],
additional_properties=None,
resource_app_id="00000002-0000-0000-c000-000000000000")
list_aad_filtered = list(rbac_client.applications.list(filter="identifierUris/any(s:s eq '{}')"
.format(app_id_name)))
if list_aad_filtered:
aad_client_app_id = list_aad_filtered[0].app_id
# Updating reply_url with the correct FQDN information returned by the RP
reply_url = 'https://{}/oauth2callback/Azure%20AD'.format(identifier)
update_application(client=rbac_client.applications,
object_id=list_aad_filtered[0].object_id,
display_name=name,
identifier_uris=[app_id_name],
reply_urls=[reply_url],
homepage=app_id_name,
password=aad_client_app_secret,
required_resource_accesses=[required_osa_aad_access])
logger.info('Updated AAD: %s', aad_client_app_id)
else:
result = create_application(client=rbac_client.applications,
display_name=name,
identifier_uris=[app_id_name],
homepage=app_id_name,
password=aad_client_app_secret,
required_resource_accesses=[required_osa_aad_access])
aad_client_app_id = result.app_id
logger.info('Created an AAD: %s', aad_client_app_id)
# Get the TenantID
if aad_tenant_id is None:
profile = Profile(cli_ctx=cli_ctx)
_, _, aad_tenant_id = profile.get_login_credentials()
return OpenShiftManagedClusterAADIdentityProvider(
client_id=aad_client_app_id,
secret=aad_client_app_secret,
tenant_id=aad_tenant_id,
kind='AADIdentityProvider',
customer_admin_group_id=customer_admin_group_id)
def _ensure_service_principal(cli_ctx,
service_principal=None,
client_secret=None,
subscription_id=None,
dns_name_prefix=None,
location=None,
name=None):
# TODO: This really needs to be unit tested.
rbac_client = get_graph_rbac_management_client(cli_ctx)
if not service_principal:
# --service-principal not specified, try to load it from local disk
principal_obj = load_acs_service_principal(subscription_id)
if principal_obj:
service_principal = principal_obj.get('service_principal')
client_secret = principal_obj.get('client_secret')
else:
# Nothing to load, make one.
if not client_secret:
client_secret = _create_client_secret()
salt = binascii.b2a_hex(os.urandom(3)).decode('utf-8')
url = 'https://{}.{}.{}.cloudapp.azure.com'.format(salt, dns_name_prefix, location)
service_principal = _build_service_principal(rbac_client, cli_ctx, name, url, client_secret)
if not service_principal:
raise CLIError('Could not create a service principal with the right permissions. '
'Are you an Owner on this project?')
logger.info('Created a service principal: %s', service_principal)
# add role first before save it
if not _add_role_assignment(cli_ctx, 'Contributor', service_principal):
logger.warning('Could not create a service principal with the right permissions. '
'Are you an Owner on this project?')
else:
        # --service-principal specified, validate --client-secret was too
if not client_secret:
raise CLIError('--client-secret is required if --service-principal is specified')
store_acs_service_principal(subscription_id, client_secret, service_principal)
return load_acs_service_principal(subscription_id)
def _create_client_secret():
    # Add a special character to satisfy AAD SP secret requirements
special_char = '$'
client_secret = binascii.b2a_hex(os.urandom(10)).decode('utf-8') + special_char
return client_secret
def _get_rg_location(ctx, resource_group_name, subscription_id=None):
groups = cf_resource_groups(ctx, subscription_id=subscription_id)
# Just do the get, we don't need the result, it will error out if the group doesn't exist.
rg = groups.get(resource_group_name)
return rg.location
def _check_cluster_autoscaler_flag(enable_cluster_autoscaler,
min_count,
max_count,
node_count,
agent_pool_profile):
if enable_cluster_autoscaler:
if min_count is None or max_count is None:
            raise CLIError('Please specify both min-count and max-count when --enable-cluster-autoscaler is enabled')
if int(min_count) > int(max_count):
raise CLIError('Value of min-count should be less than or equal to value of max-count')
if int(node_count) < int(min_count) or int(node_count) > int(max_count):
raise CLIError('node-count is not in the range of min-count and max-count')
agent_pool_profile.min_count = int(min_count)
agent_pool_profile.max_count = int(max_count)
agent_pool_profile.enable_auto_scaling = True
else:
if min_count is not None or max_count is not None:
            raise CLIError('min-count and max-count can only be used together with --enable-cluster-autoscaler, please use that flag')
def _validate_autoscaler_update_counts(min_count, max_count, node_count, is_enable_or_update):
"""
Validates the min, max, and node count when performing an update
"""
if min_count is None or max_count is None:
if is_enable_or_update:
raise CLIError('Please specify both min-count and max-count when --enable-cluster-autoscaler or '
'--update-cluster-autoscaler is set.')
if min_count is not None and max_count is not None:
if int(min_count) > int(max_count):
raise CLIError('Value of min-count should be less than or equal to value of max-count.')
if int(node_count) < int(min_count) or int(node_count) > int(max_count):
raise CLIError("Current node count '{}' is not in the range of min-count and max-count.".format(node_count))
def _print_or_merge_credentials(path, kubeconfig, overwrite_existing, context_name):
"""Merge an unencrypted kubeconfig into the file at the specified path, or print it to
stdout if the path is "-".
"""
# Special case for printing to stdout
if path == "-":
print(kubeconfig)
return
# ensure that at least an empty ~/.kube/config exists
directory = os.path.dirname(path)
if directory and not os.path.exists(directory):
try:
os.makedirs(directory)
except OSError as ex:
if ex.errno != errno.EEXIST:
raise
if not os.path.exists(path):
with os.fdopen(os.open(path, os.O_CREAT | os.O_WRONLY, 0o600), 'wt'):
pass
# merge the new kubeconfig into the existing one
fd, temp_path = tempfile.mkstemp()
additional_file = os.fdopen(fd, 'w+t')
try:
additional_file.write(kubeconfig)
additional_file.flush()
merge_kubernetes_configurations(path, temp_path, overwrite_existing, context_name)
except yaml.YAMLError as ex:
logger.warning('Failed to merge credentials to kube config file: %s', ex)
finally:
additional_file.close()
os.remove(temp_path)
def _remove_nulls(managed_clusters):
"""
Remove some often-empty fields from a list of ManagedClusters, so the JSON representation
doesn't contain distracting null fields.
    This works around a quirk of the Python SDK's behavior. These fields are not sent
    by the server, but get recreated by the CLI's own "to_dict" serialization.
"""
attrs = ['tags']
ap_attrs = ['os_disk_size_gb', 'vnet_subnet_id']
sp_attrs = ['secret']
for managed_cluster in managed_clusters:
for attr in attrs:
if getattr(managed_cluster, attr, None) is None:
delattr(managed_cluster, attr)
for ap_profile in managed_cluster.agent_pool_profiles:
for attr in ap_attrs:
if getattr(ap_profile, attr, None) is None:
delattr(ap_profile, attr)
for attr in sp_attrs:
if getattr(managed_cluster.service_principal_profile, attr, None) is None:
delattr(managed_cluster.service_principal_profile, attr)
return managed_clusters
def _remove_osa_nulls(managed_clusters):
"""
Remove some often-empty fields from a list of OpenShift ManagedClusters, so the JSON representation
doesn't contain distracting null fields.
    This works around a quirk of the Python SDK's behavior. These fields are not sent
    by the server, but get recreated by the CLI's own "to_dict" serialization.
"""
attrs = ['tags', 'plan', 'type', 'id']
ap_master_attrs = ['name', 'os_type']
net_attrs = ['peer_vnet_id']
for managed_cluster in managed_clusters:
for attr in attrs:
if getattr(managed_cluster, attr, None) is None:
delattr(managed_cluster, attr)
for attr in ap_master_attrs:
if getattr(managed_cluster.master_pool_profile, attr, None) is None:
delattr(managed_cluster.master_pool_profile, attr)
for attr in net_attrs:
if getattr(managed_cluster.network_profile, attr, None) is None:
delattr(managed_cluster.network_profile, attr)
return managed_clusters
def _validate_aci_location(norm_location):
"""
Validate the Azure Container Instance location
"""
aci_locations = [
"australiaeast",
"canadacentral",
"centralindia",
"centralus",
"eastasia",
"eastus",
"eastus2",
"eastus2euap",
"japaneast",
"northcentralus",
"northeurope",
"southcentralus",
"southeastasia",
"southindia",
"uksouth",
"westcentralus",
"westus",
"westus2",
"westeurope"
]
if norm_location not in aci_locations:
raise CLIError('Azure Container Instance is not available at location "{}".'.format(norm_location) +
' The available locations are "{}"'.format(','.join(aci_locations)))
def osa_list(cmd, client, resource_group_name=None):
if resource_group_name:
managed_clusters = client.list_by_resource_group(resource_group_name)
else:
managed_clusters = client.list()
return _remove_osa_nulls(list(managed_clusters))
def openshift_create(cmd, client, resource_group_name, name, # pylint: disable=too-many-locals
location=None,
compute_vm_size="Standard_D4s_v3",
compute_count=3,
aad_client_app_id=None,
aad_client_app_secret=None,
aad_tenant_id=None,
vnet_prefix="10.0.0.0/8",
subnet_prefix="10.0.0.0/24",
vnet_peer=None,
tags=None,
no_wait=False,
workspace_id=None,
customer_admin_group_id=None):
if location is None:
location = _get_rg_location(cmd.cli_ctx, resource_group_name)
agent_pool_profiles = []
agent_node_pool_profile = OpenShiftManagedClusterAgentPoolProfile(
name='compute', # Must be 12 chars or less before ACS RP adds to it
count=int(compute_count),
vm_size=compute_vm_size,
os_type="Linux",
role=OpenShiftAgentPoolProfileRole.compute,
subnet_cidr=subnet_prefix
)
agent_infra_pool_profile = OpenShiftManagedClusterAgentPoolProfile(
name='infra', # Must be 12 chars or less before ACS RP adds to it
count=int(3),
vm_size="Standard_D4s_v3",
os_type="Linux",
role=OpenShiftAgentPoolProfileRole.infra,
subnet_cidr=subnet_prefix
)
agent_pool_profiles.append(agent_node_pool_profile)
agent_pool_profiles.append(agent_infra_pool_profile)
agent_master_pool_profile = OpenShiftManagedClusterAgentPoolProfile(
name='master', # Must be 12 chars or less before ACS RP adds to it
count=int(3),
vm_size="Standard_D4s_v3",
os_type="Linux",
subnet_cidr=subnet_prefix
)
identity_providers = []
create_aad = False
    # Check whether the cluster already exists, since AAD rotation is not supported on OSA for now
try:
client.get(resource_group_name, name)
except CloudError:
        # Check whether aad_client_app_id, aad_client_app_secret and aad_tenant_id are set
if aad_client_app_id is None and aad_client_app_secret is None and aad_tenant_id is None:
create_aad = True
osa_aad_identity = _ensure_osa_aad(cmd.cli_ctx,
aad_client_app_id=aad_client_app_id,
aad_client_app_secret=aad_client_app_secret,
aad_tenant_id=aad_tenant_id, identifier=None,
name=name, create=create_aad,
customer_admin_group_id=customer_admin_group_id)
identity_providers.append(
OpenShiftManagedClusterIdentityProvider(
name='Azure AD',
provider=osa_aad_identity
)
)
auth_profile = OpenShiftManagedClusterAuthProfile(identity_providers=identity_providers)
default_router_profile = OpenShiftRouterProfile(name='default')
if vnet_peer is not None:
from msrestazure.tools import is_valid_resource_id, resource_id
if not is_valid_resource_id(vnet_peer):
vnet_peer = resource_id(
subscription=get_subscription_id(cmd.cli_ctx),
resource_group=resource_group_name,
namespace='Microsoft.Network', type='virtualNetwork',
name=vnet_peer
)
if workspace_id is not None:
workspace_id = workspace_id.strip()
if not workspace_id.startswith('/'):
workspace_id = '/' + workspace_id
if workspace_id.endswith('/'):
workspace_id = workspace_id.rstrip('/')
monitor_profile = OpenShiftManagedClusterMonitorProfile(enabled=True, workspace_id=workspace_id) # pylint: disable=line-too-long
else:
monitor_profile = None
network_profile = NetworkProfile(vnet_cidr=vnet_prefix, peer_vnet_id=vnet_peer)
osamc = OpenShiftManagedCluster(
location=location, tags=tags,
open_shift_version="v3.11",
network_profile=network_profile,
auth_profile=auth_profile,
agent_pool_profiles=agent_pool_profiles,
master_pool_profile=agent_master_pool_profile,
router_profiles=[default_router_profile],
monitor_profile=monitor_profile)
try:
# long_running_operation_timeout=300
result = sdk_no_wait(no_wait, client.create_or_update,
resource_group_name=resource_group_name, resource_name=name, parameters=osamc)
result = LongRunningOperation(cmd.cli_ctx)(result)
instance = client.get(resource_group_name, name)
_ensure_osa_aad(cmd.cli_ctx,
aad_client_app_id=osa_aad_identity.client_id,
aad_client_app_secret=osa_aad_identity.secret,
aad_tenant_id=osa_aad_identity.tenant_id, identifier=instance.public_hostname,
name=name, create=create_aad)
except CloudError as ex:
if "The resource type could not be found in the namespace 'Microsoft.ContainerService" in ex.message:
raise CLIError('Please make sure your subscription is whitelisted to use this service. https://aka.ms/openshift/managed') # pylint: disable=line-too-long
if "No registered resource provider found for location" in ex.message:
raise CLIError('Please make sure your subscription is whitelisted to use this service. https://aka.ms/openshift/managed') # pylint: disable=line-too-long
raise ex
def openshift_show(cmd, client, resource_group_name, name):
mc = client.get(resource_group_name, name)
return _remove_osa_nulls([mc])[0]
def openshift_scale(cmd, client, resource_group_name, name, compute_count, no_wait=False):
instance = client.get(resource_group_name, name)
# TODO: change this approach when we support multiple agent pools.
idx = 0
for i in range(len(instance.agent_pool_profiles)):
if instance.agent_pool_profiles[i].name.lower() == "compute":
idx = i
break
instance.agent_pool_profiles[idx].count = int(compute_count) # pylint: disable=no-member
    # null out the AAD profile and manually set the master pool profile name because otherwise validation complains
instance.master_pool_profile.name = "master"
instance.auth_profile = None
return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, name, instance)
def _get_load_balancer_outbound_ips(load_balancer_outbound_ips):
"""parse load balancer profile outbound IP ids and return an array of references to the outbound IP resources"""
load_balancer_outbound_ip_resources = None
if load_balancer_outbound_ips:
load_balancer_outbound_ip_resources = \
[ResourceReference(id=x.strip()) for x in load_balancer_outbound_ips.split(',')]
return load_balancer_outbound_ip_resources
def _get_load_balancer_outbound_ip_prefixes(load_balancer_outbound_ip_prefixes):
"""parse load balancer profile outbound IP prefix ids and return an array \
of references to the outbound IP prefix resources"""
load_balancer_outbound_ip_prefix_resources = None
if load_balancer_outbound_ip_prefixes:
load_balancer_outbound_ip_prefix_resources = \
[ResourceReference(id=x.strip()) for x in load_balancer_outbound_ip_prefixes.split(',')]
return load_balancer_outbound_ip_prefix_resources
def _get_load_balancer_profile(load_balancer_managed_outbound_ip_count,
load_balancer_outbound_ips,
load_balancer_outbound_ip_prefixes):
"""parse and build load balancer profile"""
load_balancer_outbound_ip_resources = _get_load_balancer_outbound_ips(load_balancer_outbound_ips)
load_balancer_outbound_ip_prefix_resources = _get_load_balancer_outbound_ip_prefixes(
load_balancer_outbound_ip_prefixes)
load_balancer_profile = None
if any([load_balancer_managed_outbound_ip_count,
load_balancer_outbound_ip_resources,
load_balancer_outbound_ip_prefix_resources]):
load_balancer_profile = ManagedClusterLoadBalancerProfile()
if load_balancer_managed_outbound_ip_count:
load_balancer_profile.managed_outbound_ips = ManagedClusterLoadBalancerProfileManagedOutboundIPs(
count=load_balancer_managed_outbound_ip_count
)
if load_balancer_outbound_ip_resources:
load_balancer_profile.outbound_ips = ManagedClusterLoadBalancerProfileOutboundIPs(
public_ips=load_balancer_outbound_ip_resources
)
if load_balancer_outbound_ip_prefix_resources:
load_balancer_profile.outbound_ip_prefixes = ManagedClusterLoadBalancerProfileOutboundIPPrefixes(
public_ip_prefixes=load_balancer_outbound_ip_prefix_resources
)
return load_balancer_profile
|
[] |
[] |
[
"ACC_TERM_ID",
"PATH"
] |
[]
|
["ACC_TERM_ID", "PATH"]
|
python
| 2 | 0 | |
tools/project-creator/Python2.6.6/Lib/idlelib/PyShell.py
|
#! /usr/bin/env python
import os
import os.path
import sys
import string
import getopt
import re
import socket
import time
import threading
import traceback
import types
import linecache
from code import InteractiveInterpreter
try:
from Tkinter import *
except ImportError:
print>>sys.__stderr__, "** IDLE can't import Tkinter. " \
"Your Python may not be configured for Tk. **"
sys.exit(1)
import tkMessageBox
from idlelib.EditorWindow import EditorWindow, fixwordbreaks
from idlelib.FileList import FileList
from idlelib.ColorDelegator import ColorDelegator
from idlelib.UndoDelegator import UndoDelegator
from idlelib.OutputWindow import OutputWindow
from idlelib.configHandler import idleConf
from idlelib import idlever
from idlelib import rpc
from idlelib import Debugger
from idlelib import RemoteDebugger
from idlelib import macosxSupport
IDENTCHARS = string.ascii_letters + string.digits + "_"
LOCALHOST = '127.0.0.1'
try:
from signal import SIGTERM
except ImportError:
SIGTERM = 15
# Override warnings module to write to warning_stream. Initialize to send IDLE
# internal warnings to the console. ScriptBinding.check_syntax() will
# temporarily redirect the stream to the shell window to display warnings when
# checking user's code.
global warning_stream
warning_stream = sys.__stderr__
try:
import warnings
except ImportError:
pass
else:
def idle_showwarning(message, category, filename, lineno,
file=None, line=None):
file = warning_stream
try:
file.write(warnings.formatwarning(message, category, filename,\
lineno, file=file, line=line))
except IOError:
pass ## file (probably __stderr__) is invalid, warning dropped.
warnings.showwarning = idle_showwarning
def idle_formatwarning(message, category, filename, lineno,
file=None, line=None):
"""Format warnings the IDLE way"""
s = "\nWarning (from warnings module):\n"
s += ' File \"%s\", line %s\n' % (filename, lineno)
line = linecache.getline(filename, lineno).strip() \
if line is None else line
if line:
s += " %s\n" % line
s += "%s: %s\n>>> " % (category.__name__, message)
return s
warnings.formatwarning = idle_formatwarning
def extended_linecache_checkcache(filename=None,
orig_checkcache=linecache.checkcache):
"""Extend linecache.checkcache to preserve the <pyshell#...> entries
Rather than repeating the linecache code, patch it to save the
<pyshell#...> entries, call the original linecache.checkcache()
(which destroys them), and then restore the saved entries.
orig_checkcache is bound at definition time to the original
method, allowing it to be patched.
"""
cache = linecache.cache
save = {}
for filename in cache.keys():
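        # shell pseudo-files are cached under keys like '<pyshell#1>', i.e. they
        # start with '<' and end with '>'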
if filename[:1] + filename[-1:] == '<>':
save[filename] = cache[filename]
orig_checkcache()
cache.update(save)
# Patch linecache.checkcache():
linecache.checkcache = extended_linecache_checkcache
class PyShellEditorWindow(EditorWindow):
"Regular text edit window in IDLE, supports breakpoints"
def __init__(self, *args):
self.breakpoints = []
EditorWindow.__init__(self, *args)
self.text.bind("<<set-breakpoint-here>>", self.set_breakpoint_here)
self.text.bind("<<clear-breakpoint-here>>", self.clear_breakpoint_here)
self.text.bind("<<open-python-shell>>", self.flist.open_shell)
self.breakpointPath = os.path.join(idleConf.GetUserCfgDir(),
'breakpoints.lst')
# whenever a file is changed, restore breakpoints
if self.io.filename: self.restore_file_breaks()
def filename_changed_hook(old_hook=self.io.filename_change_hook,
self=self):
self.restore_file_breaks()
old_hook()
self.io.set_filename_change_hook(filename_changed_hook)
rmenu_specs = [("Set Breakpoint", "<<set-breakpoint-here>>"),
("Clear Breakpoint", "<<clear-breakpoint-here>>")]
def set_breakpoint(self, lineno):
text = self.text
filename = self.io.filename
text.tag_add("BREAK", "%d.0" % lineno, "%d.0" % (lineno+1))
try:
i = self.breakpoints.index(lineno)
except ValueError: # only add if missing, i.e. do once
self.breakpoints.append(lineno)
try: # update the subprocess debugger
debug = self.flist.pyshell.interp.debugger
debug.set_breakpoint_here(filename, lineno)
except: # but debugger may not be active right now....
pass
def set_breakpoint_here(self, event=None):
text = self.text
filename = self.io.filename
if not filename:
text.bell()
return
lineno = int(float(text.index("insert")))
self.set_breakpoint(lineno)
def clear_breakpoint_here(self, event=None):
text = self.text
filename = self.io.filename
if not filename:
text.bell()
return
lineno = int(float(text.index("insert")))
try:
self.breakpoints.remove(lineno)
except:
pass
text.tag_remove("BREAK", "insert linestart",\
"insert lineend +1char")
try:
debug = self.flist.pyshell.interp.debugger
debug.clear_breakpoint_here(filename, lineno)
except:
pass
def clear_file_breaks(self):
if self.breakpoints:
text = self.text
filename = self.io.filename
if not filename:
text.bell()
return
self.breakpoints = []
text.tag_remove("BREAK", "1.0", END)
try:
debug = self.flist.pyshell.interp.debugger
debug.clear_file_breaks(filename)
except:
pass
def store_file_breaks(self):
"Save breakpoints when file is saved"
# XXX 13 Dec 2002 KBK Currently the file must be saved before it can
# be run. The breaks are saved at that time. If we introduce
# a temporary file save feature the save breaks functionality
# needs to be re-verified, since the breaks at the time the
# temp file is created may differ from the breaks at the last
# permanent save of the file. Currently, a break introduced
# after a save will be effective, but not persistent.
# This is necessary to keep the saved breaks synched with the
# saved file.
#
# Breakpoints are set as tagged ranges in the text. Certain
# kinds of edits cause these ranges to be deleted: Inserting
# or deleting a line just before a breakpoint, and certain
# deletions prior to a breakpoint. These issues need to be
# investigated and understood. It's not clear if they are
# Tk issues or IDLE issues, or whether they can actually
# be fixed. Since a modified file has to be saved before it is
# run, and since self.breakpoints (from which the subprocess
# debugger is loaded) is updated during the save, the visible
# breaks stay synched with the subprocess even if one of these
# unexpected breakpoint deletions occurs.
breaks = self.breakpoints
filename = self.io.filename
try:
lines = open(self.breakpointPath,"r").readlines()
except IOError:
lines = []
new_file = open(self.breakpointPath,"w")
for line in lines:
if not line.startswith(filename + '='):
new_file.write(line)
self.update_breakpoints()
breaks = self.breakpoints
if breaks:
new_file.write(filename + '=' + str(breaks) + '\n')
new_file.close()
def restore_file_breaks(self):
self.text.update() # this enables setting "BREAK" tags to be visible
filename = self.io.filename
if filename is None:
return
if os.path.isfile(self.breakpointPath):
lines = open(self.breakpointPath,"r").readlines()
for line in lines:
if line.startswith(filename + '='):
breakpoint_linenumbers = eval(line[len(filename)+1:])
for breakpoint_linenumber in breakpoint_linenumbers:
self.set_breakpoint(breakpoint_linenumber)
def update_breakpoints(self):
"Retrieves all the breakpoints in the current window"
text = self.text
ranges = text.tag_ranges("BREAK")
linenumber_list = self.ranges_to_linenumbers(ranges)
self.breakpoints = linenumber_list
def ranges_to_linenumbers(self, ranges):
lines = []
for index in range(0, len(ranges), 2):
lineno = int(float(ranges[index]))
end = int(float(ranges[index+1]))
while lineno < end:
lines.append(lineno)
lineno += 1
return lines
# XXX 13 Dec 2002 KBK Not used currently
# def saved_change_hook(self):
# "Extend base method - clear breaks if module is modified"
# if not self.get_saved():
# self.clear_file_breaks()
# EditorWindow.saved_change_hook(self)
def _close(self):
"Extend base method - clear breaks when module is closed"
self.clear_file_breaks()
EditorWindow._close(self)
class PyShellFileList(FileList):
"Extend base class: IDLE supports a shell and breakpoints"
# override FileList's class variable, instances return PyShellEditorWindow
# instead of EditorWindow when new edit windows are created.
EditorWindow = PyShellEditorWindow
pyshell = None
def open_shell(self, event=None):
if self.pyshell:
self.pyshell.top.wakeup()
else:
self.pyshell = PyShell(self)
if self.pyshell:
if not self.pyshell.begin():
return None
return self.pyshell
class ModifiedColorDelegator(ColorDelegator):
"Extend base class: colorizer for the shell window itself"
def __init__(self):
ColorDelegator.__init__(self)
self.LoadTagDefs()
def recolorize_main(self):
self.tag_remove("TODO", "1.0", "iomark")
self.tag_add("SYNC", "1.0", "iomark")
ColorDelegator.recolorize_main(self)
def LoadTagDefs(self):
ColorDelegator.LoadTagDefs(self)
theme = idleConf.GetOption('main','Theme','name')
self.tagdefs.update({
"stdin": {'background':None,'foreground':None},
"stdout": idleConf.GetHighlight(theme, "stdout"),
"stderr": idleConf.GetHighlight(theme, "stderr"),
"console": idleConf.GetHighlight(theme, "console"),
})
class ModifiedUndoDelegator(UndoDelegator):
"Extend base class: forbid insert/delete before the I/O mark"
def insert(self, index, chars, tags=None):
try:
if self.delegate.compare(index, "<", "iomark"):
self.delegate.bell()
return
except TclError:
pass
UndoDelegator.insert(self, index, chars, tags)
def delete(self, index1, index2=None):
try:
if self.delegate.compare(index1, "<", "iomark"):
self.delegate.bell()
return
except TclError:
pass
UndoDelegator.delete(self, index1, index2)
class MyRPCClient(rpc.RPCClient):
def handle_EOF(self):
"Override the base class - just re-raise EOFError"
raise EOFError
class ModifiedInterpreter(InteractiveInterpreter):
def __init__(self, tkconsole):
self.tkconsole = tkconsole
locals = sys.modules['__main__'].__dict__
InteractiveInterpreter.__init__(self, locals=locals)
self.save_warnings_filters = None
self.restarting = False
self.subprocess_arglist = self.build_subprocess_arglist()
port = 8833
rpcclt = None
rpcpid = None
def spawn_subprocess(self):
args = self.subprocess_arglist
self.rpcpid = os.spawnv(os.P_NOWAIT, sys.executable, args)
def build_subprocess_arglist(self):
w = ['-W' + s for s in sys.warnoptions]
if 1/2 > 0: # account for new division
w.append('-Qnew')
# Maybe IDLE is installed and is being accessed via sys.path,
# or maybe it's not installed and the idle.py script is being
# run from the IDLE source directory.
del_exitf = idleConf.GetOption('main', 'General', 'delete-exitfunc',
default=False, type='bool')
if __name__ == 'idlelib.PyShell':
command = "__import__('idlelib.run').run.main(%r)" % (del_exitf,)
else:
command = "__import__('run').main(%r)" % (del_exitf,)
if sys.platform[:3] == 'win' and ' ' in sys.executable:
# handle embedded space in path by quoting the argument
decorated_exec = '"%s"' % sys.executable
else:
decorated_exec = sys.executable
return [decorated_exec] + w + ["-c", command, str(self.port)]
def start_subprocess(self):
# spawning first avoids passing a listening socket to the subprocess
self.spawn_subprocess()
#time.sleep(20) # test to simulate GUI not accepting connection
addr = (LOCALHOST, self.port)
# Idle starts listening for connection on localhost
for i in range(3):
time.sleep(i)
try:
self.rpcclt = MyRPCClient(addr)
break
except socket.error, err:
pass
else:
self.display_port_binding_error()
return None
# Accept the connection from the Python execution server
self.rpcclt.listening_sock.settimeout(10)
try:
self.rpcclt.accept()
except socket.timeout, err:
self.display_no_subprocess_error()
return None
self.rpcclt.register("stdin", self.tkconsole)
self.rpcclt.register("stdout", self.tkconsole.stdout)
self.rpcclt.register("stderr", self.tkconsole.stderr)
self.rpcclt.register("flist", self.tkconsole.flist)
self.rpcclt.register("linecache", linecache)
self.rpcclt.register("interp", self)
self.transfer_path()
self.poll_subprocess()
return self.rpcclt
def restart_subprocess(self):
if self.restarting:
return self.rpcclt
self.restarting = True
# close only the subprocess debugger
debug = self.getdebugger()
if debug:
try:
# Only close subprocess debugger, don't unregister gui_adap!
RemoteDebugger.close_subprocess_debugger(self.rpcclt)
except:
pass
# Kill subprocess, spawn a new one, accept connection.
self.rpcclt.close()
self.unix_terminate()
console = self.tkconsole
was_executing = console.executing
console.executing = False
self.spawn_subprocess()
try:
self.rpcclt.accept()
except socket.timeout, err:
self.display_no_subprocess_error()
return None
self.transfer_path()
# annotate restart in shell window and mark it
console.text.delete("iomark", "end-1c")
if was_executing:
console.write('\n')
console.showprompt()
halfbar = ((int(console.width) - 16) // 2) * '='
console.write(halfbar + ' RESTART ' + halfbar)
console.text.mark_set("restart", "end-1c")
console.text.mark_gravity("restart", "left")
console.showprompt()
# restart subprocess debugger
if debug:
# Restarted debugger connects to current instance of debug GUI
gui = RemoteDebugger.restart_subprocess_debugger(self.rpcclt)
# reload remote debugger breakpoints for all PyShellEditWindows
debug.load_breakpoints()
self.restarting = False
return self.rpcclt
def __request_interrupt(self):
self.rpcclt.remotecall("exec", "interrupt_the_server", (), {})
def interrupt_subprocess(self):
threading.Thread(target=self.__request_interrupt).start()
def kill_subprocess(self):
try:
self.rpcclt.close()
except AttributeError: # no socket
pass
self.unix_terminate()
self.tkconsole.executing = False
self.rpcclt = None
def unix_terminate(self):
"UNIX: make sure subprocess is terminated and collect status"
if hasattr(os, 'kill'):
try:
os.kill(self.rpcpid, SIGTERM)
except OSError:
# process already terminated:
return
else:
try:
os.waitpid(self.rpcpid, 0)
except OSError:
return
def transfer_path(self):
self.runcommand("""if 1:
import sys as _sys
_sys.path = %r
del _sys
\n""" % (sys.path,))
active_seq = None
def poll_subprocess(self):
clt = self.rpcclt
if clt is None:
return
try:
response = clt.pollresponse(self.active_seq, wait=0.05)
except (EOFError, IOError, KeyboardInterrupt):
# lost connection or subprocess terminated itself, restart
# [the KBI is from rpc.SocketIO.handle_EOF()]
if self.tkconsole.closing:
return
response = None
self.restart_subprocess()
if response:
self.tkconsole.resetoutput()
self.active_seq = None
how, what = response
console = self.tkconsole.console
if how == "OK":
if what is not None:
print >>console, repr(what)
elif how == "EXCEPTION":
if self.tkconsole.getvar("<<toggle-jit-stack-viewer>>"):
self.remote_stack_viewer()
elif how == "ERROR":
errmsg = "PyShell.ModifiedInterpreter: Subprocess ERROR:\n"
print >>sys.__stderr__, errmsg, what
print >>console, errmsg, what
# we received a response to the currently active seq number:
try:
self.tkconsole.endexecuting()
except AttributeError: # shell may have closed
pass
# Reschedule myself
if not self.tkconsole.closing:
self.tkconsole.text.after(self.tkconsole.pollinterval,
self.poll_subprocess)
debugger = None
def setdebugger(self, debugger):
self.debugger = debugger
def getdebugger(self):
return self.debugger
def open_remote_stack_viewer(self):
"""Initiate the remote stack viewer from a separate thread.
This method is called from the subprocess, and by returning from this
method we allow the subprocess to unblock. After a bit the shell
requests the subprocess to open the remote stack viewer which returns a
static object looking at the last exception. It is queried through
the RPC mechanism.
"""
self.tkconsole.text.after(300, self.remote_stack_viewer)
return
def remote_stack_viewer(self):
from idlelib import RemoteObjectBrowser
oid = self.rpcclt.remotequeue("exec", "stackviewer", ("flist",), {})
if oid is None:
self.tkconsole.root.bell()
return
item = RemoteObjectBrowser.StubObjectTreeItem(self.rpcclt, oid)
from idlelib.TreeWidget import ScrolledCanvas, TreeNode
top = Toplevel(self.tkconsole.root)
theme = idleConf.GetOption('main','Theme','name')
background = idleConf.GetHighlight(theme, 'normal')['background']
sc = ScrolledCanvas(top, bg=background, highlightthickness=0)
sc.frame.pack(expand=1, fill="both")
node = TreeNode(sc.canvas, None, item)
node.expand()
# XXX Should GC the remote tree when closing the window
gid = 0
def execsource(self, source):
"Like runsource() but assumes complete exec source"
filename = self.stuffsource(source)
self.execfile(filename, source)
def execfile(self, filename, source=None):
"Execute an existing file"
if source is None:
source = open(filename, "r").read()
try:
code = compile(source, filename, "exec")
except (OverflowError, SyntaxError):
self.tkconsole.resetoutput()
tkerr = self.tkconsole.stderr
print>>tkerr, '*** Error in script or command!\n'
print>>tkerr, 'Traceback (most recent call last):'
InteractiveInterpreter.showsyntaxerror(self, filename)
self.tkconsole.showprompt()
else:
self.runcode(code)
def runsource(self, source):
"Extend base class method: Stuff the source in the line cache first"
filename = self.stuffsource(source)
self.more = 0
self.save_warnings_filters = warnings.filters[:]
warnings.filterwarnings(action="error", category=SyntaxWarning)
if isinstance(source, types.UnicodeType):
from idlelib import IOBinding
try:
source = source.encode(IOBinding.encoding)
except UnicodeError:
self.tkconsole.resetoutput()
self.write("Unsupported characters in input\n")
return
try:
# InteractiveInterpreter.runsource() calls its runcode() method,
# which is overridden (see below)
return InteractiveInterpreter.runsource(self, source, filename)
finally:
if self.save_warnings_filters is not None:
warnings.filters[:] = self.save_warnings_filters
self.save_warnings_filters = None
def stuffsource(self, source):
"Stuff source in the filename cache"
filename = "<pyshell#%d>" % self.gid
self.gid = self.gid + 1
lines = source.split("\n")
linecache.cache[filename] = len(source)+1, 0, lines, filename
return filename
def prepend_syspath(self, filename):
"Prepend sys.path with file's directory if not already included"
self.runcommand("""if 1:
_filename = %r
import sys as _sys
from os.path import dirname as _dirname
_dir = _dirname(_filename)
if not _dir in _sys.path:
_sys.path.insert(0, _dir)
del _filename, _sys, _dirname, _dir
\n""" % (filename,))
def showsyntaxerror(self, filename=None):
"""Extend base class method: Add Colorizing
Color the offending position instead of printing it and pointing at it
with a caret.
"""
text = self.tkconsole.text
stuff = self.unpackerror()
if stuff:
msg, lineno, offset, line = stuff
if lineno == 1:
pos = "iomark + %d chars" % (offset-1)
else:
pos = "iomark linestart + %d lines + %d chars" % \
(lineno-1, offset-1)
text.tag_add("ERROR", pos)
text.see(pos)
char = text.get(pos)
if char and char in IDENTCHARS:
text.tag_add("ERROR", pos + " wordstart", pos)
self.tkconsole.resetoutput()
self.write("SyntaxError: %s\n" % str(msg))
else:
self.tkconsole.resetoutput()
InteractiveInterpreter.showsyntaxerror(self, filename)
self.tkconsole.showprompt()
def unpackerror(self):
type, value, tb = sys.exc_info()
ok = type is SyntaxError
if ok:
try:
msg, (dummy_filename, lineno, offset, line) = value
if not offset:
offset = 0
except:
ok = 0
if ok:
return msg, lineno, offset, line
else:
return None
def showtraceback(self):
"Extend base class method to reset output properly"
self.tkconsole.resetoutput()
self.checklinecache()
InteractiveInterpreter.showtraceback(self)
if self.tkconsole.getvar("<<toggle-jit-stack-viewer>>"):
self.tkconsole.open_stack_viewer()
def checklinecache(self):
c = linecache.cache
for key in c.keys():
if key[:1] + key[-1:] != "<>":
del c[key]
def runcommand(self, code):
"Run the code without invoking the debugger"
# The code better not raise an exception!
if self.tkconsole.executing:
self.display_executing_dialog()
return 0
if self.rpcclt:
self.rpcclt.remotequeue("exec", "runcode", (code,), {})
else:
exec code in self.locals
return 1
def runcode(self, code):
"Override base class method"
if self.tkconsole.executing:
self.interp.restart_subprocess()
self.checklinecache()
if self.save_warnings_filters is not None:
warnings.filters[:] = self.save_warnings_filters
self.save_warnings_filters = None
debugger = self.debugger
try:
self.tkconsole.beginexecuting()
if not debugger and self.rpcclt is not None:
self.active_seq = self.rpcclt.asyncqueue("exec", "runcode",
(code,), {})
elif debugger:
debugger.run(code, self.locals)
else:
exec code in self.locals
except SystemExit:
if not self.tkconsole.closing:
if tkMessageBox.askyesno(
"Exit?",
"Do you want to exit altogether?",
default="yes",
master=self.tkconsole.text):
raise
else:
self.showtraceback()
else:
raise
except:
if use_subprocess:
print >>self.tkconsole.stderr, \
"IDLE internal error in runcode()"
self.showtraceback()
self.tkconsole.endexecuting()
else:
if self.tkconsole.canceled:
self.tkconsole.canceled = False
print >>self.tkconsole.stderr, "KeyboardInterrupt"
else:
self.showtraceback()
finally:
if not use_subprocess:
try:
self.tkconsole.endexecuting()
except AttributeError: # shell may have closed
pass
def write(self, s):
"Override base class method"
self.tkconsole.stderr.write(s)
def display_port_binding_error(self):
tkMessageBox.showerror(
"Port Binding Error",
"IDLE can't bind TCP/IP port 8833, which is necessary to "
"communicate with its Python execution server. Either "
"no networking is installed on this computer or another "
"process (another IDLE?) is using the port. Run IDLE with the -n "
"command line switch to start without a subprocess and refer to "
"Help/IDLE Help 'Running without a subprocess' for further "
"details.",
master=self.tkconsole.text)
def display_no_subprocess_error(self):
tkMessageBox.showerror(
"Subprocess Startup Error",
"IDLE's subprocess didn't make connection. Either IDLE can't "
"start a subprocess or personal firewall software is blocking "
"the connection.",
master=self.tkconsole.text)
def display_executing_dialog(self):
tkMessageBox.showerror(
"Already executing",
"The Python Shell window is already executing a command; "
"please wait until it is finished.",
master=self.tkconsole.text)
class PyShell(OutputWindow):
shell_title = "Python Shell"
# Override classes
ColorDelegator = ModifiedColorDelegator
UndoDelegator = ModifiedUndoDelegator
# Override menus
menu_specs = [
("file", "_File"),
("edit", "_Edit"),
("debug", "_Debug"),
("options", "_Options"),
("windows", "_Windows"),
("help", "_Help"),
]
if macosxSupport.runningAsOSXApp():
del menu_specs[-3]
menu_specs[-2] = ("windows", "_Window")
# New classes
from idlelib.IdleHistory import History
def __init__(self, flist=None):
if use_subprocess:
ms = self.menu_specs
if ms[2][0] != "shell":
ms.insert(2, ("shell", "She_ll"))
self.interp = ModifiedInterpreter(self)
if flist is None:
root = Tk()
fixwordbreaks(root)
root.withdraw()
flist = PyShellFileList(root)
#
OutputWindow.__init__(self, flist, None, None)
#
## self.config(usetabs=1, indentwidth=8, context_use_ps1=1)
self.usetabs = True
# indentwidth must be 8 when using tabs. See note in EditorWindow:
self.indentwidth = 8
self.context_use_ps1 = True
#
text = self.text
text.configure(wrap="char")
text.bind("<<newline-and-indent>>", self.enter_callback)
text.bind("<<plain-newline-and-indent>>", self.linefeed_callback)
text.bind("<<interrupt-execution>>", self.cancel_callback)
text.bind("<<end-of-file>>", self.eof_callback)
text.bind("<<open-stack-viewer>>", self.open_stack_viewer)
text.bind("<<toggle-debugger>>", self.toggle_debugger)
text.bind("<<toggle-jit-stack-viewer>>", self.toggle_jit_stack_viewer)
if use_subprocess:
text.bind("<<view-restart>>", self.view_restart_mark)
text.bind("<<restart-shell>>", self.restart_shell)
#
self.save_stdout = sys.stdout
self.save_stderr = sys.stderr
self.save_stdin = sys.stdin
from idlelib import IOBinding
self.stdout = PseudoFile(self, "stdout", IOBinding.encoding)
self.stderr = PseudoFile(self, "stderr", IOBinding.encoding)
self.console = PseudoFile(self, "console", IOBinding.encoding)
if not use_subprocess:
sys.stdout = self.stdout
sys.stderr = self.stderr
sys.stdin = self
#
self.history = self.History(self.text)
#
self.pollinterval = 50 # millisec
def get_standard_extension_names(self):
return idleConf.GetExtensions(shell_only=True)
reading = False
executing = False
canceled = False
endoffile = False
closing = False
def set_warning_stream(self, stream):
global warning_stream
warning_stream = stream
def get_warning_stream(self):
return warning_stream
def toggle_debugger(self, event=None):
if self.executing:
tkMessageBox.showerror("Don't debug now",
"You can only toggle the debugger when idle",
master=self.text)
self.set_debugger_indicator()
return "break"
else:
db = self.interp.getdebugger()
if db:
self.close_debugger()
else:
self.open_debugger()
def set_debugger_indicator(self):
db = self.interp.getdebugger()
self.setvar("<<toggle-debugger>>", not not db)
def toggle_jit_stack_viewer(self, event=None):
pass # All we need is the variable
def close_debugger(self):
db = self.interp.getdebugger()
if db:
self.interp.setdebugger(None)
db.close()
if self.interp.rpcclt:
RemoteDebugger.close_remote_debugger(self.interp.rpcclt)
self.resetoutput()
self.console.write("[DEBUG OFF]\n")
sys.ps1 = ">>> "
self.showprompt()
self.set_debugger_indicator()
def open_debugger(self):
if self.interp.rpcclt:
dbg_gui = RemoteDebugger.start_remote_debugger(self.interp.rpcclt,
self)
else:
dbg_gui = Debugger.Debugger(self)
self.interp.setdebugger(dbg_gui)
dbg_gui.load_breakpoints()
sys.ps1 = "[DEBUG ON]\n>>> "
self.showprompt()
self.set_debugger_indicator()
def beginexecuting(self):
"Helper for ModifiedInterpreter"
self.resetoutput()
self.executing = 1
def endexecuting(self):
"Helper for ModifiedInterpreter"
self.executing = 0
self.canceled = 0
self.showprompt()
def close(self):
"Extend EditorWindow.close()"
if self.executing:
response = tkMessageBox.askokcancel(
"Kill?",
"The program is still running!\n Do you want to kill it?",
default="ok",
parent=self.text)
if response is False:
return "cancel"
if self.reading:
self.top.quit()
self.canceled = True
self.closing = True
# Wait for poll_subprocess() rescheduling to stop
self.text.after(2 * self.pollinterval, self.close2)
def close2(self):
return EditorWindow.close(self)
def _close(self):
"Extend EditorWindow._close(), shut down debugger and execution server"
self.close_debugger()
if use_subprocess:
self.interp.kill_subprocess()
# Restore std streams
sys.stdout = self.save_stdout
sys.stderr = self.save_stderr
sys.stdin = self.save_stdin
# Break cycles
self.interp = None
self.console = None
self.flist.pyshell = None
self.history = None
EditorWindow._close(self)
def ispythonsource(self, filename):
"Override EditorWindow method: never remove the colorizer"
return True
def short_title(self):
return self.shell_title
COPYRIGHT = \
'Type "copyright", "credits" or "license()" for more information.'
firewallmessage = """
****************************************************************
Personal firewall software may warn about the connection IDLE
makes to its subprocess using this computer's internal loopback
interface. This connection is not visible on any external
interface and no data is sent to or received from the Internet.
****************************************************************
"""
def begin(self):
self.resetoutput()
if use_subprocess:
nosub = ''
client = self.interp.start_subprocess()
if not client:
self.close()
return False
else:
nosub = "==== No Subprocess ===="
self.write("Python %s on %s\n%s\n%s\nIDLE %s %s\n" %
(sys.version, sys.platform, self.COPYRIGHT,
self.firewallmessage, idlever.IDLE_VERSION, nosub))
self.showprompt()
import Tkinter
Tkinter._default_root = None # 03Jan04 KBK What's this?
return True
def readline(self):
save = self.reading
try:
self.reading = 1
self.top.mainloop() # nested mainloop()
finally:
self.reading = save
line = self.text.get("iomark", "end-1c")
if len(line) == 0: # may be EOF if we quit our mainloop with Ctrl-C
line = "\n"
if isinstance(line, unicode):
from idlelib import IOBinding
try:
line = line.encode(IOBinding.encoding)
except UnicodeError:
pass
self.resetoutput()
if self.canceled:
self.canceled = 0
if not use_subprocess:
raise KeyboardInterrupt
if self.endoffile:
self.endoffile = 0
line = ""
return line
def isatty(self):
return True
def cancel_callback(self, event=None):
try:
if self.text.compare("sel.first", "!=", "sel.last"):
return # Active selection -- always use default binding
except:
pass
if not (self.executing or self.reading):
self.resetoutput()
self.interp.write("KeyboardInterrupt\n")
self.showprompt()
return "break"
self.endoffile = 0
self.canceled = 1
if (self.executing and self.interp.rpcclt):
if self.interp.getdebugger():
self.interp.restart_subprocess()
else:
self.interp.interrupt_subprocess()
if self.reading:
self.top.quit() # exit the nested mainloop() in readline()
return "break"
def eof_callback(self, event):
if self.executing and not self.reading:
return # Let the default binding (delete next char) take over
if not (self.text.compare("iomark", "==", "insert") and
self.text.compare("insert", "==", "end-1c")):
return # Let the default binding (delete next char) take over
if not self.executing:
self.resetoutput()
self.close()
else:
self.canceled = 0
self.endoffile = 1
self.top.quit()
return "break"
def linefeed_callback(self, event):
# Insert a linefeed without entering anything (still autoindented)
if self.reading:
self.text.insert("insert", "\n")
self.text.see("insert")
else:
self.newline_and_indent_event(event)
return "break"
def enter_callback(self, event):
if self.executing and not self.reading:
return # Let the default binding (insert '\n') take over
# If some text is selected, recall the selection
        # (but only if this is before the I/O mark)
try:
sel = self.text.get("sel.first", "sel.last")
if sel:
if self.text.compare("sel.last", "<=", "iomark"):
self.recall(sel, event)
return "break"
except:
pass
# If we're strictly before the line containing iomark, recall
# the current line, less a leading prompt, less leading or
# trailing whitespace
if self.text.compare("insert", "<", "iomark linestart"):
# Check if there's a relevant stdin range -- if so, use it
prev = self.text.tag_prevrange("stdin", "insert")
if prev and self.text.compare("insert", "<", prev[1]):
self.recall(self.text.get(prev[0], prev[1]), event)
return "break"
next = self.text.tag_nextrange("stdin", "insert")
if next and self.text.compare("insert lineend", ">=", next[0]):
self.recall(self.text.get(next[0], next[1]), event)
return "break"
# No stdin mark -- just get the current line, less any prompt
indices = self.text.tag_nextrange("console", "insert linestart")
if indices and \
self.text.compare(indices[0], "<=", "insert linestart"):
self.recall(self.text.get(indices[1], "insert lineend"), event)
else:
self.recall(self.text.get("insert linestart", "insert lineend"), event)
return "break"
# If we're between the beginning of the line and the iomark, i.e.
# in the prompt area, move to the end of the prompt
if self.text.compare("insert", "<", "iomark"):
self.text.mark_set("insert", "iomark")
# If we're in the current input and there's only whitespace
# beyond the cursor, erase that whitespace first
s = self.text.get("insert", "end-1c")
if s and not s.strip():
self.text.delete("insert", "end-1c")
# If we're in the current input before its last line,
# insert a newline right at the insert point
if self.text.compare("insert", "<", "end-1c linestart"):
self.newline_and_indent_event(event)
return "break"
# We're in the last line; append a newline and submit it
self.text.mark_set("insert", "end-1c")
if self.reading:
self.text.insert("insert", "\n")
self.text.see("insert")
else:
self.newline_and_indent_event(event)
self.text.tag_add("stdin", "iomark", "end-1c")
self.text.update_idletasks()
if self.reading:
self.top.quit() # Break out of recursive mainloop() in raw_input()
else:
self.runit()
return "break"
def recall(self, s, event):
# remove leading and trailing empty or whitespace lines
s = re.sub(r'^\s*\n', '' , s)
s = re.sub(r'\n\s*$', '', s)
lines = s.split('\n')
self.text.undo_block_start()
try:
self.text.tag_remove("sel", "1.0", "end")
self.text.mark_set("insert", "end-1c")
prefix = self.text.get("insert linestart", "insert")
if prefix.rstrip().endswith(':'):
self.newline_and_indent_event(event)
prefix = self.text.get("insert linestart", "insert")
self.text.insert("insert", lines[0].strip())
if len(lines) > 1:
orig_base_indent = re.search(r'^([ \t]*)', lines[0]).group(0)
new_base_indent = re.search(r'^([ \t]*)', prefix).group(0)
for line in lines[1:]:
if line.startswith(orig_base_indent):
# replace orig base indentation with new indentation
line = new_base_indent + line[len(orig_base_indent):]
self.text.insert('insert', '\n'+line.rstrip())
finally:
self.text.see("insert")
self.text.undo_block_stop()
def runit(self):
line = self.text.get("iomark", "end-1c")
# Strip off last newline and surrounding whitespace.
# (To allow you to hit return twice to end a statement.)
i = len(line)
while i > 0 and line[i-1] in " \t":
i = i-1
if i > 0 and line[i-1] == "\n":
i = i-1
while i > 0 and line[i-1] in " \t":
i = i-1
line = line[:i]
more = self.interp.runsource(line)
def open_stack_viewer(self, event=None):
if self.interp.rpcclt:
return self.interp.remote_stack_viewer()
try:
sys.last_traceback
except:
tkMessageBox.showerror("No stack trace",
"There is no stack trace yet.\n"
"(sys.last_traceback is not defined)",
master=self.text)
return
from idlelib.StackViewer import StackBrowser
sv = StackBrowser(self.root, self.flist)
def view_restart_mark(self, event=None):
self.text.see("iomark")
self.text.see("restart")
def restart_shell(self, event=None):
self.interp.restart_subprocess()
def showprompt(self):
self.resetoutput()
try:
s = str(sys.ps1)
except:
s = ""
self.console.write(s)
self.text.mark_set("insert", "end-1c")
self.set_line_and_column()
self.io.reset_undo()
def resetoutput(self):
source = self.text.get("iomark", "end-1c")
if self.history:
self.history.history_store(source)
if self.text.get("end-2c") != "\n":
self.text.insert("end-1c", "\n")
self.text.mark_set("iomark", "end-1c")
self.set_line_and_column()
sys.stdout.softspace = 0
def write(self, s, tags=()):
try:
self.text.mark_gravity("iomark", "right")
OutputWindow.write(self, s, tags, "iomark")
self.text.mark_gravity("iomark", "left")
except:
pass
if self.canceled:
self.canceled = 0
if not use_subprocess:
raise KeyboardInterrupt
class PseudoFile(object):
def __init__(self, shell, tags, encoding=None):
self.shell = shell
self.tags = tags
self.softspace = 0
self.encoding = encoding
def write(self, s):
self.shell.write(s, self.tags)
def writelines(self, lines):
for line in lines:
self.write(line)
def flush(self):
pass
def isatty(self):
return True
usage_msg = """\
USAGE: idle [-deins] [-t title] [file]*
idle [-dns] [-t title] (-c cmd | -r file) [arg]*
idle [-dns] [-t title] - [arg]*
-h print this help message and exit
-n run IDLE without a subprocess (see Help/IDLE Help for details)
The following options will override the IDLE 'settings' configuration:
-e open an edit window
-i open a shell window
The following options imply -i and will open a shell:
-c cmd run the command in a shell, or
-r file run script from file
-d enable the debugger
-s run $IDLESTARTUP or $PYTHONSTARTUP before anything else
-t title set title of shell window
A default edit window will be bypassed when -c, -r, or - are used.
[arg]* are passed to the command (-c) or script (-r) in sys.argv[1:].
Examples:
idle
Open an edit window or shell depending on IDLE's configuration.
idle foo.py foobar.py
Edit the files, also open a shell if configured to start with shell.
idle -est "Baz" foo.py
Run $IDLESTARTUP or $PYTHONSTARTUP, edit foo.py, and open a shell
window with the title "Baz".
idle -c "import sys; print sys.argv" "foo"
Open a shell window and run the command, passing "-c" in sys.argv[0]
and "foo" in sys.argv[1].
idle -d -s -r foo.py "Hello World"
Open a shell window, run a startup script, enable the debugger, and
run foo.py, passing "foo.py" in sys.argv[0] and "Hello World" in
sys.argv[1].
echo "import sys; print sys.argv" | idle - "foobar"
Open a shell window, run the script piped in, passing '' in sys.argv[0]
and "foobar" in sys.argv[1].
"""
def main():
global flist, root, use_subprocess
use_subprocess = True
enable_shell = False
enable_edit = False
debug = False
cmd = None
script = None
startup = False
try:
opts, args = getopt.getopt(sys.argv[1:], "c:deihnr:st:")
except getopt.error, msg:
sys.stderr.write("Error: %s\n" % str(msg))
sys.stderr.write(usage_msg)
sys.exit(2)
for o, a in opts:
if o == '-c':
cmd = a
enable_shell = True
if o == '-d':
debug = True
enable_shell = True
if o == '-e':
enable_edit = True
if o == '-h':
sys.stdout.write(usage_msg)
sys.exit()
if o == '-i':
enable_shell = True
if o == '-n':
use_subprocess = False
if o == '-r':
script = a
if os.path.isfile(script):
pass
else:
print "No script file: ", script
sys.exit()
enable_shell = True
if o == '-s':
startup = True
enable_shell = True
if o == '-t':
PyShell.shell_title = a
enable_shell = True
if args and args[0] == '-':
cmd = sys.stdin.read()
enable_shell = True
# process sys.argv and sys.path:
for i in range(len(sys.path)):
sys.path[i] = os.path.abspath(sys.path[i])
if args and args[0] == '-':
sys.argv = [''] + args[1:]
elif cmd:
sys.argv = ['-c'] + args
elif script:
sys.argv = [script] + args
elif args:
enable_edit = True
pathx = []
for filename in args:
pathx.append(os.path.dirname(filename))
for dir in pathx:
dir = os.path.abspath(dir)
if dir not in sys.path:
sys.path.insert(0, dir)
else:
dir = os.getcwd()
if not dir in sys.path:
sys.path.insert(0, dir)
# check the IDLE settings configuration (but command line overrides)
edit_start = idleConf.GetOption('main', 'General',
'editor-on-startup', type='bool')
enable_edit = enable_edit or edit_start
enable_shell = enable_shell or not edit_start
# start editor and/or shell windows:
root = Tk(className="Idle")
fixwordbreaks(root)
root.withdraw()
flist = PyShellFileList(root)
macosxSupport.setupApp(root, flist)
if enable_edit:
if not (cmd or script):
for filename in args:
flist.open(filename)
if not args:
flist.new()
if enable_shell:
shell = flist.open_shell()
if not shell:
return # couldn't open shell
if macosxSupport.runningAsOSXApp() and flist.dict:
# On OSX: when the user has double-clicked on a file that causes
# IDLE to be launched the shell window will open just in front of
# the file she wants to see. Lower the interpreter window when
# there are open files.
shell.top.lower()
shell = flist.pyshell
# handle remaining options:
if debug:
shell.open_debugger()
if startup:
filename = os.environ.get("IDLESTARTUP") or \
os.environ.get("PYTHONSTARTUP")
if filename and os.path.isfile(filename):
shell.interp.execfile(filename)
if shell and cmd or script:
shell.interp.runcommand("""if 1:
import sys as _sys
_sys.argv = %r
del _sys
\n""" % (sys.argv,))
if cmd:
shell.interp.execsource(cmd)
elif script:
shell.interp.prepend_syspath(script)
shell.interp.execfile(script)
root.mainloop()
root.destroy()
if __name__ == "__main__":
sys.modules['PyShell'] = sys.modules['__main__']
main()
| [] | [] | ["PYTHONSTARTUP", "IDLESTARTUP"] | [] | ["PYTHONSTARTUP", "IDLESTARTUP"] | python | 2 | 0 | |
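The row above annotates IDLESTARTUP and PYTHONSTARTUP as constant getenv arguments. As a minimal illustration of that lookup-with-fallback pattern (the helper name startupFile is illustrative, and Go is used here only to keep all added sketches in a single language), a sketch could look like:

// Hypothetical sketch, not part of the dataset row: prefer IDLESTARTUP over
// PYTHONSTARTUP and only accept values that point at an existing file.
package main

import (
	"fmt"
	"os"
)

func startupFile() (string, bool) {
	for _, key := range []string{"IDLESTARTUP", "PYTHONSTARTUP"} {
		if v := os.Getenv(key); v != "" {
			if info, err := os.Stat(v); err == nil && !info.IsDir() {
				return v, true
			}
		}
	}
	return "", false
}

func main() {
	if f, ok := startupFile(); ok {
		fmt.Println("would execute startup file:", f)
	} else {
		fmt.Println("no startup file configured")
	}
}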
bptc/testdata/bptc.go
|
package bptc
import (
"fmt"
"os"
"github.com/polkabana/go-dmr"
"github.com/polkabana/go-dmr/fec"
)
// deinterleave matrix
var dm = [256]uint8{}
var debug bool
func init() {
var i uint32
for i = 0; i < 0x100; i++ {
dm[i] = uint8((i * 181) % 196)
}
debug = os.Getenv("DEBUG_DMR_BPTC") != ""
}
func dump(bits []byte) {
for row := 0; row < 13; row++ {
if row == 0 {
fmt.Printf("col # ")
for col := 0; col < 15; col++ {
fmt.Printf("%02d ", col+1)
if col == 10 {
fmt.Print("| ")
}
}
fmt.Println("")
}
if row == 9 {
fmt.Println(" ------------------------------- ------------")
}
for col := 0; col < 15; col++ {
if col == 0 {
fmt.Printf("row #%02d: ", row+1)
}
fmt.Printf(" %d ", bits[col+row*15+1])
if col == 10 {
fmt.Print("| ")
}
}
fmt.Println("")
}
}
func Decode(info, data []byte) error {
if len(info) < 196 {
return fmt.Errorf("bptc: info size %d too small, need at least 196 bits", len(info))
}
if len(data) < 12 {
return fmt.Errorf("bptc: data size %d too small, need at least 12 bytes", len(data))
}
var (
i, j, k uint32
datafr = make([]byte, 196)
extracted = make([]byte, 96)
)
// Deinterleave bits
for i = 1; i < 197; i++ {
datafr[i-1] = info[dm[i]]
}
if debug {
dump(datafr)
}
// Zero reserved bits
for i = 0; i < 3; i++ {
datafr[0*15+i] = 0
}
for i = 0; i < 15; i++ {
var codeword uint32
for j = 0; j < 13; j++ {
codeword <<= 1
codeword |= uint32(datafr[j*15+i])
}
fec.Hamming15_11_3_Correct(&codeword)
codeword &= 0x01ff
for j = 0; j < 9; j++ {
datafr[j*15+i] = byte((codeword >> (8 - j)) & 1)
}
}
for j = 0; j < 9; j++ {
var codeword uint32
for i = 0; i < 15; i++ {
codeword <<= 1
codeword |= uint32(datafr[j*15+i])
}
fec.Hamming15_11_3_Correct(&codeword)
for i = 0; i < 11; i++ {
datafr[j*15+10-i] = byte((codeword >> i) & 1)
}
}
// Extract data bits
for i, k = 3, 0; i < 11; i, k = i+1, k+1 {
extracted[k] = datafr[0*15+i]
}
for j = 1; j < 9; j++ {
for i = 0; i < 11; i, k = i+1, k+1 {
extracted[k] = datafr[j*15+i]
}
}
copy(data, dmr.BitsToBytes(extracted))
return nil
}
func Encode(data, info []byte) error {
if len(data) < 12 {
return fmt.Errorf("bptc: data size %d too small, need at least 12 bytes", len(data))
}
if len(info) < 196 {
return fmt.Errorf("bptc: info size %d too small, need at least 196 bits", len(info))
}
var (
i, j, k uint32
datafr = make([]byte, 196)
extracted = make([]byte, 96)
)
copy(extracted, dmr.BytesToBits(data))
for i = 0; i < 9; i++ {
if i == 0 {
for j = 3; j < 11; j++ {
datafr[j+1] = extracted[k]
k++
}
} else {
for j = 0; j < 11; j++ {
datafr[j+i*15+1] = extracted[k]
k++
}
}
datafr[i*15+11+1] = 8
datafr[i*15+12+1] = 8
datafr[i*15+13+1] = 8
datafr[i*15+14+1] = 8
}
// Interleave bits
for i = 1; i < 197; i++ {
info[dm[i]] = datafr[i-1]
}
if debug {
dump(info)
}
return nil
}
| ["\"DEBUG_DMR_BPTC\""] | [] | ["DEBUG_DMR_BPTC"] | [] | ["DEBUG_DMR_BPTC"] | go | 1 | 0 | |
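The bptc.go row reads DEBUG_DMR_BPTC once at init time and treats any non-empty value as "debug on". A minimal sketch of that env-gated debug-flag pattern (a standalone toy program, not the library itself) might be:

// Hypothetical sketch: any non-empty DEBUG_DMR_BPTC enables debug dumps.
package main

import (
	"fmt"
	"os"
)

var debug = os.Getenv("DEBUG_DMR_BPTC") != ""

func main() {
	if debug {
		fmt.Println("bit-matrix dumps enabled")
	} else {
		fmt.Println("bit-matrix dumps disabled")
	}
}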
utils.go
|
package main
import (
"fmt"
"math"
"os"
"strconv"
"time"
)
func GetEnv() {
BTCapi = os.Getenv("BTC")
BTCTESTapi = os.Getenv("BTCTEST")
LTCapi = os.Getenv("LTC")
LTCTESTapi = os.Getenv("LTCTEST")
ETHapi = os.Getenv("ETH")
ROPSTENapi = os.Getenv("ROPSTEN")
RINKEBYapi = os.Getenv("RINKEBY")
DASHapi = os.Getenv("DASH")
ZCASHapi = os.Getenv("ZCASH")
BCHapi = os.Getenv("BCH")
BCHTESTapi = os.Getenv("BCHTEST")
}
func CoinMarketCapTicker() {
defer CoinMarketCapTicker()
fmt.Println("Loading Coin Market Cap Rates")
FetchCoinMarketCap()
time.Sleep(5 * time.Minute)
}
var renderFloatPrecisionMultipliers = [10]float64{
1,
10,
100,
1000,
10000,
100000,
1000000,
10000000,
100000000,
1000000000,
}
var renderFloatPrecisionRounders = [10]float64{
0.5,
0.05,
0.005,
0.0005,
0.00005,
0.000005,
0.0000005,
0.00000005,
0.000000005,
0.0000000005,
}
func RenderFloat(format string, amount string) string {
	n, _ := strconv.ParseFloat(amount, 64) // second argument is bitSize (32 or 64), not a base
if math.IsNaN(n) {
return "NaN"
}
if n > math.MaxFloat64 {
return "Infinity"
}
if n < -math.MaxFloat64 {
return "-Infinity"
}
precision := 2
decimalStr := "."
thousandStr := ","
positiveStr := ""
negativeStr := "-"
if len(format) > 0 {
precision = 9
thousandStr = ""
formatDirectiveChars := []rune(format)
formatDirectiveIndices := make([]int, 0)
for i, char := range formatDirectiveChars {
if char != '#' && char != '0' {
formatDirectiveIndices = append(formatDirectiveIndices, i)
}
}
if len(formatDirectiveIndices) > 0 {
if formatDirectiveIndices[0] == 0 {
if formatDirectiveChars[formatDirectiveIndices[0]] != '+' {
panic("RenderFloat(): invalid positive sign directive")
}
positiveStr = "+"
formatDirectiveIndices = formatDirectiveIndices[1:]
}
if len(formatDirectiveIndices) == 2 {
if (formatDirectiveIndices[1] - formatDirectiveIndices[0]) != 4 {
panic("RenderFloat(): thousands separator directive must be followed by 3 digit-specifiers")
}
thousandStr = string(formatDirectiveChars[formatDirectiveIndices[0]])
formatDirectiveIndices = formatDirectiveIndices[1:]
}
if len(formatDirectiveIndices) == 1 {
decimalStr = string(formatDirectiveChars[formatDirectiveIndices[0]])
precision = len(formatDirectiveChars) - formatDirectiveIndices[0] - 1
}
}
}
var signStr string
if n >= 0.000000001 {
signStr = positiveStr
} else if n <= -0.000000001 {
signStr = negativeStr
n = -n
} else {
signStr = ""
n = 0.0
}
intf, fracf := math.Modf(n + renderFloatPrecisionRounders[precision])
intStr := strconv.Itoa(int(intf))
if len(thousandStr) > 0 {
for i := len(intStr); i > 3; {
i -= 3
intStr = intStr[:i] + thousandStr + intStr[i:]
}
}
if precision == 0 {
return signStr + intStr
}
fracStr := strconv.Itoa(int(fracf * renderFloatPrecisionMultipliers[precision]))
if len(fracStr) < precision {
fracStr = "000000000000000"[:precision-len(fracStr)] + fracStr
}
return signStr + intStr + decimalStr + fracStr
}
| ["\"BTC\"", "\"BTCTEST\"", "\"LTC\"", "\"LTCTEST\"", "\"ETH\"", "\"ROPSTEN\"", "\"RINKEBY\"", "\"DASH\"", "\"ZCASH\"", "\"BCH\"", "\"BCHTEST\""] | [] | ["BCHTEST", "BCH", "LTCTEST", "ETH", "LTC", "ZCASH", "BTCTEST", "DASH", "BTC", "RINKEBY", "ROPSTEN"] | [] | ["BCHTEST", "BCH", "LTCTEST", "ETH", "LTC", "ZCASH", "BTCTEST", "DASH", "BTC", "RINKEBY", "ROPSTEN"] | go | 11 | 0 | |
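GetEnv in the utils.go row reads eleven per-chain endpoint variables into separate globals. A small sketch of the same idea, gathered into a map so unset entries are easy to report (loadEndpoints is an illustrative name, not part of the original file):

// Hypothetical sketch: collect endpoint variables and flag the missing ones.
package main

import (
	"fmt"
	"os"
)

func loadEndpoints(keys ...string) map[string]string {
	endpoints := make(map[string]string, len(keys))
	for _, key := range keys {
		endpoints[key] = os.Getenv(key) // empty string when the variable is unset
	}
	return endpoints
}

func main() {
	for key, url := range loadEndpoints("BTC", "BTCTEST", "LTC", "ETH", "DASH") {
		if url == "" {
			fmt.Println("warning: no API endpoint configured for", key)
		}
	}
}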
src/main/java/com/company/master/Master.java
|
package com.company.master;
import com.company.rpc.query.QueryClient;
import com.company.server.storage.ServerDataStorage;
import com.company.util.Util;
import java.net.UnknownHostException;
public class Master implements Runnable {
private final ServerDataStorage storage;
private String[] inetAddresses;
public Master() {
this(new ServerDataStorage());
}
public Master(ServerDataStorage storage) {
this.storage = storage;
try {
inetAddresses = System.getenv("DESTINATIONS").split(",");
} catch (Exception e) {
inetAddresses = new String[] {"localhost"};
}
}
@Override
public void run() {
while (true) {
startAllQuery();
try {
Thread.sleep(10000);
} catch (InterruptedException e) {
e.printStackTrace();
}
}
}
public static void main(String[] args) throws UnknownHostException, InterruptedException {
Master master = new Master();
Thread.sleep(15000);
while (true) {
master.startRandomQuery();
Thread.sleep(30000);
}
}
public ServerDataStorage getStorage() {
return storage;
}
private void startRandomQuery() {
var randomAddress = inetAddresses[Util.getRandomNumber(0, inetAddresses.length - 1)];
new Thread(new QueryClient(randomAddress, 8080, this)).start();
}
private void startAllQuery() {
for (var address : inetAddresses) {
new Thread(new QueryClient(address, 8080, this)).start();
}
}
public void startAllQueryBlocking() throws InterruptedException {
for (var address : inetAddresses) {
var t = new Thread(new QueryClient(address, 8080, this));
t.start();
t.join();
}
}
}
| ["\"DESTINATIONS\""] | [] | ["DESTINATIONS"] | [] | ["DESTINATIONS"] | java | 1 | 0 | |
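Master.java splits the comma-separated DESTINATIONS variable and falls back to localhost when it is unset. A sketch of that pattern (destinations is an illustrative helper; shown in Go for consistency with the other sketches):

// Hypothetical sketch: comma-separated env list with a localhost fallback.
package main

import (
	"fmt"
	"os"
	"strings"
)

func destinations() []string {
	raw := strings.TrimSpace(os.Getenv("DESTINATIONS"))
	if raw == "" {
		return []string{"localhost"}
	}
	return strings.Split(raw, ",")
}

func main() {
	fmt.Println(destinations())
}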
noxfile.py
|
# -*- coding: utf-8 -*-
#
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Generated by synthtool. DO NOT EDIT!
from __future__ import absolute_import
import os
import shutil
import nox
BLACK_VERSION = "black==19.10b0"
BLACK_PATHS = ["docs", "google", "tests", "noxfile.py", "setup.py"]
DEFAULT_PYTHON_VERSION = "3.8"
SYSTEM_TEST_PYTHON_VERSIONS = ["3.8"]
UNIT_TEST_PYTHON_VERSIONS = ["3.6", "3.7", "3.8", "3.9"]
# 'docfx' is excluded since it only needs to run in 'docs-presubmit'
nox.options.sessions = [
"unit",
"system",
"cover",
"lint",
"lint_setup_py",
"blacken",
"docs",
]
@nox.session(python=DEFAULT_PYTHON_VERSION)
def lint(session):
"""Run linters.
Returns a failure if the linters find linting errors or sufficiently
serious code quality issues.
"""
session.install("flake8", BLACK_VERSION)
session.run(
"black", "--check", *BLACK_PATHS,
)
session.run("flake8", "google", "tests")
@nox.session(python="3.6")
def blacken(session):
"""Run black.
Format code to uniform standard.
This currently uses Python 3.6 due to the automated Kokoro run of synthtool.
That run uses an image that doesn't have 3.6 installed. Before updating this
check the state of the `gcp_ubuntu_config` we use for that Kokoro run.
"""
session.install(BLACK_VERSION)
session.run(
"black", *BLACK_PATHS,
)
@nox.session(python=DEFAULT_PYTHON_VERSION)
def lint_setup_py(session):
"""Verify that setup.py is valid (including RST check)."""
session.install("docutils", "pygments")
session.run("python", "setup.py", "check", "--restructuredtext", "--strict")
def default(session):
# Install all test dependencies, then install this package in-place.
session.install("asyncmock", "pytest-asyncio")
session.install(
"mock", "pytest", "pytest-cov",
)
session.install("-e", ".")
# Run py.test against the unit tests.
session.run(
"py.test",
"--quiet",
"--cov=google/cloud",
"--cov=tests/unit",
"--cov-append",
"--cov-config=.coveragerc",
"--cov-report=",
"--cov-fail-under=0",
os.path.join("tests", "unit"),
*session.posargs,
)
@nox.session(python=UNIT_TEST_PYTHON_VERSIONS)
def unit(session):
"""Run the unit test suite."""
default(session)
@nox.session(python=SYSTEM_TEST_PYTHON_VERSIONS)
def system(session):
"""Run the system test suite."""
system_test_path = os.path.join("tests", "system.py")
system_test_folder_path = os.path.join("tests", "system")
# Check the value of `RUN_SYSTEM_TESTS` env var. It defaults to true.
if os.environ.get("RUN_SYSTEM_TESTS", "true") == "false":
session.skip("RUN_SYSTEM_TESTS is set to false, skipping")
# Sanity check: Only run tests if the environment variable is set.
if not os.environ.get("GOOGLE_APPLICATION_CREDENTIALS", ""):
session.skip("Credentials must be set via environment variable")
system_test_exists = os.path.exists(system_test_path)
system_test_folder_exists = os.path.exists(system_test_folder_path)
# Sanity check: only run tests if found.
if not system_test_exists and not system_test_folder_exists:
session.skip("System tests were not found")
# Use pre-release gRPC for system tests.
session.install("--pre", "grpcio")
# Install all test dependencies, then install this package into the
# virtualenv's dist-packages.
session.install(
"mock", "pytest", "google-cloud-testutils",
)
session.install("-e", ".")
# If mtls is enabled via environment variable
# GOOGLE_API_USE_CLIENT_CERTIFICATE, then pyopenssl is needed. Here we
# install it regardless.
session.install("pyopenssl")
# Run py.test against the system tests.
if system_test_exists:
session.run("py.test", "--quiet", system_test_path, *session.posargs)
if system_test_folder_exists:
session.run("py.test", "--quiet", system_test_folder_path, *session.posargs)
@nox.session(python=DEFAULT_PYTHON_VERSION)
def cover(session):
"""Run the final coverage report.
This outputs the coverage report aggregating coverage from the unit
test runs (not system test runs), and then erases coverage data.
"""
session.install("coverage", "pytest-cov")
session.run("coverage", "report", "--show-missing", "--fail-under=99")
session.run("coverage", "erase")
@nox.session(python=DEFAULT_PYTHON_VERSION)
def docs(session):
"""Build the docs for this library."""
session.install("-e", ".")
session.install("sphinx", "alabaster", "recommonmark")
shutil.rmtree(os.path.join("docs", "_build"), ignore_errors=True)
session.run(
"sphinx-build",
"-T", # show full traceback on exception
"-N", # no colors
"-b",
"html",
"-d",
os.path.join("docs", "_build", "doctrees", ""),
os.path.join("docs", ""),
os.path.join("docs", "_build", "html", ""),
)
@nox.session(python=DEFAULT_PYTHON_VERSION)
def docfx(session):
"""Build the docfx yaml files for this library."""
session.install("-e", ".")
# sphinx-docfx-yaml supports up to sphinx version 1.5.5.
# https://github.com/docascode/sphinx-docfx-yaml/issues/97
session.install("sphinx==1.5.5", "alabaster", "recommonmark", "sphinx-docfx-yaml")
shutil.rmtree(os.path.join("docs", "_build"), ignore_errors=True)
session.run(
"sphinx-build",
"-T", # show full traceback on exception
"-N", # no colors
"-D",
(
"extensions=sphinx.ext.autodoc,"
"sphinx.ext.autosummary,"
"docfx_yaml.extension,"
"sphinx.ext.intersphinx,"
"sphinx.ext.coverage,"
"sphinx.ext.napoleon,"
"sphinx.ext.todo,"
"sphinx.ext.viewcode,"
"recommonmark"
),
"-b",
"html",
"-d",
os.path.join("docs", "_build", "doctrees", ""),
os.path.join("docs", ""),
os.path.join("docs", "_build", "html", ""),
)
| [] | [] | ["RUN_SYSTEM_TESTS", "GOOGLE_APPLICATION_CREDENTIALS"] | [] | ["RUN_SYSTEM_TESTS", "GOOGLE_APPLICATION_CREDENTIALS"] | python | 2 | 0 | |
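In the noxfile.py row, RUN_SYSTEM_TESTS defaults to "true" and only an explicit "false" skips the suite. A sketch of that default-plus-override convention (lookupOrDefault is an illustrative helper mirroring os.environ.get, not an existing API):

// Hypothetical sketch: boolean-ish env toggle with a default of "true".
package main

import (
	"fmt"
	"os"
)

func lookupOrDefault(key, fallback string) string {
	if value, ok := os.LookupEnv(key); ok {
		return value
	}
	return fallback
}

func main() {
	if lookupOrDefault("RUN_SYSTEM_TESTS", "true") == "false" {
		fmt.Println("RUN_SYSTEM_TESTS is set to false, skipping")
		return
	}
	fmt.Println("running system tests")
}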
lib/Example/ExampleImpl.py
|
# -*- coding: utf-8 -*-
#BEGIN_HEADER
# The header block is where all import statements should live
import os
from Bio import SeqIO
from pprint import pprint, pformat
from AssemblyUtil.AssemblyUtilClient import AssemblyUtil
from KBaseReport.KBaseReportClient import KBaseReport
#END_HEADER
class Example:
'''
Module Name:
Example
Module Description:
A KBase module: Example
This sample module contains one small method - filter_contigs.
'''
######## WARNING FOR GEVENT USERS ####### noqa
# Since asynchronous IO can lead to methods - even the same method -
# interrupting each other, you must be *very* careful when using global
# state. A method could easily clobber the state set by another while
# the latter method is running.
######################################### noqa
VERSION = "0.0.1"
GIT_URL = ""
GIT_COMMIT_HASH = ""
#BEGIN_CLASS_HEADER
# Class variables and functions can be defined in this block
#END_CLASS_HEADER
# config contains contents of config file in a hash or None if it couldn't
# be found
def __init__(self, config):
#BEGIN_CONSTRUCTOR
# Any configuration parameters that are important should be parsed and
# saved in the constructor.
self.callback_url = os.environ['SDK_CALLBACK_URL']
self.shared_folder = config['scratch']
#END_CONSTRUCTOR
pass
def filter_contigs(self, ctx, params):
"""
The actual function is declared using 'funcdef' to specify the name
and input/return arguments to the function. For all typical KBase
Apps that run in the Narrative, your function should have the
'authentication required' modifier.
:param params: instance of type "FilterContigsParams" (A 'typedef'
can also be used to define compound or container objects, like
lists, maps, and structures. The standard KBase convention is to
use structures, as shown here, to define the input and output of
your function. Here the input is a reference to the Assembly data
object, a workspace to save output, and a length threshold for
filtering. To define lists and maps, use a syntax similar to C++
templates to indicate the type contained in the list or map. For
example: list <string> list_of_strings; mapping <string, int>
map_of_ints;) -> structure: parameter "assembly_input_ref" of type
"assembly_ref" (A 'typedef' allows you to provide a more specific
name for a type. Built-in primitive types include 'string',
'int', 'float'. Here we define a type named assembly_ref to
indicate a string that should be set to a KBase ID reference to an
Assembly data object.), parameter "workspace_name" of String,
parameter "min_length" of Long
:returns: instance of type "FilterContigsResults" (Here is the
definition of the output of the function. The output can be used
by other SDK modules which call your code, or the output
visualizations in the Narrative. 'report_name' and 'report_ref'
are special output fields- if defined, the Narrative can
automatically render your Report.) -> structure: parameter
"report_name" of String, parameter "report_ref" of String,
parameter "assembly_output" of type "assembly_ref" (A 'typedef'
allows you to provide a more specific name for a type. Built-in
primitive types include 'string', 'int', 'float'. Here we define
a type named assembly_ref to indicate a string that should be set
to a KBase ID reference to an Assembly data object.), parameter
"n_initial_contigs" of Long, parameter "n_contigs_removed" of
Long, parameter "n_contigs_remaining" of Long
"""
# ctx is the context object
# return variables are: output
#BEGIN filter_contigs
# Print statements to stdout/stderr are captured and available as the App log
print('Starting Filter Contigs function. Params=')
pprint(params)
# Step 1 - Parse/examine the parameters and catch any errors
# It is important to check that parameters exist and are defined, and that nice error
# messages are returned to users. Parameter values go through basic validation when
# defined in a Narrative App, but advanced users or other SDK developers can call
# this function directly, so validation is still important.
print('Validating parameters.')
if 'workspace_name' not in params:
raise ValueError('Parameter workspace_name is not set in input arguments')
workspace_name = params['workspace_name']
if 'assembly_input_ref' not in params:
raise ValueError('Parameter assembly_input_ref is not set in input arguments')
assembly_input_ref = params['assembly_input_ref']
if 'min_length' not in params:
raise ValueError('Parameter min_length is not set in input arguments')
min_length_orig = params['min_length']
min_length = None
try:
min_length = int(min_length_orig)
except ValueError:
raise ValueError('Cannot parse integer from min_length parameter (' + str(min_length_orig) + ')')
if min_length < 0:
raise ValueError('min_length parameter cannot be negative (' + str(min_length) + ')')
# Step 2 - Download the input data as a Fasta and
# We can use the AssemblyUtils module to download a FASTA file from our Assembly data object.
# The return object gives us the path to the file that was created.
print('Downloading Assembly data as a Fasta file.')
assemblyUtil = AssemblyUtil(self.callback_url)
fasta_file = assemblyUtil.get_assembly_as_fasta({'ref': assembly_input_ref})
# Step 3 - Actually perform the filter operation, saving the good contigs to a new fasta file.
# We can use BioPython to parse the Fasta file and build and save the output to a file.
good_contigs = []
n_total = 0
n_remaining = 0
for record in SeqIO.parse(fasta_file['path'], 'fasta'):
n_total += 1
if len(record.seq) >= min_length:
good_contigs.append(record)
n_remaining += 1
print('Filtered Assembly to ' + str(n_remaining) + ' contigs out of ' + str(n_total))
filtered_fasta_file = os.path.join(self.shared_folder, 'filtered.fasta')
SeqIO.write(good_contigs, filtered_fasta_file, 'fasta')
# Step 4 - Save the new Assembly back to the system
print('Uploading filtered Assembly data.')
new_assembly = assemblyUtil.save_assembly_from_fasta({'file': {'path': filtered_fasta_file},
'workspace_name': workspace_name,
'assembly_name': fasta_file['assembly_name']
})
# Step 5 - Build a Report and return
reportObj = {
'objects_created': [{'ref': new_assembly, 'description': 'Filtered contigs'}],
'text_message': 'Filtered Assembly to ' + str(n_remaining) + ' contigs out of ' + str(n_total)
}
report = KBaseReport(self.callback_url)
report_info = report.create({'report': reportObj, 'workspace_name': params['workspace_name']})
        # STEP 6: construct the output to send back
output = {'report_name': report_info['name'],
'report_ref': report_info['ref'],
'assembly_output': new_assembly,
'n_initial_contigs': n_total,
'n_contigs_removed': n_total - n_remaining,
'n_contigs_remaining': n_remaining
}
print('returning:' + pformat(output))
#END filter_contigs
# At some point might do deeper type checking...
if not isinstance(output, dict):
raise ValueError('Method filter_contigs return value ' +
'output is not type dict as required.')
# return the results
return [output]
def status(self, ctx):
#BEGIN_STATUS
returnVal = {'state': "OK",
'message': "",
'version': self.VERSION,
'git_url': self.GIT_URL,
'git_commit_hash': self.GIT_COMMIT_HASH}
#END_STATUS
return [returnVal]
| [] | [] | ["SDK_CALLBACK_URL"] | [] | ["SDK_CALLBACK_URL"] | python | 1 | 0 | |
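ExampleImpl.py reads SDK_CALLBACK_URL with os.environ[...], which raises KeyError when the variable is missing. A sketch of the equivalent fail-fast behaviour (mustGetenv is an illustrative name):

// Hypothetical sketch: required variable, panic early if it is absent.
package main

import (
	"fmt"
	"os"
)

func mustGetenv(key string) string {
	value, ok := os.LookupEnv(key)
	if !ok || value == "" {
		panic(fmt.Sprintf("required environment variable %s is not set", key))
	}
	return value
}

func main() {
	fmt.Println("callback url:", mustGetenv("SDK_CALLBACK_URL"))
}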
cmds/opcflux/main.go
|
//set OPC_SERVER=Graybox.Simulator && set OPC_NODES=127.0.0.1,localhost && go run main.go -conf influx.yml -rate 100ms
package main
import (
"flag"
"fmt"
"io/ioutil"
"log"
"os"
"strings"
"time"
"github.com/influxdata/influxdb/client/v2"
"github.com/konimarti/opc"
govaluate "gopkg.in/Knetic/govaluate.v3"
yaml "gopkg.in/yaml.v2"
)
var (
config = flag.String("conf", "influx.yml", "yaml config file for tag descriptions")
rr = flag.String("rate", "10s", "refresh rate as duration, e.g. 100ms, 5s, 10s, 2m")
)
// M stores an InfluxDB measurement
type M struct {
Tags map[string]string
Fields map[string]string
}
// Database represents an InfluxDB database connection
type Database struct {
Addr string
Username string
Password string
Database string
Precision string
}
// Conf contains config data
type Conf struct {
Server string
Nodes []string
Monitoring string
Influx Database
Measurements map[string][]M
}
func main() {
flag.Parse()
//set refresh rate
refreshRate, err := time.ParseDuration(*rr)
if err != nil {
log.Fatalf("error setting refresh rate")
}
fmt.Println("refresh rate: ", refreshRate)
// read config
conf := getConfig(*config)
//app monitoring
if conf.Monitoring != "" {
opc.StartMonitoring(conf.Monitoring)
}
// extract tags
tags := []string{}
exprMap := make(map[string]*govaluate.EvaluableExpression)
for _, group := range conf.Measurements {
for _, m := range group {
for _, f := range m.Fields {
expr, err := govaluate.NewEvaluableExpression(f)
if err != nil {
fmt.Println("Could not parse", f)
panic(err)
}
exprMap[f] = expr
tags = append(tags, expr.Vars()...)
}
}
}
//setup influxdb client
//TODO: get username and password for influx from environment variables
c, err := client.NewHTTPClient(client.HTTPConfig{
Addr: conf.Influx.Addr,
//Username: conf.Influx.Username,
//Password: conf.Influx.Password,
})
if err != nil {
fmt.Println(err)
panic("Error creating InfluxDB Client")
}
defer c.Close()
fmt.Println("Writing to", conf.Influx.Database, "at", conf.Influx.Addr)
if conf.Server == "" {
conf.Server = strings.Trim(os.Getenv("OPC_SERVER"), " ")
}
if len(conf.Nodes) == 0 {
conf.Nodes = strings.Split(os.Getenv("OPC_NODES"), ",")
}
conn, err := opc.NewConnection(
conf.Server,
conf.Nodes,
tags,
)
if err != nil {
fmt.Println("Could not create OPC connection.")
panic(err)
}
timeC := make(chan time.Time, 10)
// start go routine
go writeState(timeC, c, conn, conf, exprMap)
// start ticker
ticker := time.NewTicker(refreshRate)
for tick := range ticker.C {
timeC <- tick
}
}
// getConfig parses configuration file
func getConfig(config string) *Conf {
log.Println("config file: ", config)
content, err := ioutil.ReadFile(config)
if err != nil {
log.Fatalf("error reading config file %s", config)
}
conf := Conf{}
err = yaml.Unmarshal([]byte(content), &conf)
if err != nil {
log.Fatalf("error yaml unmarshalling: %v", err)
}
// fmt.Printf("--- conf:\n%v\n\n", conf)
return &conf
}
// writeState collects data and writes it to the influx database
func writeState(timeC chan time.Time, c client.Client, conn opc.Connection, conf *Conf, exprMap map[string]*govaluate.EvaluableExpression) {
batchconfig := client.BatchPointsConfig{
Database: conf.Influx.Database,
Precision: conf.Influx.Precision, // "s"
}
for t := range timeC {
// read data
data := adapter(conn.Read())
// create a new point batch
bp, err := client.NewBatchPoints(batchconfig)
if err != nil {
fmt.Println(err)
return
}
// define measurement and create data points
for measurement, group := range conf.Measurements {
//t := time.Now().Local()
//data := conn.Read()
for _, m := range group {
tagMap := m.Tags
fieldMap := make(map[string]interface{})
for fieldKey, f := range m.Fields {
ist, err := exprMap[f].Evaluate(data)
if err != nil {
fmt.Println(err)
continue
}
fieldMap[fieldKey] = ist
}
// create influx data points
pt, err := client.NewPoint(measurement, tagMap, fieldMap, t)
if err != nil {
fmt.Println("Error: ", err.Error())
}
// add data point to batch
bp.AddPoint(pt)
}
}
// write to database
if err := c.Write(bp); err != nil {
fmt.Println(err)
}
}
}
func adapter(input map[string]opc.Item) map[string]interface{} {
output := make(map[string]interface{})
for key, item := range input {
output[key] = item.Value
}
return output
}
|
[
"\"OPC_SERVER\"",
"\"OPC_NODES\""
] |
[] |
[
"OPC_NODES",
"OPC_SERVER"
] |
[]
|
["OPC_NODES", "OPC_SERVER"]
|
go
| 2 | 0 | |
test/bdd/bddtests_test.go
|
/*
Copyright SecureKey Technologies Inc. All Rights Reserved.
SPDX-License-Identifier: Apache-2.0
*/
package bdd
import (
"flag"
"fmt"
"os"
"strconv"
"strings"
"testing"
"time"
"github.com/cucumber/godog"
"github.com/hyperledger/aries-framework-go/pkg/common/log"
"github.com/hyperledger/aries-framework-go/test/bdd/agent"
"github.com/hyperledger/aries-framework-go/test/bdd/dockerutil"
bddctx "github.com/hyperledger/aries-framework-go/test/bdd/pkg/context"
"github.com/hyperledger/aries-framework-go/test/bdd/pkg/didexchange"
"github.com/hyperledger/aries-framework-go/test/bdd/pkg/didresolver"
"github.com/hyperledger/aries-framework-go/test/bdd/pkg/introduce"
"github.com/hyperledger/aries-framework-go/test/bdd/pkg/issuecredential"
"github.com/hyperledger/aries-framework-go/test/bdd/pkg/mediator"
"github.com/hyperledger/aries-framework-go/test/bdd/pkg/messaging"
"github.com/hyperledger/aries-framework-go/test/bdd/pkg/outofband"
"github.com/hyperledger/aries-framework-go/test/bdd/pkg/presentproof"
"github.com/hyperledger/aries-framework-go/test/bdd/pkg/redeemableroutes"
"github.com/hyperledger/aries-framework-go/test/bdd/pkg/verifiable"
)
const (
SideTreeURL = "${SIDETREE_URL}"
)
var (
composition []*dockerutil.Composition
composeFiles = []string{"./fixtures/agent-rest", "./fixtures/sidetree-mock"}
)
// Feature of the aries framework under test.
type feature interface {
// SetContext is called before every scenario is run with a fresh new context
SetContext(*bddctx.BDDContext)
// invoked once to register the steps on the suite
RegisterSteps(*godog.Suite)
}
func TestMain(m *testing.M) {
// default is to run all tests with tag @all
tags := "all"
flag.Parse()
format := "progress"
if getCmdArg("test.v") == "true" {
format = "pretty"
}
runArg := getCmdArg("test.run")
if runArg != "" {
tags = runArg
}
agentLogLevel := os.Getenv("AGENT_LOG_LEVEL")
if agentLogLevel != "" {
logLevel, err := log.ParseLevel(agentLogLevel)
if err != nil {
panic(err)
}
log.SetLevel(os.Getenv("AGENT_LOG_MODULE"), logLevel)
}
status := runBddTests(tags, format)
if st := m.Run(); st > status {
status = st
}
os.Exit(status)
}
//nolint:gocognit
func runBddTests(tags, format string) int {
return godog.RunWithOptions("godogs", func(s *godog.Suite) {
s.BeforeSuite(func() {
if os.Getenv("DISABLE_COMPOSITION") != "true" {
// Need a unique name, but docker does not allow '-' in names
composeProjectName := strings.ReplaceAll(generateUUID(), "-", "")
for _, v := range composeFiles {
newComposition, err := dockerutil.NewComposition(composeProjectName, "docker-compose.yml", v)
if err != nil {
panic(fmt.Sprintf("Error composing system in BDD context: %s", err))
}
composition = append(composition, newComposition)
}
fmt.Println("docker-compose up ... waiting for containers to start ...")
testSleep := 15
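// TEST_SLEEP (in seconds) overrides how long we wait for the docker-compose containers to start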
if os.Getenv("TEST_SLEEP") != "" {
var e error
testSleep, e = strconv.Atoi(os.Getenv("TEST_SLEEP"))
if e != nil {
panic(fmt.Sprintf("Invalid value found in 'TEST_SLEEP': %s", e))
}
}
fmt.Printf("*** testSleep=%d", testSleep)
time.Sleep(time.Second * time.Duration(testSleep))
}
})
s.AfterSuite(func() {
for _, c := range composition {
if c != nil {
if err := c.GenerateLogs(c.Dir, c.ProjectName+".log"); err != nil {
panic(err)
}
if _, err := c.Decompose(c.Dir); err != nil {
panic(err)
}
}
}
})
FeatureContext(s)
}, godog.Options{
Tags: tags,
Format: format,
Paths: []string{"features"},
Randomize: time.Now().UTC().UnixNano(), // randomize scenario execution order
Strict: true,
StopOnFailure: true,
})
}
func getCmdArg(argName string) string {
cmdTags := flag.CommandLine.Lookup(argName)
if cmdTags != nil && cmdTags.Value != nil && cmdTags.Value.String() != "" {
return cmdTags.Value.String()
}
return ""
}
// generateUUID returns a UUID based on RFC 4122.
func generateUUID() string {
id := dockerutil.GenerateBytesUUID()
return fmt.Sprintf("%x-%x-%x-%x-%x", id[0:4], id[4:6], id[6:8], id[8:10], id[10:])
}
func FeatureContext(s *godog.Suite) {
features := features()
for _, f := range features {
f.RegisterSteps(s)
}
var bddContext *bddctx.BDDContext
s.BeforeScenario(func(interface{}) {
bddContext = bddctx.NewBDDContext()
// set dynamic args
bddContext.Args[SideTreeURL] = "http://localhost:48326/sidetree/v1/"
for _, f := range features {
f.SetContext(bddContext)
}
})
s.AfterScenario(func(_ interface{}, _ error) {
bddContext.Destroy()
})
}
func features() []feature {
return []feature{
agent.NewSDKSteps(),
agent.NewControllerSteps(),
didexchange.NewDIDExchangeSDKSteps(),
didexchange.NewDIDExchangeControllerSteps(),
introduce.NewIntroduceSDKSteps(),
introduce.NewIntroduceControllerSteps(),
issuecredential.NewIssueCredentialSDKSteps(),
issuecredential.NewIssueCredentialControllerSteps(),
didresolver.NewDIDResolverSteps(),
messaging.NewMessagingSDKSteps(),
messaging.NewMessagingControllerSteps(),
mediator.NewRouteSDKSteps(),
mediator.NewRouteRESTSteps(),
verifiable.NewVerifiableCredentialSDKSteps(),
outofband.NewOutOfBandSDKSteps(),
outofband.NewOutofbandControllerSteps(),
presentproof.NewPresentProofSDKSteps(),
presentproof.NewPresentProofControllerSteps(),
redeemableroutes.NewBDDSteps(),
}
}
|
[
"\"AGENT_LOG_LEVEL\"",
"\"AGENT_LOG_MODULE\"",
"\"DISABLE_COMPOSITION\"",
"\"TEST_SLEEP\"",
"\"TEST_SLEEP\""
] |
[] |
[
"DISABLE_COMPOSITION",
"AGENT_LOG_LEVEL",
"TEST_SLEEP",
"AGENT_LOG_MODULE"
] |
[]
|
["DISABLE_COMPOSITION", "AGENT_LOG_LEVEL", "TEST_SLEEP", "AGENT_LOG_MODULE"]
|
go
| 4 | 0 | |
tourist_attractions/wsgi.py
|
"""
WSGI config for tourist_attractions project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'tourist_attractions.settings.base')
application = get_wsgi_application()
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
interferogram/stitch_ifgs.py
|
#!/usr/bin/env python3
from __future__ import absolute_import
from builtins import str
import os, sys, re, requests, json, shutil, traceback, logging, hashlib, math
from itertools import chain
from subprocess import check_call, CalledProcessError
from glob import glob
from lxml.etree import parse
import numpy as np
from datetime import datetime
from osgeo import ogr, osr
from utils.UrlUtils import UrlUtils
from utils.createImage import createImage
from .sentinel.check_interferogram import check_int
log_format = "[%(asctime)s: %(levelname)s/%(funcName)s] %(message)s"
logging.basicConfig(format=log_format, level=logging.INFO)
logger = logging.getLogger('stitch_ifgs')
BASE_PATH = os.path.dirname(__file__)
def get_version():
"""Get dataset version."""
DS_VERS_CFG = os.path.normpath(
os.path.join(
os.path.dirname(os.path.abspath(__file__)),
'..', 'conf', 'dataset_versions.json'))
with open(DS_VERS_CFG) as f:
ds_vers = json.load(f)
return ds_vers['S1-GUNW-MERGED-STITCHED']
def get_union_polygon(ds_files):
"""Get GeoJSON polygon of union of IFGs."""
geom_union = None
for ds_file in ds_files:
with open(ds_file) as f:
ds = json.load(f)
geom = ogr.CreateGeometryFromJson(json.dumps(ds['location'], indent=2, sort_keys=True))
if geom_union is None: geom_union = geom
else: geom_union = geom_union.Union(geom)
return json.loads(geom_union.ExportToJson()), geom_union.GetEnvelope()
def get_times(ds_files):
"""Get starttimes and endtimes."""
starttimes = []
endtimes = []
for ds_file in ds_files:
with open(ds_file) as f:
ds = json.load(f)
starttimes.append(ds['starttime'])
endtimes.append(ds['endtime'])
return starttimes, endtimes
def create_dataset_json(id, version, ds_files, ds_json_file):
"""Create HySDS dataset json file."""
# get union polygon
location, env = get_union_polygon(ds_files)
logger.info("union polygon: {}.".format(json.dumps(location, indent=2, sort_keys=True)))
# get starttime and endtimes
starttimes, endtimes = get_times(ds_files)
starttimes.sort()
endtimes.sort()
starttime = starttimes[0]
endtime = endtimes[-1]
# build dataset
ds = {
'creation_timestamp': "%sZ" % datetime.utcnow().isoformat(),
'version': version,
'label': id,
'location': location,
'starttime': starttime,
'endtime': endtime,
}
# write out dataset json
with open(ds_json_file, 'w') as f:
json.dump(ds, f, indent=2)
# return envelope and times
return env, starttime, endtime
def create_met_json(id, version, env, starttime, endtime, met_files, met_json_file, direction):
"""Create HySDS met json file."""
# build met
bbox = [
[ env[3], env[0] ],
[ env[3], env[1] ],
[ env[2], env[1] ],
[ env[2], env[0] ],
]
met = {
'stitch_direction': direction,
'product_type': 'interferogram',
'master_scenes': [],
'refbbox': [],
'esd_threshold': [],
'frame_id': [],
'temporal_span': None,
'track_number': None,
'archive_filename': id,
'dataset_type': 'slc',
'tile_layers': [ 'interferogram' ],
'latitude_index_min': int(math.floor(env[2] * 10)),
'latitude_index_max': int(math.ceil(env[3] * 10)),
'parallel_baseline': [],
'url': [],
'doppler': [],
'version': [],
'slave_scenes': [],
'orbit_type': [],
'frame_number': None,
'reference': None,
'bbox': bbox,
'ogr_bbox': [[x, y] for y, x in bbox],
'orbit_number': [],
'inputFile': 'ifg_stitch.json',
'perpendicular_baseline': [],
'orbit_repeat': [],
'sensing_stop': endtime,
'polarization': [],
'scene_count': 0,
'beamID': None,
'sensor': [],
'look_direction': [],
'platform': [],
'starting_range': [],
'frame_name': [],
'tiles': True,
'sensing_start': starttime,
'beam_mode': [],
'image_corners': [],
'orbit_direction': [],
'prf': [],
"sha224sum": hashlib.sha224(str.encode(os.path.basename(met_json_file))).hexdigest(),
}
# collect values
set_params = ('master_scenes', 'esd_threshold', 'frame_id', 'parallel_baseline',
'doppler', 'version', 'slave_scenes', 'orbit_type', 'orbit_number',
'perpendicular_baseline', 'orbit_repeat', 'polarization',
'sensor', 'look_direction', 'platform', 'starting_range',
'beam_mode', 'orbit_direction', 'prf' )
single_params = ('temporal_span', 'track_number')
list_params = ('master_scenes', 'slave_scenes', 'platform', 'perpendicular_baseline', 'parallel_baseline')
mean_params = ('perpendicular_baseline', 'parallel_baseline')
for i, met_file in enumerate(met_files):
with open(met_file) as f:
md = json.load(f)
for param in set_params:
#logger.info("param: {}".format(param))
if isinstance(md[param], list):
met[param].extend(md[param])
else:
met[param].append(md[param])
if i == 0:
for param in single_params:
met[param] = md[param]
met['scene_count'] += 1
for param in set_params:
tmp_met = list(set(met[param]))
if param in list_params:
met[param] = tmp_met
else:
met[param] = tmp_met[0] if len(tmp_met) == 1 else tmp_met
for param in mean_params:
met[param] = np.mean(met[param])
# write out dataset json
with open(met_json_file, 'w') as f:
json.dump(met, f, indent=2)
def ifg_exists(es_url, es_index, id):
"""Check interferogram exists in GRQ."""
total, id = check_int(es_url, es_index, id)
if total > 0: return True
return False
def call_noerr(cmd):
"""Run command and warn if exit status is not 0."""
try: check_call(cmd, shell=True)
except Exception as e:
logger.warn("Got exception running {}: {}".format(cmd, str(e)))
logger.warn("Traceback: {}".format(traceback.format_exc()))
def main():
"""HySDS PGE wrapper for TopsInSAR interferogram generation."""
# save cwd (working directory)
cwd = os.getcwd()
# get context
ctx_file = os.path.abspath('_context.json')
if not os.path.exists(ctx_file):
raise RuntimeError("Failed to find _context.json.")
with open(ctx_file) as f:
ctx = json.load(f)
logger.info("ctx: {}".format(json.dumps(ctx, indent=2, sort_keys=True)))
# get args
project = ctx['project']
direction = ctx.get('direction', 'along')
extra_products = ctx.get('extra_products', [])
filenames = ctx['filenames']
outname = 'filt_topophase.unw.geo'
# get id base
id_base = ctx['id']
logger.info("Product base ID: {}".format(id_base))
# get dataset version and set dataset ID
version = get_version()
id = "{}-{}-{}".format(id_base, version, re.sub("[^a-zA-Z0-9_]", "_", ctx.get("context", {})
.get("dataset_tag", "standard")))
# get endpoint configurations
uu = UrlUtils()
es_url = uu.rest_url
es_index = "{}_{}_s1-gunw-merged-stitched".format(uu.grq_index_prefix, version)
# check if interferogram already exists
logger.info("GRQ url: {}".format(es_url))
logger.info("GRQ index: {}".format(es_index))
logger.info("Product ID for version {}: {}".format(version, id))
if ifg_exists(es_url, es_index, id):
logger.info("{} interferogram for {}".format(version, id_base) +
" was previously generated and exists in GRQ database.")
# cleanup IFG dirs
for i in [os.path.split(fname)[0] for swath_list in filenames for fname in swath_list]:
logger.info("Removing {}.".format(i))
try: shutil.rmtree(i)
except: pass
return 0
# create product directory
dataset_dir = os.path.abspath(id)
os.makedirs(dataset_dir, 0o755)
# dump input file
inp = {
'direction': direction,
'extra_products': extra_products,
'filenames': filenames,
'outname': outname,
}
ifg_stitch_file = os.path.join(dataset_dir, "ifg_stitch.json")
with open(ifg_stitch_file, 'w') as f:
json.dump(inp, f, indent=2)
# run stitcher
stc_cmd = [
"python3", os.path.join(BASE_PATH, "ifg_stitcher.py"), ifg_stitch_file
]
stc_cmd_line = " ".join(stc_cmd)
logger.info("Calling ifg_stitcher.py: {}".format(stc_cmd_line))
check_call(stc_cmd_line, shell=True)
# generate GDAL (ENVI) headers and move to product directory
raster_prods = [
'filt_topophase.unw.geo',
'filt_topophase.unw.conncomp.geo',
'phsig.cor.geo',
]
raster_prods.extend(extra_products)
for j in raster_prods:
if not os.path.exists(j): continue
gdal_xml = "{}.xml".format(j)
gdal_hdr = "{}.hdr".format(j)
#gdal_tif = "{}.tif".format(j)
gdal_vrt = "{}.vrt".format(j)
if os.path.exists(j): shutil.move(j, dataset_dir)
else: logger.warn("{} wasn't generated.".format(j))
if os.path.exists(gdal_xml): shutil.move(gdal_xml, dataset_dir)
else: logger.warn("{} wasn't generated.".format(gdal_xml))
if os.path.exists(gdal_hdr): shutil.move(gdal_hdr, dataset_dir)
else: logger.warn("{} wasn't generated.".format(gdal_hdr))
if os.path.exists(gdal_vrt): shutil.move(gdal_vrt, dataset_dir)
else: logger.warn("{} wasn't generated.".format(gdal_vrt))
# save other files to product directory
shutil.copyfile("_context.json", os.path.join(dataset_dir,"{}.context.json".format(id)))
if os.path.exists('isce.log'):
shutil.copyfile("isce.log", os.path.join(dataset_dir, "isce.log"))
if os.path.exists('stitch_ifgs.log'):
shutil.copyfile("stitch_ifgs.log", os.path.join(dataset_dir, "stitch_ifgs.log"))
# create browse images
os.chdir(dataset_dir)
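# mdx.py and mdx are located under ISCE_HOME, which must point at the ISCE installation directory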
mdx_app_path = "{}/applications/mdx.py".format(os.environ['ISCE_HOME'])
mdx_path = "{}/bin/mdx".format(os.environ['ISCE_HOME'])
unw_file = "filt_topophase.unw.geo"
#unwrapped image at different rates
createImage("{} -P {}".format(mdx_app_path, unw_file),unw_file)
#createImage("{} -P {} -wrap {}".format(mdx_app_path, unw_file, rad),unw_file + "_5cm")
createImage("{} -P {} -wrap 20".format(mdx_app_path, unw_file),unw_file + "_20rad")
#amplitude image
unw_xml = "filt_topophase.unw.geo.xml"
rt = parse(unw_xml)
size = eval(rt.xpath('.//component[@name="coordinate1"]/property[@name="size"]/value/text()')[0])
rtlr = size * 4
logger.info("rtlr value for amplitude browse is: {}".format(rtlr))
createImage("{} -P {} -s {} -amp -r4 -rtlr {} -CW".format(mdx_path, unw_file, size, rtlr), 'amplitude.geo')
#coherence image
#top_file = "topophase.cor.geo"
#createImage("{} -P {}".format(mdx_app_path, top_file),top_file)
#should be the same size as unw but just in case
#top_xml = "topophase.cor.geo.xml"
#rt = parse(top_xml)
#size = eval(rt.xpath('.//component[@name="coordinate1"]/property[@name="size"]/value/text()')[0])
#rhdr = size * 4
#createImage("{} -P {} -s {} -r4 -rhdr {} -cmap cmy -wrap 1.2".format(mdx_path, top_file,size,rhdr),"topophase_ph_only.cor.geo")
# create unw KMZ
unw_kml = "unw.geo.kml"
unw_kmz = "{}.kmz".format(id)
call_noerr("{} {} -kml {}".format(mdx_app_path, unw_file, unw_kml))
call_noerr("{}/sentinel/create_kmz.py {} {}.png {}".format(BASE_PATH, unw_kml, unw_file, unw_kmz))
# remove kml
call_noerr("rm -f *.kml")
# chdir back up to work directory
os.chdir(cwd)
# create displacement tile layer
tiles_dir = "{}/tiles".format(dataset_dir)
vrt_prod_file = "{}/filt_topophase.unw.geo.vrt".format(dataset_dir)
tiler_cmd_path = os.path.abspath(os.path.join(BASE_PATH, '..', 'map_tiler'))
dis_layer = "interferogram"
tiler_cmd_tmpl = "{}/create_tiles.py {} {}/{} -b 2 -m prism --nodata 0"
call_noerr(tiler_cmd_tmpl.format(tiler_cmd_path, vrt_prod_file, tiles_dir, dis_layer))
# create amplitude tile layer
#amp_layer = "amplitude"
#tiler_cmd_tmpl = "{}/create_tiles.py {} {}/{} -b 1 -m gray --clim_min 10 --clim_max_pct 80 --nodata 0"
#call_noerr(tiler_cmd_tmpl.format(tiler_cmd_path, vrt_prod_file, tiles_dir, amp_layer))
# create COG (cloud optimized geotiff) with no_data set
cog_prod_file = "{}/filt_topophase.unw.geo.tif".format(dataset_dir)
cog_cmd_tmpl = "gdal_translate {} tmp.tif -co TILED=YES -co COMPRESS=DEFLATE -a_nodata 0"
check_call(cog_cmd_tmpl.format(vrt_prod_file), shell=True)
check_call("gdaladdo -r average tmp.tif 2 4 8 16 32", shell=True)
cog_cmd_tmpl = "gdal_translate tmp.tif {} -co TILED=YES -co COPY_SRC_OVERVIEWS=YES -co BLOCKXSIZE=512 -co BLOCKYSIZE=512 --config GDAL_TIFF_OVR_BLOCKSIZE 512"
check_call(cog_cmd_tmpl.format(cog_prod_file), shell=True)
os.unlink("tmp.tif")
# get list of dataset and met files
dsets = []
mets = []
for i in [os.path.dirname(os.path.dirname(fname)) for swath_list in filenames for fname in swath_list]:
dsets.append(os.path.join(i, "_{}.dataset.json".format(i)))
mets.append(os.path.join(i, "_{}.met.json".format(i)))
logger.info("Datasets: {}.".format(dsets))
logger.info("Mets: {}.".format(mets))
# create dataset json
ds_json_file = os.path.join(dataset_dir, "{}.dataset.json".format(id))
envelope, starttime, endtime = create_dataset_json(id, version, dsets, ds_json_file)
# create met json
met_json_file = os.path.join(dataset_dir, "{}.met.json".format(id))
create_met_json(id, version, envelope, starttime, endtime, mets, met_json_file, direction)
# cleanup IFG dirs
for i in [os.path.split(fname)[0] for swath_list in filenames for fname in swath_list]:
logger.info("Removing {}.".format(i))
try: shutil.rmtree(i)
except: pass
if __name__ == '__main__':
try: status = main()
except Exception as e:
with open('_alt_error.txt', 'w') as f:
f.write("%s\n" % str(e))
with open('_alt_traceback.txt', 'w') as f:
f.write("%s\n" % traceback.format_exc())
raise
sys.exit(status)
|
[] |
[] |
[
"ISCE_HOME"
] |
[]
|
["ISCE_HOME"]
|
python
| 1 | 0 | |
vendor/github.com/okzk/sdnotify/notify_linux.go
|
package sdnotify
import (
"net"
"os"
)
// SdNotify sends a specified string to the systemd notification socket.
func SdNotify(state string) error {
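	// systemd sets NOTIFY_SOCKET for services started with Type=notify; if it is unset there is nothing to notify.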
name := os.Getenv("NOTIFY_SOCKET")
if name == "" {
return ErrSdNotifyNoSocket
}
conn, err := net.DialUnix("unixgram", nil, &net.UnixAddr{Name: name, Net: "unixgram"})
if err != nil {
return err
}
defer conn.Close()
_, err = conn.Write([]byte(state))
return err
}
|
[
"\"NOTIFY_SOCKET\""
] |
[] |
[
"NOTIFY_SOCKET"
] |
[]
|
["NOTIFY_SOCKET"]
|
go
| 1 | 0 | |
cli/src/main/java/hudson/cli/CLI.java
|
/*
* The MIT License
*
* Copyright (c) 2004-2009, Sun Microsystems, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
package hudson.cli;
import com.trilead.ssh2.crypto.PEMDecoder;
import hudson.cli.client.Messages;
import hudson.remoting.Channel;
import hudson.remoting.PingThread;
import hudson.remoting.Pipe;
import hudson.remoting.RemoteInputStream;
import hudson.remoting.RemoteOutputStream;
import hudson.remoting.SocketInputStream;
import hudson.remoting.SocketOutputStream;
import org.codehaus.mojo.animal_sniffer.IgnoreJRERequirement;
import javax.crypto.SecretKey;
import javax.crypto.spec.SecretKeySpec;
import javax.net.ssl.HostnameVerifier;
import javax.net.ssl.HttpsURLConnection;
import javax.net.ssl.SSLContext;
import javax.net.ssl.SSLSession;
import javax.net.ssl.TrustManager;
import java.io.BufferedInputStream;
import java.io.BufferedOutputStream;
import java.io.BufferedReader;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.Closeable;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.io.PrintStream;
import java.io.StringReader;
import java.net.HttpURLConnection;
import java.net.InetSocketAddress;
import java.net.Socket;
import java.net.URL;
import java.net.URLConnection;
import java.security.GeneralSecurityException;
import java.security.KeyFactory;
import java.security.KeyPair;
import java.security.PublicKey;
import java.security.SecureRandom;
import java.security.Signature;
import java.security.spec.DSAPrivateKeySpec;
import java.security.spec.DSAPublicKeySpec;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.Locale;
import java.util.Properties;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.logging.Level;
import java.util.logging.Logger;
import java.io.Console;
import static java.util.logging.Level.*;
/**
* CLI entry point to Jenkins.
*
* @author Kohsuke Kawaguchi
*/
public class CLI {
private final ExecutorService pool;
private final Channel channel;
private final CliEntryPoint entryPoint;
private final boolean ownsPool;
private final List<Closeable> closables = new ArrayList<Closeable>(); // stuff to close in the close method
private final String httpsProxyTunnel;
private final String authorization;
public CLI(URL jenkins) throws IOException, InterruptedException {
this(jenkins,null);
}
/**
* @deprecated
* Use {@link CLIConnectionFactory} to create {@link CLI}
*/
public CLI(URL jenkins, ExecutorService exec) throws IOException, InterruptedException {
this(jenkins,exec,null);
}
/**
* @deprecated
* Use {@link CLIConnectionFactory} to create {@link CLI}
*/
public CLI(URL jenkins, ExecutorService exec, String httpsProxyTunnel) throws IOException, InterruptedException {
this(new CLIConnectionFactory().url(jenkins).executorService(exec).httpsProxyTunnel(httpsProxyTunnel));
}
/*package*/ CLI(CLIConnectionFactory factory) throws IOException, InterruptedException {
URL jenkins = factory.jenkins;
this.httpsProxyTunnel = factory.httpsProxyTunnel;
this.authorization = factory.authorization;
ExecutorService exec = factory.exec;
String url = jenkins.toExternalForm();
if(!url.endsWith("/")) url+='/';
ownsPool = exec==null;
pool = exec!=null ? exec : Executors.newCachedThreadPool();
Channel _channel;
try {
_channel = connectViaCliPort(jenkins, getCliTcpPort(url));
} catch (IOException e) {
LOGGER.log(Level.FINE,"Failed to connect via CLI port. Falling back to HTTP",e);
try {
_channel = connectViaHttp(url);
} catch (IOException e2) {
try { // Java 7: e.addSuppressed(e2);
Throwable.class.getMethod("addSuppressed", Throwable.class).invoke(e, e2);
} catch (NoSuchMethodException _ignore) {
// Java 6
} catch (Exception _huh) {
LOGGER.log(Level.SEVERE, null, _huh);
}
throw e;
}
}
this.channel = _channel;
// execute the command
entryPoint = (CliEntryPoint)_channel.waitForRemoteProperty(CliEntryPoint.class.getName());
if(entryPoint.protocolVersion()!=CliEntryPoint.VERSION)
throw new IOException(Messages.CLI_VersionMismatch());
}
private Channel connectViaHttp(String url) throws IOException {
LOGGER.fine("Trying to connect to "+url+" via HTTP");
url+="cli";
URL jenkins = new URL(url);
FullDuplexHttpStream con = new FullDuplexHttpStream(jenkins,authorization);
Channel ch = new Channel("Chunked connection to "+jenkins,
pool,con.getInputStream(),con.getOutputStream());
final long interval = 15*1000;
final long timeout = (interval * 3) / 4;
new PingThread(ch,timeout,interval) {
protected void onDead() {
// noop. the point of ping is to keep the connection alive
// as most HTTP servers have a rather short read time out
}
}.start();
return ch;
}
private Channel connectViaCliPort(URL jenkins, CliPort clip) throws IOException {
LOGGER.fine("Trying to connect directly via TCP/IP to "+clip.endpoint);
final Socket s;
OutputStream out;
if (httpsProxyTunnel!=null) {
String[] tokens = httpsProxyTunnel.split(":");
s = new Socket(tokens[0], Integer.parseInt(tokens[1]));
PrintStream o = new PrintStream(s.getOutputStream());
o.print("CONNECT " + clip.endpoint.getHostName() + ":" + clip.endpoint.getPort() + " HTTP/1.0\r\n\r\n");
// read the response from the proxy
ByteArrayOutputStream rsp = new ByteArrayOutputStream();
while (!rsp.toString().endsWith("\r\n\r\n")) {
int ch = s.getInputStream().read();
if (ch<0) throw new IOException("Failed to read the HTTP proxy response: "+rsp);
rsp.write(ch);
}
String head = new BufferedReader(new StringReader(rsp.toString())).readLine();
if (!head.startsWith("HTTP/1.0 200 "))
throw new IOException("Failed to establish a connection through HTTP proxy: "+rsp);
            // HTTP proxies (at least the one I tried --- squid) don't seem to do half-close very well.
// So instead of relying on it, we'll just send the close command and then let the server
// cut their side, then close the socket after the join.
out = new SocketOutputStream(s) {
@Override
public void close() throws IOException {
// ignore
}
};
} else {
s = new Socket();
s.connect(clip.endpoint,3000);
out = new SocketOutputStream(s);
}
closables.add(new Closeable() {
public void close() throws IOException {
s.close();
}
});
Connection c = new Connection(new SocketInputStream(s),out);
switch (clip.version) {
case 1:
DataOutputStream dos = new DataOutputStream(s.getOutputStream());
dos.writeUTF("Protocol:CLI-connect");
// we aren't checking greeting from the server here because I'm too lazy. It gets ignored by Channel constructor.
break;
case 2:
DataInputStream dis = new DataInputStream(s.getInputStream());
dos = new DataOutputStream(s.getOutputStream());
dos.writeUTF("Protocol:CLI2-connect");
String greeting = dis.readUTF();
if (!greeting.equals("Welcome"))
throw new IOException("Handshaking failed: "+greeting);
try {
byte[] secret = c.diffieHellman(false).generateSecret();
SecretKey sessionKey = new SecretKeySpec(Connection.fold(secret,128/8),"AES");
c = c.encryptConnection(sessionKey,"AES/CFB8/NoPadding");
// validate the instance identity, so that we can be sure that we are talking to the same server
// and there's no one in the middle.
byte[] signature = c.readByteArray();
if (clip.identity!=null) {
Signature verifier = Signature.getInstance("SHA1withRSA");
verifier.initVerify(clip.getIdentity());
verifier.update(secret);
if (!verifier.verify(signature))
throw new IOException("Server identity signature validation failed.");
}
} catch (GeneralSecurityException e) {
throw (IOException)new IOException("Failed to negotiate transport security").initCause(e);
}
}
return new Channel("CLI connection to "+jenkins, pool,
new BufferedInputStream(c.in), new BufferedOutputStream(c.out));
}
/**
* If the server advertises CLI endpoint, returns its location.
*/
private CliPort getCliTcpPort(String url) throws IOException {
URL _url = new URL(url);
if (_url.getHost()==null || _url.getHost().length()==0) {
throw new IOException("Invalid URL: "+url);
}
URLConnection head = _url.openConnection();
try {
head.connect();
} catch (IOException e) {
throw (IOException)new IOException("Failed to connect to "+url).initCause(e);
}
String h = head.getHeaderField("X-Jenkins-CLI-Host");
if (h==null) h = head.getURL().getHost();
String p1 = head.getHeaderField("X-Jenkins-CLI-Port");
if (p1==null) p1 = head.getHeaderField("X-Hudson-CLI-Port"); // backward compatibility
String p2 = head.getHeaderField("X-Jenkins-CLI2-Port");
String identity = head.getHeaderField("X-Instance-Identity");
flushURLConnection(head);
if (p1==null && p2==null) {
throw new IOException("No X-Jenkins-CLI2-Port among " + head.getHeaderFields().keySet());
}
if (p2!=null) return new CliPort(new InetSocketAddress(h,Integer.parseInt(p2)),identity,2);
else return new CliPort(new InetSocketAddress(h,Integer.parseInt(p1)),identity,1);
}
/**
* Flush the supplied {@link URLConnection} input and close the
* connection nicely.
* @param conn the connection to flush/close
*/
private void flushURLConnection(URLConnection conn) {
byte[] buf = new byte[1024];
try {
InputStream is = conn.getInputStream();
while (is.read(buf) >= 0) {
// Ignore
}
is.close();
} catch (IOException e) {
try {
InputStream es = ((HttpURLConnection)conn).getErrorStream();
while (es.read(buf) >= 0) {
// Ignore
}
es.close();
} catch (IOException ex) {
// Ignore
}
}
}
/**
* Shuts down the channel and closes the underlying connection.
*/
public void close() throws IOException, InterruptedException {
channel.close();
channel.join();
if(ownsPool)
pool.shutdown();
for (Closeable c : closables)
c.close();
}
public int execute(List<String> args, InputStream stdin, OutputStream stdout, OutputStream stderr) {
return entryPoint.main(args, Locale.getDefault(),
new RemoteInputStream(stdin),
new RemoteOutputStream(stdout),
new RemoteOutputStream(stderr));
}
public int execute(List<String> args) {
return execute(args, System.in, System.out, System.err);
}
public int execute(String... args) {
return execute(Arrays.asList(args));
}
/**
* Returns true if the named command exists.
*/
public boolean hasCommand(String name) {
return entryPoint.hasCommand(name);
}
/**
* Accesses the underlying communication channel.
* @since 1.419
*/
public Channel getChannel() {
return channel;
}
/**
* Attempts to lift the security restriction on the underlying channel.
* This requires the administer privilege on the server.
*
* @throws SecurityException
* If we fail to upgrade the connection.
*/
public void upgrade() {
ByteArrayOutputStream out = new ByteArrayOutputStream();
if (execute(Arrays.asList("groovy", "="),
new ByteArrayInputStream("hudson.remoting.Channel.current().setRestricted(false)".getBytes()),
out,out)!=0)
throw new SecurityException(out.toString()); // failed to upgrade
}
public static void main(final String[] _args) throws Exception {
// Logger l = Logger.getLogger(Channel.class.getName());
// l.setLevel(ALL);
// ConsoleHandler h = new ConsoleHandler();
// h.setLevel(ALL);
// l.addHandler(h);
//
System.exit(_main(_args));
}
public static int _main(String[] _args) throws Exception {
List<String> args = Arrays.asList(_args);
List<KeyPair> candidateKeys = new ArrayList<KeyPair>();
boolean sshAuthRequestedExplicitly = false;
String httpProxy=null;
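        // Prefer JENKINS_URL; fall back to the legacy HUDSON_URL for older Hudson installations.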
String url = System.getenv("JENKINS_URL");
if (url==null)
url = System.getenv("HUDSON_URL");
while(!args.isEmpty()) {
String head = args.get(0);
if (head.equals("-version")) {
System.out.println("Version: "+computeVersion());
return 0;
}
if(head.equals("-s") && args.size()>=2) {
url = args.get(1);
args = args.subList(2,args.size());
continue;
}
if (head.equals("-noCertificateCheck")) {
System.out.println("Skipping HTTPS certificate checks altogether. Note that this is not secure at all.");
SSLContext context = SSLContext.getInstance("TLS");
context.init(null, new TrustManager[]{new NoCheckTrustManager()}, new SecureRandom());
HttpsURLConnection.setDefaultSSLSocketFactory(context.getSocketFactory());
// bypass host name check, too.
HttpsURLConnection.setDefaultHostnameVerifier(new HostnameVerifier() {
public boolean verify(String s, SSLSession sslSession) {
return true;
}
});
args = args.subList(1,args.size());
continue;
}
if(head.equals("-i") && args.size()>=2) {
File f = new File(args.get(1));
if (!f.exists()) {
printUsage(Messages.CLI_NoSuchFileExists(f));
return -1;
}
KeyPair kp;
try {
kp = loadKey(f);
} catch (IOException e) {
//if the PEM file is encrypted, IOException is thrown
kp = tryEncryptedFile(f);
} catch (GeneralSecurityException e) {
throw new Exception("Failed to load key: "+f,e);
}
if(kp != null)
candidateKeys.add(kp);
args = args.subList(2,args.size());
sshAuthRequestedExplicitly = true;
continue;
}
if(head.equals("-p") && args.size()>=2) {
httpProxy = args.get(1);
args = args.subList(2,args.size());
continue;
}
break;
}
if(url==null) {
printUsage(Messages.CLI_NoURL());
return -1;
}
if(args.isEmpty())
args = Arrays.asList("help"); // default to help
if (candidateKeys.isEmpty())
addDefaultPrivateKeyLocations(candidateKeys);
CLIConnectionFactory factory = new CLIConnectionFactory().url(url).httpsProxyTunnel(httpProxy);
String userInfo = new URL(url).getUserInfo();
if (userInfo != null) {
factory = factory.basicAuth(userInfo);
}
CLI cli = factory.connect();
try {
if (!candidateKeys.isEmpty()) {
try {
// TODO: server verification
cli.authenticate(candidateKeys);
} catch (IllegalStateException e) {
if (sshAuthRequestedExplicitly) {
System.err.println("The server doesn't support public key authentication");
return -1;
}
} catch (UnsupportedOperationException e) {
if (sshAuthRequestedExplicitly) {
System.err.println("The server doesn't support public key authentication");
return -1;
}
} catch (GeneralSecurityException e) {
if (sshAuthRequestedExplicitly) {
System.err.println(e.getMessage());
LOGGER.log(FINE,e.getMessage(),e);
return -1;
}
System.err.println("Failed to authenticate with your SSH keys.");
LOGGER.log(FINE,"Failed to authenticate with your SSH keys.",e);
}
}
// execute the command
// Arrays.asList is not serializable --- see 6835580
args = new ArrayList<String>(args);
return cli.execute(args, System.in, System.out, System.err);
} finally {
cli.close();
}
}
private static String computeVersion() {
Properties props = new Properties();
try {
InputStream is = CLI.class.getResourceAsStream("/jenkins/cli/jenkins-cli-version.properties");
if(is!=null)
props.load(is);
} catch (IOException e) {
e.printStackTrace(); // if the version properties is missing, that's OK.
}
return props.getProperty("version","?");
}
/**
* Loads RSA/DSA private key in a PEM format into {@link KeyPair}.
*/
public static KeyPair loadKey(File f, String passwd) throws IOException, GeneralSecurityException {
return loadKey(readPemFile(f), passwd);
}
public static KeyPair loadKey(File f) throws IOException, GeneralSecurityException {
return loadKey(f, null);
}
private static String readPemFile(File f) throws IOException{
DataInputStream dis = new DataInputStream(new FileInputStream(f));
byte[] bytes = new byte[(int) f.length()];
dis.readFully(bytes);
dis.close();
return new String(bytes);
}
/**
* Loads RSA/DSA private key in a PEM format into {@link KeyPair}.
*/
public static KeyPair loadKey(String pemString, String passwd) throws IOException, GeneralSecurityException {
Object key = PEMDecoder.decode(pemString.toCharArray(), passwd);
if (key instanceof com.trilead.ssh2.signature.RSAPrivateKey) {
com.trilead.ssh2.signature.RSAPrivateKey x = (com.trilead.ssh2.signature.RSAPrivateKey)key;
// System.out.println("ssh-rsa " + new String(Base64.encode(RSASHA1Verify.encodeSSHRSAPublicKey(x.getPublicKey()))));
return x.toJCEKeyPair();
}
if (key instanceof com.trilead.ssh2.signature.DSAPrivateKey) {
com.trilead.ssh2.signature.DSAPrivateKey x = (com.trilead.ssh2.signature.DSAPrivateKey)key;
KeyFactory kf = KeyFactory.getInstance("DSA");
// System.out.println("ssh-dsa " + new String(Base64.encode(DSASHA1Verify.encodeSSHDSAPublicKey(x.getPublicKey()))));
return new KeyPair(
kf.generatePublic(new DSAPublicKeySpec(x.getY(), x.getP(), x.getQ(), x.getG())),
kf.generatePrivate(new DSAPrivateKeySpec(x.getX(), x.getP(), x.getQ(), x.getG())));
}
throw new UnsupportedOperationException("Unrecognizable key format: "+key);
}
public static KeyPair loadKey(String pemString) throws IOException, GeneralSecurityException {
return loadKey(pemString, null);
}
private static KeyPair tryEncryptedFile(File f) throws IOException, GeneralSecurityException{
KeyPair kp = null;
if(isPemEncrypted(f)){
String passwd = askForPasswd(f.getCanonicalPath());
kp = loadKey(f,passwd);
}
return kp;
}
private static boolean isPemEncrypted(File f) throws IOException{
String pemString = readPemFile(f);
//simple check if the file is encrypted
return pemString.contains("4,ENCRYPTED");
}
@SuppressWarnings("Since15")
@IgnoreJRERequirement
private static String askForPasswd(String filePath){
try {
Console cons = System.console();
String passwd = null;
if (cons != null){
char[] p = cons.readPassword("%s", "Enter passphrase for "+filePath+":");
passwd = String.valueOf(p);
}
return passwd;
} catch (LinkageError e) {
throw new Error("Your private key is encrypted, but we need Java6 to ask you password safely",e);
}
}
/**
* try all the default key locations
*/
private static void addDefaultPrivateKeyLocations(List<KeyPair> keyFileCandidates) {
File home = new File(System.getProperty("user.home"));
for (String path : new String[]{".ssh/id_rsa",".ssh/id_dsa",".ssh/identity"}) {
File key = new File(home,path);
if (key.exists()) {
try {
keyFileCandidates.add(loadKey(key));
} catch (IOException e) {
// don't report an error. the user can still see it by using the -i option
LOGGER.log(FINE, "Failed to load "+key,e);
} catch (GeneralSecurityException e) {
LOGGER.log(FINE, "Failed to load " + key, e);
}
}
}
}
/**
* Authenticate ourselves against the server.
*
* @return
* identity of the server represented as a public key.
*/
public PublicKey authenticate(Iterable<KeyPair> privateKeys) throws IOException, GeneralSecurityException {
Pipe c2s = Pipe.createLocalToRemote();
Pipe s2c = Pipe.createRemoteToLocal();
entryPoint.authenticate("ssh",c2s, s2c);
Connection c = new Connection(s2c.getIn(), c2s.getOut());
try {
byte[] sharedSecret = c.diffieHellman(false).generateSecret();
PublicKey serverIdentity = c.verifyIdentity(sharedSecret);
// try all the public keys
for (KeyPair key : privateKeys) {
c.proveIdentity(sharedSecret,key);
if (c.readBoolean())
return serverIdentity; // succeeded
}
if (privateKeys.iterator().hasNext())
throw new GeneralSecurityException("Authentication failed. No private key accepted.");
else
throw new GeneralSecurityException("No private key is available for use in authentication");
} finally {
c.close();
}
}
public PublicKey authenticate(KeyPair key) throws IOException, GeneralSecurityException {
return authenticate(Collections.singleton(key));
}
private static void printUsage(String msg) {
if(msg!=null) System.out.println(msg);
System.err.println(Messages.CLI_Usage());
}
private static final Logger LOGGER = Logger.getLogger(CLI.class.getName());
}
|
[
"\"JENKINS_URL\"",
"\"HUDSON_URL\""
] |
[] |
[
"JENKINS_URL",
"HUDSON_URL"
] |
[]
|
["JENKINS_URL", "HUDSON_URL"]
|
java
| 2 | 0 | |
cli/cmd/main_test.go
|
package cmd
import (
"flag"
"io/ioutil"
"os"
"path/filepath"
"testing"
"github.com/sergi/go-diff/diffmatchpatch"
)
var (
// updateFixtures is set by the `-update` flag.
updateFixtures bool
// prettyDiff is set by the `-verbose-diff` flag.
prettyDiff bool
)
// TestMain parses flags before running tests
func TestMain(m *testing.M) {
flag.BoolVar(&updateFixtures, "update", false, "update text fixtures in place")
prettyDiff = os.Getenv("LINKERD_TEST_PRETTY_DIFF") != ""
flag.BoolVar(&prettyDiff, "pretty-diff", prettyDiff, "display the full text when diffing")
flag.Parse()
os.Exit(m.Run())
}
// readTestdata reads a file and returns the contents of that file as a string.
func readTestdata(t *testing.T, fileName string) string {
file, err := os.Open(filepath.Join("testdata", fileName))
if err != nil {
t.Fatalf("Failed to open expected output file: %v", err)
}
fixture, err := ioutil.ReadAll(file)
if err != nil {
t.Fatalf("Failed to read expected output file: %v", err)
}
return string(fixture)
}
func writeTestdata(t *testing.T, fileName string, data []byte) {
p := filepath.Join("testdata", fileName)
if err := ioutil.WriteFile(p, data, 0644); err != nil {
t.Fatal(err)
}
}
// TODO: share this with integration tests
func diffTestdata(t *testing.T, path, actual string) {
expected := readTestdata(t, path)
if actual == expected {
return
}
dmp := diffmatchpatch.New()
diffs := dmp.DiffMain(expected, actual, true)
diffs = dmp.DiffCleanupSemantic(diffs)
var diff string
if prettyDiff {
diff = dmp.DiffPrettyText(diffs)
} else {
diff = dmp.PatchToText(dmp.PatchMake(diffs))
}
t.Errorf("mismatch: %s\n%s", path, diff)
if updateFixtures {
writeTestdata(t, path, []byte(actual))
}
}
|
[
"\"LINKERD_TEST_PRETTY_DIFF\""
] |
[] |
[
"LINKERD_TEST_PRETTY_DIFF"
] |
[]
|
["LINKERD_TEST_PRETTY_DIFF"]
|
go
| 1 | 0 | |
Django/d_party/asgi.py
|
"""
ASGI config for d_party project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/4.0/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "d_party.settings")
# application = get_asgi_application()
django_asgi_app = get_asgi_application()
from channels.routing import ProtocolTypeRouter
from channels.routing import URLRouter
from channels.auth import AuthMiddlewareStack
from channels.security.websocket import OriginValidator
import streamer.routing
from . import settings
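# Build the websocket Origin allow-list from the D_ANIME_STORE_DOMAIN environment variable.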
allowed_hosts = [
os.environ["D_ANIME_STORE_DOMAIN"],
"http://" + os.environ["D_ANIME_STORE_DOMAIN"],
"https://" + os.environ["D_ANIME_STORE_DOMAIN"],
"http://" + os.environ["D_ANIME_STORE_DOMAIN"] + ":80",
"https://" + os.environ["D_ANIME_STORE_DOMAIN"] + ":443",
]
if settings.DEBUG:
allowed_hosts += ["*"]
application = ProtocolTypeRouter(
{
"http": django_asgi_app,
"websocket": OriginValidator(
AuthMiddlewareStack(URLRouter(streamer.routing.websocket_urlpatterns)),
allowed_hosts,
),
}
)
|
[] |
[] |
[
"D_ANIME_STORE_DOMAIN"
] |
[]
|
["D_ANIME_STORE_DOMAIN"]
|
python
| 1 | 0 | |
pkg/rcache/rcache.go
|
package rcache
import (
"fmt"
"os"
"time"
"unicode/utf8"
"github.com/garyburd/redigo/redis"
"github.com/sourcegraph/sourcegraph/pkg/conf"
"github.com/sourcegraph/sourcegraph/pkg/redispool"
"gopkg.in/inconshreveable/log15.v2"
)
// dataVersion is used for releases that change type struture for
// data that may already be cached. Increasing this number will
// change the key prefix that is used for all hash keys,
// effectively resetting the cache at the same time the new code
// is deployed.
const dataVersion = "v2"
const dataVersionToDelete = "v1"
// DeleteOldCacheData deletes the rcache data in the given Redis instance
// that's prefixed with dataVersionToDelete
func DeleteOldCacheData(c redis.Conn) error {
return deleteKeysWithPrefix(c, dataVersionToDelete)
}
// Cache implements httpcache.Cache
type Cache struct {
keyPrefix string
ttlSeconds int
}
// New creates a redis backed Cache
func New(keyPrefix string) *Cache {
return &Cache{
keyPrefix: keyPrefix,
}
}
// NewWithTTL creates a redis backed Cache which expires values after
// ttlSeconds.
func NewWithTTL(keyPrefix string, ttlSeconds int) *Cache {
return &Cache{
keyPrefix: keyPrefix,
ttlSeconds: ttlSeconds,
}
}
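// GetMulti fetches all keys with a single MGET; missing or unparsable values are returned as nil entries.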
func (r *Cache) GetMulti(keys ...string) [][]byte {
c := pool.Get()
defer c.Close()
if len(keys) == 0 {
return nil
}
rkeys := make([]interface{}, len(keys))
for i, key := range keys {
rkeys[i] = r.rkeyPrefix() + key
}
vals, err := redis.Values(c.Do("MGET", rkeys...))
if err != nil && err != redis.ErrNil {
log15.Warn("failed to execute redis command", "cmd", "MGET", "error", err)
}
strVals := make([][]byte, len(vals))
for i, val := range vals {
b, err := redis.Bytes(val, nil)
if err != nil {
log15.Warn("failed to parse bytes from Redis value", "value", val)
continue
}
strVals[i] = b
}
return strVals
}
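// SetMulti pipelines a SET or SETEX per key-value pair and flushes them to Redis in one round trip.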
func (r *Cache) SetMulti(keyvals ...[2]string) {
c := pool.Get()
defer c.Close()
if len(keyvals) == 0 {
return
}
for _, kv := range keyvals {
k, v := kv[0], kv[1]
if !utf8.Valid([]byte(k)) {
if conf.IsDev(conf.DeployType()) {
panic(fmt.Sprintf("rcache: keys must be valid utf8 %v", []byte(k)))
} else {
log15.Error("rcache: keys must be valid utf8", "key", []byte(k))
}
continue
}
if r.ttlSeconds == 0 {
if err := c.Send("SET", r.rkeyPrefix()+k, []byte(v)); err != nil {
log15.Warn("failed to write redis command to client output buffer", "cmd", "SET", "error", err)
}
} else {
if err := c.Send("SETEX", r.rkeyPrefix()+k, r.ttlSeconds, []byte(v)); err != nil {
log15.Warn("failed to write redis command to client output buffer", "cmd", "SETEX", "error", err)
}
}
}
if err := c.Flush(); err != nil {
log15.Warn("failed to flush Redis client", "error", err)
}
}
// Get implements httpcache.Cache.Get
func (r *Cache) Get(key string) ([]byte, bool) {
c := pool.Get()
defer c.Close()
b, err := redis.Bytes(c.Do("GET", r.rkeyPrefix()+key))
if err != nil && err != redis.ErrNil {
log15.Warn("failed to execute redis command", "cmd", "GET", "error", err)
}
return b, err == nil
}
// Set implements httpcache.Cache.Set
func (r *Cache) Set(key string, b []byte) {
c := pool.Get()
defer c.Close()
if !utf8.Valid([]byte(key)) {
if conf.IsDev(conf.DeployType()) {
panic(fmt.Sprintf("rcache: keys must be valid utf8 %v", []byte(key)))
} else {
log15.Error("rcache: keys must be valid utf8", "key", []byte(key))
}
}
if r.ttlSeconds == 0 {
_, err := c.Do("SET", r.rkeyPrefix()+key, b)
if err != nil {
log15.Warn("failed to execute redis command", "cmd", "SET", "error", err)
}
} else {
_, err := c.Do("SETEX", r.rkeyPrefix()+key, r.ttlSeconds, b)
if err != nil {
log15.Warn("failed to execute redis command", "cmd", "SETEX", "error", err)
}
}
}
// Delete implements httpcache.Cache.Delete
func (r *Cache) Delete(key string) {
c := pool.Get()
defer c.Close()
_, err := c.Do("DEL", r.rkeyPrefix()+key)
if err != nil {
log15.Warn("failed to execute redis command", "cmd", "DEL", "error", err)
}
}
// rkeyPrefix generates the actual key prefix we use on redis.
func (r *Cache) rkeyPrefix() string {
return fmt.Sprintf("%s:%s:", globalPrefix, r.keyPrefix)
}
// TB is a subset of testing.TB
type TB interface {
Name() string
Skip(args ...interface{})
Helper()
}
// SetupForTest adjusts the globalPrefix and clears it out. You will have
// conflicts if you do `t.Parallel()`
func SetupForTest(t TB) {
t.Helper()
pool = &redis.Pool{
MaxIdle: 3,
IdleTimeout: 240 * time.Second,
Dial: func() (redis.Conn, error) {
return redis.Dial("tcp", "127.0.0.1:6379")
},
TestOnBorrow: func(c redis.Conn, t time.Time) error {
_, err := c.Do("PING")
return err
},
}
globalPrefix = "__test__" + t.Name()
// Make mutex fails faster
mutexTries = 1
c := pool.Get()
defer c.Close()
// If we are not on CI, skip the test if our redis connection fails.
if os.Getenv("CI") == "" {
_, err := c.Do("PING")
if err != nil {
t.Skip("could not connect to redis", err)
}
}
err := deleteKeysWithPrefix(c, globalPrefix)
if err != nil {
log15.Error("Could not clear test prefix", "name", t.Name(), "globalPrefix", globalPrefix, "error", err)
}
}
func deleteKeysWithPrefix(c redis.Conn, prefix string) error {
const script = `local keys = redis.call('keys', ARGV[1])
if #keys > 0 then
return redis.call('del', unpack(keys))
else
return ''
end`
_, err := c.Do("EVAL", script, 0, prefix+":*")
return err
}
var (
pool = redispool.Cache
globalPrefix = dataVersion
)
|
[
"\"CI\""
] |
[] |
[
"CI"
] |
[]
|
["CI"]
|
go
| 1 | 0 | |
graphql/util.go
|
package graphql
import (
"context"
"fmt"
"net/http"
"net/url"
"os"
"regexp"
"runtime/debug"
"sort"
"strings"
"time"
"github.com/evergreen-ci/evergreen/apimodels"
"github.com/evergreen-ci/evergreen/model/event"
"github.com/evergreen-ci/evergreen"
"github.com/evergreen-ci/evergreen/cloud"
"github.com/evergreen-ci/evergreen/model"
"github.com/evergreen-ci/evergreen/model/artifact"
"github.com/evergreen-ci/evergreen/model/commitqueue"
"github.com/evergreen-ci/evergreen/model/host"
"github.com/evergreen-ci/evergreen/model/patch"
"github.com/evergreen-ci/evergreen/model/task"
"github.com/evergreen-ci/evergreen/model/user"
"github.com/evergreen-ci/evergreen/rest/data"
restModel "github.com/evergreen-ci/evergreen/rest/model"
"github.com/evergreen-ci/evergreen/units"
"github.com/evergreen-ci/gimlet"
"github.com/evergreen-ci/utility"
"github.com/mongodb/grip"
"github.com/mongodb/grip/message"
"github.com/pkg/errors"
"github.com/vektah/gqlparser/v2/gqlerror"
"golang.org/x/crypto/ssh"
)
// GetGroupedFiles returns the files of a Task inside a GroupedFile struct
func GetGroupedFiles(ctx context.Context, name string, taskID string, execution int) (*GroupedFiles, error) {
taskFiles, err := artifact.GetAllArtifacts([]artifact.TaskIDAndExecution{{TaskID: taskID, Execution: execution}})
if err != nil {
return nil, ResourceNotFound.Send(ctx, err.Error())
}
hasUser := gimlet.GetUser(ctx) != nil
strippedFiles, err := artifact.StripHiddenFiles(taskFiles, hasUser)
if err != nil {
return nil, err
}
apiFileList := []*restModel.APIFile{}
for _, file := range strippedFiles {
apiFile := restModel.APIFile{}
err := apiFile.BuildFromService(file)
if err != nil {
return nil, InternalServerError.Send(ctx, fmt.Sprintf("error stripping hidden files"))
}
apiFileList = append(apiFileList, &apiFile)
}
return &GroupedFiles{TaskName: &name, Files: apiFileList}, nil
}
func SetScheduled(ctx context.Context, sc data.Connector, taskID string, isActive bool) (*restModel.APITask, error) {
usr := MustHaveUser(ctx)
t, err := task.FindOneId(taskID)
if err != nil {
return nil, ResourceNotFound.Send(ctx, err.Error())
}
if t == nil {
return nil, ResourceNotFound.Send(ctx, errors.Errorf("task %s not found", taskID).Error())
}
if t.Requester == evergreen.MergeTestRequester && isActive {
return nil, InputValidationError.Send(ctx, "commit queue tasks cannot be manually scheduled")
}
if err = model.SetActiveState(t, usr.Username(), isActive); err != nil {
return nil, InternalServerError.Send(ctx, err.Error())
}
// Get the modified task back out of the db
t, err = task.FindOneId(taskID)
if err != nil {
return nil, ResourceNotFound.Send(ctx, err.Error())
}
if t == nil {
return nil, ResourceNotFound.Send(ctx, err.Error())
}
apiTask := restModel.APITask{}
err = apiTask.BuildFromService(t)
if err != nil {
return nil, InternalServerError.Send(ctx, err.Error())
}
err = apiTask.BuildFromService(sc.GetURL())
if err != nil {
return nil, InternalServerError.Send(ctx, err.Error())
}
return &apiTask, nil
}
// GetFormattedDate returns a time.Time formatted like "December 13, 2020, 11:58PM" in the given timezone
func GetFormattedDate(t *time.Time, timezone string) (*string, error) {
if t == nil {
return nil, nil
}
loc, err := time.LoadLocation(timezone)
if err != nil {
return nil, err
}
timeInUserTimezone := t.In(loc)
newTime := fmt.Sprintf("%s %d, %d, %s", timeInUserTimezone.Month(), timeInUserTimezone.Day(), timeInUserTimezone.Year(), timeInUserTimezone.Format(time.Kitchen))
return &newTime, nil
}
func getVersionBaseTasks(d data.Connector, versionID string) ([]task.Task, error) {
version, err := d.FindVersionById(versionID)
if err != nil {
return nil, fmt.Errorf("Error getting version %s: %s", versionID, err.Error())
}
if version == nil {
return nil, fmt.Errorf("No version found for ID %s", versionID)
}
baseVersion, err := model.VersionFindOne(model.BaseVersionByProjectIdAndRevision(version.Identifier, version.Revision))
if err != nil {
return nil, fmt.Errorf("Error getting base version from version %s: %s", version.Id, err.Error())
}
if baseVersion == nil {
return nil, fmt.Errorf("No base version found from version %s", version.Id)
}
baseTasks, err := task.FindTasksFromVersions([]string{baseVersion.Id})
if err != nil {
return nil, fmt.Errorf("Error getting tasks from version %s: %s", baseVersion.Id, err.Error())
}
if baseTasks == nil {
return nil, fmt.Errorf("No tasks found for version %s", baseVersion.Id)
}
return baseTasks, nil
}
// BaseTaskStatuses represents the format {buildVariant: {displayName: status}} for base task statuses
type BaseTaskStatuses map[string]map[string]string
// GetBaseTaskStatusesFromPatchID gets the status of each base build associated with a task
func GetBaseTaskStatusesFromPatchID(d data.Connector, patchID string) (BaseTaskStatuses, error) {
baseTasks, err := getVersionBaseTasks(d, patchID)
if err != nil {
return nil, err
}
baseTaskStatusesByDisplayNameByVariant := make(map[string]map[string]string)
for _, task := range baseTasks {
if _, ok := baseTaskStatusesByDisplayNameByVariant[task.BuildVariant]; !ok {
baseTaskStatusesByDisplayNameByVariant[task.BuildVariant] = map[string]string{}
}
baseTaskStatusesByDisplayNameByVariant[task.BuildVariant][task.DisplayName] = task.GetDisplayStatus()
}
return baseTaskStatusesByDisplayNameByVariant, nil
}
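// hasEnqueuePatchPermission returns true for the patch owner, superusers, and users with project settings edit access on the patch's project.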
func hasEnqueuePatchPermission(u *user.DBUser, existingPatch *restModel.APIPatch) bool {
if u == nil || existingPatch == nil {
return false
}
// patch owner
if utility.FromStringPtr(existingPatch.Author) == u.Username() {
return true
}
// superuser
permissions := gimlet.PermissionOpts{
Resource: evergreen.SuperUserPermissionsID,
ResourceType: evergreen.SuperUserResourceType,
Permission: evergreen.PermissionAdminSettings,
RequiredLevel: evergreen.AdminSettingsEdit.Value,
}
if u.HasPermission(permissions) {
return true
}
return u.HasPermission(gimlet.PermissionOpts{
Resource: utility.FromStringPtr(existingPatch.ProjectId),
ResourceType: evergreen.ProjectResourceType,
Permission: evergreen.PermissionProjectSettings,
RequiredLevel: evergreen.ProjectSettingsEdit.Value,
})
}
// SchedulePatch schedules a patch. It returns an error and an HTTP status code. In the case of
// success, it also returns a success message and a version ID.
func SchedulePatch(ctx context.Context, patchId string, version *model.Version, patchUpdateReq PatchUpdate) (error, int, string, string) {
var err error
p, err := patch.FindOneId(patchId)
if err != nil {
return errors.Errorf("error loading patch: %s", err), http.StatusInternalServerError, "", ""
}
// only modify parameters if the patch hasn't been finalized
if patchUpdateReq.ParametersModel != nil && p.Version == "" {
var parameters []patch.Parameter
for _, param := range patchUpdateReq.ParametersModel {
parameters = append(parameters, param.ToService())
}
if err = p.SetParameters(parameters); err != nil {
return errors.Errorf("error setting patch parameters: %s", err), http.StatusInternalServerError, "", ""
}
}
if p.IsCommitQueuePatch() {
return errors.New("can't schedule commit queue patch"), http.StatusBadRequest, "", ""
}
// Unmarshal the project config and set it in the project context
project := &model.Project{}
if _, _, err = model.LoadProjectInto(ctx, []byte(p.PatchedConfig), nil, p.Project, project); err != nil {
return errors.Errorf("Error unmarshaling project config: %v", err), http.StatusInternalServerError, "", ""
}
addDisplayTasksToPatchReq(&patchUpdateReq, *project)
tasks := model.VariantTasksToTVPairs(patchUpdateReq.VariantsTasks)
tasks.ExecTasks, err = model.IncludeDependencies(project, tasks.ExecTasks, p.GetRequester())
grip.Warning(message.WrapError(err, message.Fields{
"message": "error including dependencies for patch",
"patch": patchId,
}))
if err = model.ValidateTVPairs(project, tasks.ExecTasks); err != nil {
return err, http.StatusBadRequest, "", ""
}
// update the description for both reconfigured and new patches
if err = p.SetDescription(patchUpdateReq.Description); err != nil {
return errors.Wrap(err, "Error setting description"), http.StatusInternalServerError, "", ""
}
// update the variants and tasks for both reconfigured and new patches
if err = p.SetVariantsTasks(tasks.TVPairsToVariantTasks()); err != nil {
return errors.Wrap(err, "Error setting variants and tasks"), http.StatusInternalServerError, "", ""
}
// create a separate context from the one the caller has so that the caller
// can't interrupt the db operations here
newCxt := context.Background()
projectRef, err := model.FindMergedProjectRef(project.Identifier, p.Version, true)
if err != nil {
return errors.Wrap(err, "unable to find project ref"), http.StatusInternalServerError, "", ""
}
if projectRef == nil {
return errors.Errorf("project '%s' not found", project.Identifier), http.StatusInternalServerError, "", ""
}
if p.Version != "" {
p.Activated = true
// This patch has already been finalized, just add the new builds and tasks
if version == nil {
return errors.Errorf("Couldn't find patch for id %v", p.Version), http.StatusInternalServerError, "", ""
}
// First add new tasks to existing builds, if necessary
err = model.AddNewTasksForPatch(context.Background(), p, version, project, tasks, projectRef.Identifier)
if err != nil {
return errors.Wrapf(err, "Error creating new tasks for version `%s`", version.Id), http.StatusInternalServerError, "", ""
}
err = model.AddNewBuildsForPatch(newCxt, p, version, project, tasks, projectRef)
if err != nil {
return errors.Wrapf(err, "Error creating new builds for version `%s`", version.Id), http.StatusInternalServerError, "", ""
}
return nil, http.StatusOK, "Builds and tasks successfully added to patch.", version.Id
} else {
settings, err := evergreen.GetConfig()
if err != nil {
return err, http.StatusInternalServerError, "", ""
}
githubOauthToken, err := settings.GetGithubOauthToken()
if err != nil {
return err, http.StatusInternalServerError, "", ""
}
p.Activated = true
err = p.SetVariantsTasks(tasks.TVPairsToVariantTasks())
if err != nil {
return errors.Wrap(err, "Error setting patch variants and tasks"), http.StatusInternalServerError, "", ""
}
// Process additional patch trigger aliases added via UI.
// Child patches created with the CLI --trigger-alias flag go through a separate flow, so ensure that new child patches are also created before the parent is finalized.
childPatchIds, err := units.ProcessTriggerAliases(ctx, p, projectRef, evergreen.GetEnvironment(), patchUpdateReq.PatchTriggerAliases)
if err != nil {
return errors.Wrap(err, "Error processing patch trigger aliases"), http.StatusInternalServerError, "", ""
}
if len(childPatchIds) > 0 {
if err = p.SetChildPatches(); err != nil {
return errors.Wrapf(err, "error attaching child patches '%s'", p.Id.Hex()), http.StatusInternalServerError, "", ""
}
p.Triggers.Aliases = patchUpdateReq.PatchTriggerAliases
if err = p.SetTriggerAliases(); err != nil {
return errors.Wrapf(err, "error attaching trigger aliases '%s'", p.Id.Hex()), http.StatusInternalServerError, "", ""
}
}
requester := p.GetRequester()
ver, err := model.FinalizePatch(newCxt, p, requester, githubOauthToken)
if err != nil {
return errors.Wrap(err, "Error finalizing patch"), http.StatusInternalServerError, "", ""
}
if requester == evergreen.PatchVersionRequester {
grip.Info(message.Fields{
"operation": "patch creation",
"message": "finalized patch",
"from": "UI",
"patch_id": p.Id,
"variants": p.BuildVariants,
"tasks": p.Tasks,
"variant_tasks": p.VariantsTasks,
"alias": p.Alias,
})
}
if p.IsGithubPRPatch() {
job := units.NewGithubStatusUpdateJobForNewPatch(p.Id.Hex())
if err := evergreen.GetEnvironment().LocalQueue().Put(newCxt, job); err != nil {
return errors.Wrap(err, "Error adding github status update job to queue"), http.StatusInternalServerError, "", ""
}
}
return nil, http.StatusOK, "Patch builds are scheduled.", ver.Id
}
}
func addDisplayTasksToPatchReq(req *PatchUpdate, p model.Project) {
for i, vt := range req.VariantsTasks {
bv := p.FindBuildVariant(vt.Variant)
if bv == nil {
continue
}
for i := len(vt.Tasks) - 1; i >= 0; i-- {
task := vt.Tasks[i]
displayTask := bv.GetDisplayTask(task)
if displayTask == nil {
continue
}
vt.Tasks = append(vt.Tasks[:i], vt.Tasks[i+1:]...)
vt.DisplayTasks = append(vt.DisplayTasks, *displayTask)
}
req.VariantsTasks[i] = vt
}
}
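// Illustrative note (added commentary, not in the original source): addDisplayTasksToPatchReq
// above appears to fold requested execution tasks into their display tasks. For example, if a
// variant defines a display task "lint" grouping "lint-a" and "lint-b" and the request names
// "lint-a", the loop removes "lint-a" from vt.Tasks and appends the "lint" display task instead.
// The task and display-task names here are hypothetical and only illustrate the substitution.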
type VariantsAndTasksFromProject struct {
Variants map[string]model.BuildVariant
Tasks []struct{ Name string }
Project model.Project
}
func GetVariantsAndTasksFromProject(ctx context.Context, patchedConfig, patchProject string) (*VariantsAndTasksFromProject, error) {
project := &model.Project{}
if _, _, err := model.LoadProjectInto(ctx, []byte(patchedConfig), nil, patchProject, project); err != nil {
return nil, errors.Errorf("Error unmarshaling project config: %v", err)
}
// retrieve tasks and variant mappings' names
variantMappings := make(map[string]model.BuildVariant)
for _, variant := range project.BuildVariants {
tasksForVariant := []model.BuildVariantTaskUnit{}
for _, taskFromVariant := range variant.Tasks {
// add the task to the list if it's patchable, not restricted to git tags, and not disabled
if !taskFromVariant.IsDisabled() && utility.FromBoolTPtr(taskFromVariant.Patchable) && !utility.FromBoolPtr(taskFromVariant.GitTagOnly) {
if taskFromVariant.IsGroup {
tasksForVariant = append(tasksForVariant, model.CreateTasksFromGroup(taskFromVariant, project, evergreen.PatchVersionRequester)...)
} else {
tasksForVariant = append(tasksForVariant, taskFromVariant)
}
}
}
if len(tasksForVariant) > 0 {
variant.Tasks = tasksForVariant
variantMappings[variant.Name] = variant
}
}
tasksList := []struct{ Name string }{}
for _, task := range project.Tasks {
// add a task name to the list if it's patchable and not restricted to git tags and not disabled
if !utility.FromBoolPtr(task.Disable) && utility.FromBoolTPtr(task.Patchable) && !utility.FromBoolPtr(task.GitTagOnly) {
tasksList = append(tasksList, struct{ Name string }{task.Name})
}
}
variantsAndTasksFromProject := VariantsAndTasksFromProject{
Variants: variantMappings,
Tasks: tasksList,
Project: *project,
}
return &variantsAndTasksFromProject, nil
}
// GetPatchProjectVariantsAndTasksForUI gets the variants and tasks for a project for a patch id
func GetPatchProjectVariantsAndTasksForUI(ctx context.Context, apiPatch *restModel.APIPatch) (*PatchProject, error) {
patchProjectVariantsAndTasks, err := GetVariantsAndTasksFromProject(ctx, *apiPatch.PatchedConfig, *apiPatch.ProjectId)
if err != nil {
return nil, InternalServerError.Send(ctx, fmt.Sprintf("Error getting project variants and tasks for patch %s: %s", *apiPatch.Id, err.Error()))
}
// convert variants to UI data structure
variants := []*ProjectBuildVariant{}
for _, buildVariant := range patchProjectVariantsAndTasks.Variants {
projBuildVariant := ProjectBuildVariant{
Name: buildVariant.Name,
DisplayName: buildVariant.DisplayName,
}
projTasks := []string{}
for _, taskUnit := range buildVariant.Tasks {
projTasks = append(projTasks, taskUnit.Name)
}
for _, displayTask := range buildVariant.DisplayTasks {
projTasks = append(projTasks, displayTask.Name)
}
sort.SliceStable(projTasks, func(i, j int) bool {
return projTasks[i] < projTasks[j]
})
projBuildVariant.Tasks = projTasks
variants = append(variants, &projBuildVariant)
}
sort.SliceStable(variants, func(i, j int) bool {
return variants[i].DisplayName < variants[j].DisplayName
})
patchProject := PatchProject{
Variants: variants,
}
return &patchProject, nil
}
type PatchUpdate struct {
Description string `json:"description"`
ParametersModel []*restModel.APIParameter `json:"parameters_model,omitempty"`
PatchTriggerAliases []string `json:"patch_trigger_aliases,omitempty"`
VariantsTasks []patch.VariantTasks `json:"variants_tasks,omitempty"`
}
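// Illustrative request body for PatchUpdate (added commentary; only the top-level keys come from
// the json tags above, and the nested entry shapes depend on the referenced API models):
//
//	{
//	  "description": "rerun lint",
//	  "parameters_model": [ ... ],
//	  "patch_trigger_aliases": ["downstream-smoke"],
//	  "variants_tasks": [ ... ]
//	}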
// BuildFromGqlInput takes a PatchConfigure gql type and returns a PatchUpdate type
func (p *PatchUpdate) BuildFromGqlInput(r PatchConfigure) {
p.Description = r.Description
p.PatchTriggerAliases = r.PatchTriggerAliases
p.ParametersModel = r.Parameters
for _, vt := range r.VariantsTasks {
variantTasks := patch.VariantTasks{
Variant: vt.Variant,
Tasks: vt.Tasks,
}
for _, displayTask := range vt.DisplayTasks {
// note that the UI does not pass ExecTasks, which tells the back-end model to figure out the right execution tasks
dt := patch.DisplayTask{Name: displayTask.Name}
variantTasks.DisplayTasks = append(variantTasks.DisplayTasks, dt)
}
p.VariantsTasks = append(p.VariantsTasks, variantTasks)
}
}
// GetAPITaskFromTask builds an APITask from the given task
func GetAPITaskFromTask(ctx context.Context, sc data.Connector, task task.Task) (*restModel.APITask, error) {
apiTask := restModel.APITask{}
err := apiTask.BuildFromService(&task)
if err != nil {
return nil, InternalServerError.Send(ctx, fmt.Sprintf("error building apiTask from task %s: %s", task.Id, err.Error()))
}
err = apiTask.BuildFromService(sc.GetURL())
if err != nil {
return nil, InternalServerError.Send(ctx, fmt.Sprintf("Error setting building task from apiTask %s: %s", task.Id, err.Error()))
}
return &apiTask, nil
}
// Takes a version id and some filter criteria and returns the matching associated tasks grouped together by their build variant.
func generateBuildVariants(sc data.Connector, versionId string, searchVariants []string, searchTasks []string, statuses []string) ([]*GroupedBuildVariant, error) {
variantDisplayName := map[string]string{}
tasksByVariant := map[string][]*restModel.APITask{}
defaultSort := []task.TasksSortOrder{
{Key: task.DisplayNameKey, Order: 1},
}
opts := data.TaskFilterOptions{
Statuses: statuses,
Variants: searchVariants,
TaskNames: searchTasks,
Sorts: defaultSort,
IncludeBaseTasks: true,
}
start := time.Now()
tasks, _, err := sc.FindTasksByVersion(versionId, opts)
if err != nil {
return nil, errors.Wrapf(err, "Error getting tasks for patch `%s`", versionId)
}
timeToFindTasks := time.Since(start)
buildTaskStartTime := time.Now()
for _, t := range tasks {
apiTask := restModel.APITask{}
err := apiTask.BuildFromService(&t)
if err != nil {
return nil, errors.Wrapf(err, "Error building apiTask from task '%s'", t.Id)
}
variantDisplayName[t.BuildVariant] = t.BuildVariantDisplayName
tasksByVariant[t.BuildVariant] = append(tasksByVariant[t.BuildVariant], &apiTask)
}
timeToBuildTasks := time.Since(buildTaskStartTime)
groupTasksStartTime := time.Now()
result := []*GroupedBuildVariant{}
for variant, tasks := range tasksByVariant {
pbv := GroupedBuildVariant{
Variant: variant,
DisplayName: variantDisplayName[variant],
Tasks: tasks,
}
result = append(result, &pbv)
}
timeToGroupTasks := time.Since(groupTasksStartTime)
sortTasksStartTime := time.Now()
// sort variants by name
sort.SliceStable(result, func(i, j int) bool {
return result[i].DisplayName < result[j].DisplayName
})
timeToSortTasks := time.Since(sortTasksStartTime)
totalTime := time.Since(start)
grip.InfoWhen(totalTime > time.Second*2, message.Fields{
"Ticket": "EVG-14828",
"timeToFindTasks": timeToFindTasks,
"timeToBuildTasks": timeToBuildTasks,
"timeToGroupTasks": timeToGroupTasks,
"timeToSortTasks": timeToSortTasks,
"totalTime": totalTime,
"versionId": versionId,
"taskCount": len(tasks),
"buildVariantCount": len(result),
})
return result, nil
}
// getCedarFailedTestResultsSample returns a sample of failed test results for the given tasks that match the supplied testFilters
func getCedarFailedTestResultsSample(ctx context.Context, tasks []task.Task, testFilters []string) ([]apimodels.CedarFailedTestResultsSample, error) {
if len(tasks) == 0 {
return nil, nil
}
taskFilters := []apimodels.CedarTaskInfo{}
for _, t := range tasks {
taskFilters = append(taskFilters, apimodels.CedarTaskInfo{
TaskID: t.Id,
Execution: t.Execution,
DisplayTask: t.DisplayOnly,
})
}
opts := apimodels.GetCedarFailedTestResultsSampleOptions{
BaseURL: evergreen.GetEnvironment().Settings().Cedar.BaseURL,
SampleOptions: apimodels.CedarFailedTestSampleOptions{
Tasks: taskFilters,
RegexFilters: testFilters,
},
}
results, err := apimodels.GetCedarFilteredFailedSamples(ctx, opts)
if err != nil {
return nil, errors.Wrap(err, "getting cedar filtered failed samples")
}
return results, nil
}
type VersionModificationAction string
const (
Restart VersionModificationAction = "restart"
SetActive VersionModificationAction = "set_active"
SetPriority VersionModificationAction = "set_priority"
)
type VersionModifications struct {
Action VersionModificationAction `json:"action"`
Active bool `json:"active"`
Abort bool `json:"abort"`
Priority int64 `json:"priority"`
VersionsToRestart []*model.VersionToRestart `json:"versions_to_restart"`
TaskIds []string `json:"task_ids"` // deprecated
}
func ModifyVersion(version model.Version, user user.DBUser, proj *model.ProjectRef, modifications VersionModifications) (int, error) {
switch modifications.Action {
case Restart:
if modifications.VersionsToRestart == nil { // maintain backwards compatibility with the legacy UI and the deprecated restartPatch resolver
if err := model.RestartVersion(version.Id, modifications.TaskIds, modifications.Abort, user.Id); err != nil {
return http.StatusInternalServerError, errors.Errorf("error restarting patch: %s", err)
}
}
if err := model.RestartVersions(modifications.VersionsToRestart, modifications.Abort, user.Id); err != nil {
return http.StatusInternalServerError, errors.Errorf("error restarting patch: %s", err)
}
case SetActive:
if version.Requester == evergreen.MergeTestRequester && modifications.Active {
return http.StatusBadRequest, errors.New("commit queue merges cannot be manually scheduled")
}
if err := model.SetVersionActivation(version.Id, modifications.Active, user.Id); err != nil {
return http.StatusInternalServerError, errors.Errorf("error activating patch: %s", err)
}
// abort after deactivating the version so we aren't bombarded with failing tasks while
// the deactivation is in progress
if modifications.Abort {
if err := task.AbortVersion(version.Id, task.AbortInfo{User: user.DisplayName()}); err != nil {
return http.StatusInternalServerError, errors.Errorf("error aborting patch: %s", err)
}
}
if !modifications.Active && version.Requester == evergreen.MergeTestRequester {
var projId string
if proj == nil {
id, err := model.GetIdForProject(version.Identifier)
if err != nil {
return http.StatusNotFound, errors.Errorf("error getting project ref: %s", err.Error())
}
if id == "" {
return http.StatusNotFound, errors.Errorf("project %s does not exist", version.Branch)
}
projId = id
} else {
projId = proj.Id
}
_, err := commitqueue.RemoveCommitQueueItemForVersion(projId, version.Id, user.DisplayName())
if err != nil {
return http.StatusInternalServerError, errors.Errorf("error removing patch from commit queue: %s", err)
}
p, err := patch.FindOneId(version.Id)
if err != nil {
return http.StatusInternalServerError, errors.Wrap(err, "unable to find patch")
}
if p == nil {
return http.StatusNotFound, errors.New("patch not found")
}
err = model.SendCommitQueueResult(p, message.GithubStateError, fmt.Sprintf("deactivated by '%s'", user.DisplayName()))
grip.Error(message.WrapError(err, message.Fields{
"message": "unable to send github status",
"patch": version.Id,
}))
err = model.RestartItemsAfterVersion(nil, projId, version.Id, user.Id)
if err != nil {
return http.StatusInternalServerError, errors.Errorf("error restarting later commit queue items: %s", err)
}
}
case SetPriority:
projId := version.Identifier
if projId == "" {
return http.StatusNotFound, errors.Errorf("Could not find project for version %s", version.Id)
}
if modifications.Priority > evergreen.MaxTaskPriority {
requiredPermission := gimlet.PermissionOpts{
Resource: projId,
ResourceType: "project",
Permission: evergreen.PermissionTasks,
RequiredLevel: evergreen.TasksAdmin.Value,
}
if !user.HasPermission(requiredPermission) {
return http.StatusUnauthorized, errors.Errorf("Insufficient access to set priority %v, can only set priority less than or equal to %v", modifications.Priority, evergreen.MaxTaskPriority)
}
}
if err := model.SetVersionPriority(version.Id, modifications.Priority, user.Id); err != nil {
return http.StatusInternalServerError, errors.Errorf("error setting version priority: %s", err)
}
default:
return http.StatusBadRequest, errors.Errorf("Unrecognized action: %v", modifications.Action)
}
return 0, nil
}
// ModifyVersionHandler handles the boilerplate code for performing a modify version action, i.e. schedule, unschedule, restart and set priority
func ModifyVersionHandler(ctx context.Context, dataConnector data.Connector, patchID string, modifications VersionModifications) error {
version, err := dataConnector.FindVersionById(patchID)
if err != nil {
return ResourceNotFound.Send(ctx, fmt.Sprintf("error finding version %s: %s", patchID, err.Error()))
}
user := MustHaveUser(ctx)
httpStatus, err := ModifyVersion(*version, *user, nil, modifications)
if err != nil {
return mapHTTPStatusToGqlError(ctx, httpStatus, err)
}
if evergreen.IsPatchRequester(version.Requester) {
// restart is handled through graphql because we need the user to specify
// which downstream tasks they want to restart
if modifications.Action != Restart {
//do the same for child patches
p, err := patch.FindOneId(patchID)
if err != nil {
return ResourceNotFound.Send(ctx, fmt.Sprintf("error finding patch %s: %s", patchID, err.Error()))
}
if p == nil {
return ResourceNotFound.Send(ctx, fmt.Sprintf("patch '%s' not found ", patchID))
}
if p.IsParent() {
for _, childPatchId := range p.Triggers.ChildPatches {
p, err := patch.FindOneId(childPatchId)
if err != nil {
return ResourceNotFound.Send(ctx, fmt.Sprintf("error finding child patch %s: %s", childPatchId, err.Error()))
}
if p == nil {
return ResourceNotFound.Send(ctx, fmt.Sprintf("child patch '%s' not found ", childPatchId))
}
// only modify the child patch if it is finalized
if p.Version != "" {
err = ModifyVersionHandler(ctx, dataConnector, childPatchId, modifications)
if err != nil {
return errors.Wrap(mapHTTPStatusToGqlError(ctx, httpStatus, err), fmt.Sprintf("error modifying child patch '%s'", childPatchId))
}
}
}
}
}
}
return nil
}
func mapHTTPStatusToGqlError(ctx context.Context, httpStatus int, err error) *gqlerror.Error {
switch httpStatus {
case http.StatusInternalServerError:
return InternalServerError.Send(ctx, err.Error())
case http.StatusNotFound:
return ResourceNotFound.Send(ctx, err.Error())
case http.StatusUnauthorized:
return Forbidden.Send(ctx, err.Error())
case http.StatusBadRequest:
return InputValidationError.Send(ctx, err.Error())
default:
return InternalServerError.Send(ctx, err.Error())
}
}
func isTaskBlocked(ctx context.Context, at *restModel.APITask) (*bool, error) {
t, err := task.FindOneIdNewOrOld(*at.Id)
if err != nil {
return nil, ResourceNotFound.Send(ctx, err.Error())
}
if t == nil {
return nil, ResourceNotFound.Send(ctx, fmt.Sprintf("task %s not found", *at.Id))
}
isBlocked := t.Blocked()
return &isBlocked, nil
}
func isExecutionTask(ctx context.Context, at *restModel.APITask) (*bool, error) {
i, err := at.ToService()
if err != nil {
return nil, InternalServerError.Send(ctx, fmt.Sprintf("Error while converting task %s to service", *at.Id))
}
t, ok := i.(*task.Task)
if !ok {
return nil, InternalServerError.Send(ctx, fmt.Sprintf("Unable to convert APITask %s to Task", *at.Id))
}
isExecutionTask := t.IsPartOfDisplay()
return &isExecutionTask, nil
}
func canRestartTask(ctx context.Context, at *restModel.APITask) (*bool, error) {
taskBlocked, err := isTaskBlocked(ctx, at)
if err != nil {
return nil, err
}
nonrestartableStatuses := []string{evergreen.TaskStarted, evergreen.TaskUnstarted, evergreen.TaskUndispatched, evergreen.TaskDispatched, evergreen.TaskInactive}
canRestart := !utility.StringSliceContains(nonrestartableStatuses, *at.Status) || at.Aborted || (at.DisplayOnly && *taskBlocked)
isExecTask, err := isExecutionTask(ctx, at) // Can't restart execution tasks.
if err != nil {
return nil, err
}
if *isExecTask {
canRestart = false
}
return &canRestart, nil
}
func getAllTaskStatuses(tasks []task.Task) []string {
statusesMap := map[string]bool{}
for _, task := range tasks {
statusesMap[task.GetDisplayStatus()] = true
}
statusesArr := []string{}
for key := range statusesMap {
statusesArr = append(statusesArr, key)
}
sort.SliceStable(statusesArr, func(i, j int) bool {
return statusesArr[i] < statusesArr[j]
})
return statusesArr
}
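// Example (added for illustration): getAllTaskStatuses above deduplicates display statuses and
// returns them sorted, so tasks with statuses "failed", "success", "failed" yield
// []string{"failed", "success"}.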
func formatDuration(duration string) string {
regex := regexp.MustCompile(`\d*[dhms]`)
return strings.TrimSpace(regex.ReplaceAllStringFunc(duration, func(m string) string {
return m + " "
}))
}
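// Example (added for illustration): formatDuration above inserts a space after each
// digits-plus-unit token matched by `\d*[dhms]` and trims the trailing space, e.g.
//
//	formatDuration("2d4h10m") // "2d 4h 10m"
//	formatDuration("45s")     // "45s"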
func getResourceTypeAndIdFromSubscriptionSelectors(ctx context.Context, selectors []restModel.APISelector) (string, string, error) {
var id string
var idType string
for _, s := range selectors {
if s.Type == nil {
return "", "", InputValidationError.Send(ctx, "Found nil for selector type. Selector type must be a string and not nil.")
}
// Don't exit the loop for object and id because together they
// describe the resource id and resource type for the subscription
switch *s.Type {
case "object":
idType = *s.Data
case "id":
id = *s.Data
case "project":
idType = "project"
id = *s.Data
return idType, id, nil
case "in-version":
idType = "version"
id = *s.Data
return idType, id, nil
}
}
if idType == "" || id == "" {
return "", "", InputValidationError.Send(ctx, "Selectors do not indicate a target version, build, project, or task ID")
}
return idType, id, nil
}
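// Example (added for illustration), based on the switch above: a selector list such as
// [{Type: "object", Data: "build"}, {Type: "id", Data: "<build-id>"}] resolves to
// ("build", "<build-id>"), while a single {Type: "project", Data: "<project-id>"} or
// {Type: "in-version", Data: "<version-id>"} selector returns immediately as
// ("project", ...) or ("version", ...).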
func savePublicKey(ctx context.Context, publicKeyInput PublicKeyInput) error {
if doesPublicKeyNameAlreadyExist(ctx, publicKeyInput.Name) {
return InputValidationError.Send(ctx, fmt.Sprintf("Provided key name, %s, already exists.", publicKeyInput.Name))
}
err := verifyPublicKey(ctx, publicKeyInput)
if err != nil {
return err
}
err = MustHaveUser(ctx).AddPublicKey(publicKeyInput.Name, publicKeyInput.Key)
if err != nil {
return InternalServerError.Send(ctx, fmt.Sprintf("Error saving public key: %s", err.Error()))
}
return nil
}
func verifyPublicKey(ctx context.Context, publicKey PublicKeyInput) error {
if publicKey.Name == "" {
return InputValidationError.Send(ctx, "Provided public key name cannot be empty.")
}
_, _, _, _, err := ssh.ParseAuthorizedKey([]byte(publicKey.Key))
if err != nil {
return InputValidationError.Send(ctx, fmt.Sprintf("Provided public key is invalid : %s", err.Error()))
}
return nil
}
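// Note (added commentary): ssh.ParseAuthorizedKey expects a key in authorized_keys format,
// i.e. a line like "ssh-ed25519 AAAA... optional-comment", so verifyPublicKey above rejects
// any input that does not parse as such a line. The key text shown here is illustrative only.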
func doesPublicKeyNameAlreadyExist(ctx context.Context, publicKeyName string) bool {
publicKeys := MustHaveUser(ctx).PublicKeys()
for _, pubKey := range publicKeys {
if pubKey.Name == publicKeyName {
return true
}
}
return false
}
func getMyPublicKeys(ctx context.Context) []*restModel.APIPubKey {
usr := MustHaveUser(ctx)
publicKeys := []*restModel.APIPubKey{}
for _, item := range usr.PublicKeys() {
currName := item.Name
currKey := item.Key
publicKeys = append(publicKeys, &restModel.APIPubKey{Name: &currName, Key: &currKey})
}
sort.SliceStable(publicKeys, func(i, j int) bool {
return *publicKeys[i].Name < *publicKeys[j].Name
})
return publicKeys
}
// To be moved to a better home when we restructure the resolvers.go file
// TerminateSpawnHost is a shared utility function to terminate a spawn host
func TerminateSpawnHost(ctx context.Context, env evergreen.Environment, h *host.Host, u *user.DBUser, r *http.Request) (*host.Host, int, error) {
if h.Status == evergreen.HostTerminated {
err := errors.New(fmt.Sprintf("Host %v is already terminated", h.Id))
return nil, http.StatusBadRequest, err
}
if err := cloud.TerminateSpawnHost(ctx, env, h, u.Id, fmt.Sprintf("terminated via UI by %s", u.Username())); err != nil {
logError(ctx, err, r)
return nil, http.StatusInternalServerError, err
}
return h, http.StatusOK, nil
}
// StopSpawnHost is a shared utility function to Stop a running spawn host
func StopSpawnHost(ctx context.Context, env evergreen.Environment, h *host.Host, u *user.DBUser, r *http.Request) (*host.Host, int, error) {
if h.Status == evergreen.HostStopped || h.Status == evergreen.HostStopping {
err := errors.New(fmt.Sprintf("Host %v is already stopping or stopped", h.Id))
return nil, http.StatusBadRequest, err
}
if h.Status != evergreen.HostRunning {
err := errors.New(fmt.Sprintf("Host %v is not running", h.Id))
return nil, http.StatusBadRequest, err
}
// Stop the host
ts := utility.RoundPartOfMinute(1).Format(units.TSFormat)
stopJob := units.NewSpawnhostStopJob(h, u.Id, ts)
if err := env.RemoteQueue().Put(ctx, stopJob); err != nil {
logError(ctx, err, r)
return nil, http.StatusInternalServerError, err
}
return h, http.StatusOK, nil
}
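// Note (added commentary, an assumption rather than documented behavior): the timestamp from
// utility.RoundPartOfMinute(1) used above is presumably folded into the job ID so that repeated
// stop requests within the same window collapse into a single queued units.SpawnhostStopJob.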
// StartSpawnHost is a shared utility function to Start a stopped spawn host
func StartSpawnHost(ctx context.Context, env evergreen.Environment, h *host.Host, u *user.DBUser, r *http.Request) (*host.Host, int, error) {
if h.Status == evergreen.HostStarting || h.Status == evergreen.HostRunning {
err := errors.New(fmt.Sprintf("Host %v is already starting or running", h.Id))
return nil, http.StatusBadRequest, err
}
// Start the host
ts := utility.RoundPartOfMinute(1).Format(units.TSFormat)
startJob := units.NewSpawnhostStartJob(h, u.Id, ts)
if err := env.RemoteQueue().Put(ctx, startJob); err != nil {
logError(ctx, err, r)
return nil, http.StatusInternalServerError, err
}
return h, http.StatusOK, nil
}
// UpdateHostPassword is a shared utility function to change the password on a windows host
func UpdateHostPassword(ctx context.Context, env evergreen.Environment, h *host.Host, u *user.DBUser, pwd string, r *http.Request) (*host.Host, int, error) {
if !h.Distro.IsWindows() {
return nil, http.StatusBadRequest, errors.New("rdp password can only be set on Windows hosts")
}
if !host.ValidateRDPPassword(pwd) {
return nil, http.StatusBadRequest, errors.New("Invalid password")
}
if err := cloud.SetHostRDPPassword(ctx, env, h, pwd); err != nil {
return nil, http.StatusInternalServerError, err
}
return h, http.StatusOK, nil
}
func logError(ctx context.Context, err error, r *http.Request) {
var method = "POST"
var url, _ = url.Parse("/graphql/query")
if r != nil {
method = r.Method
url = r.URL
}
grip.Error(message.WrapError(err, message.Fields{
"method": method,
"url": url,
"code": http.StatusInternalServerError,
"request": gimlet.GetRequestID(ctx),
"stack": string(debug.Stack()),
}))
}
// CanUpdateSpawnHost is a shared utility function to determine a user's permission to modify a spawn host
func CanUpdateSpawnHost(host *host.Host, usr *user.DBUser) bool {
if usr.Username() != host.StartedBy {
if !usr.HasPermission(gimlet.PermissionOpts{
Resource: host.Distro.Id,
ResourceType: evergreen.DistroResourceType,
Permission: evergreen.PermissionHosts,
RequiredLevel: evergreen.HostsEdit.Value,
}) {
return false
}
return true
}
return true
}
func GetMyVolumes(user *user.DBUser) ([]restModel.APIVolume, error) {
volumes, err := host.FindVolumesByUser(user.Username())
if err != nil {
return nil, errors.Wrapf(err, "error getting volumes for '%s'", user.Username())
}
sort.SliceStable(volumes, func(i, j int) bool {
// sort order: mounted < not mounted, expiration time asc
volumeI := volumes[i]
volumeJ := volumes[j]
isMountedI := volumeI.Host == ""
isMountedJ := volumeJ.Host == ""
if isMountedI == isMountedJ {
return volumeI.Expiration.Before(volumeJ.Expiration)
}
return isMountedJ
})
apiVolumes := make([]restModel.APIVolume, 0, len(volumes))
for _, vol := range volumes {
apiVolume := restModel.APIVolume{}
if err = apiVolume.BuildFromService(vol); err != nil {
return nil, errors.Wrapf(err, "error building volume '%s' from service", vol.ID)
}
apiVolumes = append(apiVolumes, apiVolume)
}
return apiVolumes, nil
}
func DeleteVolume(ctx context.Context, volumeId string) (bool, int, GqlError, error) {
if volumeId == "" {
return false, http.StatusBadRequest, InputValidationError, errors.New("must specify volume id")
}
vol, err := host.FindVolumeByID(volumeId)
if err != nil {
return false, http.StatusInternalServerError, InternalServerError, errors.Wrapf(err, "can't get volume '%s'", volumeId)
}
if vol == nil {
return false, http.StatusBadRequest, ResourceNotFound, errors.Errorf("volume '%s' does not exist", volumeId)
}
if vol.Host != "" {
success, statusCode, gqlErr, detachErr := DetachVolume(ctx, volumeId)
if detachErr != nil {
return success, statusCode, gqlErr, detachErr
}
}
mgr, err := getEC2Manager(ctx, vol)
if err != nil {
return false, http.StatusInternalServerError, InternalServerError, err
}
err = mgr.DeleteVolume(ctx, vol)
if err != nil {
return false, http.StatusInternalServerError, InternalServerError, errors.Wrapf(err, "can't delete volume '%s'", vol.ID)
}
return true, http.StatusOK, "", nil
}
func AttachVolume(ctx context.Context, volumeId string, hostId string) (bool, int, GqlError, error) {
if volumeId == "" {
return false, http.StatusBadRequest, InputValidationError, errors.New("must specify volume id")
}
vol, err := host.FindVolumeByID(volumeId)
if err != nil {
return false, http.StatusInternalServerError, InternalServerError, errors.Wrapf(err, "can't get volume '%s'", volumeId)
}
if vol == nil {
return false, http.StatusBadRequest, ResourceNotFound, errors.Errorf("volume '%s' does not exist", volumeId)
}
mgr, err := getEC2Manager(ctx, vol)
if err != nil {
return false, http.StatusInternalServerError, InternalServerError, err
}
if hostId == "" {
return false, http.StatusBadRequest, InputValidationError, errors.New("must specify host id")
}
var h *host.Host
h, err = host.FindOneId(hostId)
if err != nil {
return false, http.StatusInternalServerError, InternalServerError, errors.Wrapf(err, "can't get host '%s'", hostId)
}
if h == nil {
return false, http.StatusBadRequest, ResourceNotFound, errors.Errorf("host '%s' does not exist", hostId)
}
if vol.AvailabilityZone != h.Zone {
return false, http.StatusBadRequest, InputValidationError, errors.New("host and volume must have same availability zone")
}
if err = mgr.AttachVolume(ctx, h, &host.VolumeAttachment{VolumeID: vol.ID}); err != nil {
return false, http.StatusInternalServerError, InternalServerError, errors.Wrapf(err, "can't attach volume '%s'", vol.ID)
}
return true, http.StatusOK, "", nil
}
func DetachVolume(ctx context.Context, volumeId string) (bool, int, GqlError, error) {
if volumeId == "" {
return false, http.StatusBadRequest, InputValidationError, errors.New("must specify volume id")
}
vol, err := host.FindVolumeByID(volumeId)
if err != nil {
return false, http.StatusInternalServerError, InternalServerError, errors.Wrapf(err, "can't get volume '%s'", volumeId)
}
if vol == nil {
return false, http.StatusBadRequest, ResourceNotFound, errors.Errorf("volume '%s' does not exist", volumeId)
}
mgr, err := getEC2Manager(ctx, vol)
if err != nil {
return false, http.StatusInternalServerError, InternalServerError, err
}
if vol.Host == "" {
return false, http.StatusBadRequest, InputValidationError, errors.Errorf("volume '%s' is not attached", vol.ID)
}
h, err := host.FindOneId(vol.Host)
if err != nil {
return false, http.StatusInternalServerError, InternalServerError, errors.Wrapf(err, "can't get host '%s' for volume '%s'", vol.Host, vol.ID)
}
if h == nil {
if err = host.UnsetVolumeHost(vol.ID); err != nil {
grip.Error(message.WrapError(err, message.Fields{
"message": fmt.Sprintf("can't clear host '%s' from volume '%s'", vol.Host, vol.ID),
"route": "graphql/util",
"action": "DetachVolume",
}))
}
return false, http.StatusInternalServerError, InternalServerError, errors.Errorf("host '%s' for volume '%s' doesn't exist", vol.Host, vol.ID)
}
if err := mgr.DetachVolume(ctx, h, vol.ID); err != nil {
return false, http.StatusInternalServerError, InternalServerError, errors.Wrapf(err, "can't detach volume '%s'", vol.ID)
}
return true, http.StatusOK, "", nil
}
func getEC2Manager(ctx context.Context, vol *host.Volume) (cloud.Manager, error) {
provider := evergreen.ProviderNameEc2OnDemand
if isTest() {
// Use the mock manager during integration tests
provider = evergreen.ProviderNameMock
}
mgrOpts := cloud.ManagerOpts{
Provider: provider,
Region: cloud.AztoRegion(vol.AvailabilityZone),
}
env := evergreen.GetEnvironment()
mgr, err := cloud.GetManager(ctx, env, mgrOpts)
return mgr, errors.Wrapf(err, "can't get manager for volume '%s'", vol.ID)
}
// returns true only during integration tests
func isTest() bool {
return os.Getenv("SETTINGS_OVERRIDE") != ""
}
func SpawnHostForTestCode(ctx context.Context, vol *host.Volume, h *host.Host) error {
mgr, err := getEC2Manager(ctx, vol)
if err != nil {
return err
}
if isTest() {
// The mock manager needs to spawn the host specified in our test data.
// The host should already be spawned in a non-test scenario.
_, err := mgr.SpawnHost(ctx, h)
if err != nil {
return errors.Wrapf(err, "error spawning host in test code")
}
}
return nil
}
func MustHaveUser(ctx context.Context) *user.DBUser {
u := gimlet.GetUser(ctx)
if u == nil {
grip.Error(message.Fields{
"message": "no user attached to request expecting user",
})
return &user.DBUser{}
}
usr, valid := u.(*user.DBUser)
if !valid {
grip.Error(message.Fields{
"message": "invalid user attached to request expecting user",
})
return &user.DBUser{}
}
return usr
}
func GetVolumeFromSpawnVolumeInput(spawnVolumeInput SpawnVolumeInput) host.Volume {
return host.Volume{
AvailabilityZone: spawnVolumeInput.AvailabilityZone,
Size: spawnVolumeInput.Size,
Type: spawnVolumeInput.Type,
}
}
func RequestNewVolume(ctx context.Context, volume host.Volume) (bool, int, GqlError, error, *host.Volume) {
authedUser := MustHaveUser(ctx)
if volume.Size == 0 {
return false, http.StatusBadRequest, InputValidationError, errors.New("Must specify volume size"), nil
}
err := cloud.ValidVolumeOptions(&volume, evergreen.GetEnvironment().Settings())
if err != nil {
return false, http.StatusBadRequest, InputValidationError, err, nil
}
volume.CreatedBy = authedUser.Id
mgr, err := getEC2Manager(ctx, &volume)
if err != nil {
return false, http.StatusInternalServerError, InternalServerError, err, nil
}
vol, err := mgr.CreateVolume(ctx, &volume)
if err != nil {
return false, http.StatusInternalServerError, InternalServerError, errors.Wrap(err, "error creating volume"), nil
}
return true, http.StatusOK, "", nil, vol
}
func validateVolumeExpirationInput(ctx context.Context, expirationTime *time.Time, noExpiration *bool) error {
if expirationTime != nil && noExpiration != nil && *noExpiration == true {
return InputValidationError.Send(ctx, "Cannot apply an expiration time AND set volume as non-expirable")
}
return nil
}
func validateVolumeName(ctx context.Context, name *string) error {
if name == nil {
return nil
}
if *name == "" {
return InputValidationError.Send(ctx, "Name cannot be empty.")
}
myVolumes, err := GetMyVolumes(MustHaveUser(ctx))
if err != nil {
return err
}
for _, vol := range myVolumes {
if *name == *vol.ID || *name == *vol.DisplayName {
return InputValidationError.Send(ctx, "The provided volume name is already in use")
}
}
return nil
}
func applyVolumeOptions(ctx context.Context, volume host.Volume, volumeOptions restModel.VolumeModifyOptions) error {
// modify volume if volume options is not empty
if volumeOptions != (restModel.VolumeModifyOptions{}) {
mgr, err := getEC2Manager(ctx, &volume)
if err != nil {
return err
}
err = mgr.ModifyVolume(ctx, &volume, &volumeOptions)
if err != nil {
return InternalServerError.Send(ctx, fmt.Sprintf("Unable to apply expiration options to volume %s: %s", volume.ID, err.Error()))
}
}
return nil
}
func setVersionActivationStatus(sc data.Connector, version *model.Version) error {
defaultSort := []task.TasksSortOrder{
{Key: task.DisplayNameKey, Order: 1},
}
opts := data.TaskFilterOptions{
Sorts: defaultSort,
}
tasks, _, err := sc.FindTasksByVersion(version.Id, opts)
if err != nil {
return errors.Wrapf(err, "error getting tasks for version %s", version.Id)
}
if !task.AnyActiveTasks(tasks) {
return errors.Wrapf(version.SetNotActivated(), "Error updating version activated status for `%s`", version.Id)
} else {
return errors.Wrapf(version.SetActivated(), "Error updating version activated status for `%s`", version.Id)
}
}
func (buildVariantOptions *BuildVariantOptions) isPopulated() bool {
if buildVariantOptions == nil {
return false
}
return len(buildVariantOptions.Tasks) > 0 || len(buildVariantOptions.Variants) > 0 || len(buildVariantOptions.Statuses) > 0
}
func getRedactedAPIVarsForProject(ctx context.Context, projectId string) (*restModel.APIProjectVars, error) {
vars, err := model.FindOneProjectVars(projectId)
if err != nil {
return nil, InternalServerError.Send(ctx, fmt.Sprintf("error finding project vars for '%s': %s", projectId, err.Error()))
}
if vars == nil {
return nil, InternalServerError.Send(ctx, fmt.Sprintf("vars for '%s' don't exist", projectId))
}
vars = vars.RedactPrivateVars()
res := &restModel.APIProjectVars{}
if err = res.BuildFromService(vars); err != nil {
return nil, InternalServerError.Send(ctx, fmt.Sprintf("problem building APIProjectVars from service: %s", err.Error()))
}
return res, nil
}
func getAPIAliasesForProject(ctx context.Context, projectId string) ([]*restModel.APIProjectAlias, error) {
aliases, err := model.FindAliasesForProjectFromDb(projectId)
if err != nil {
return nil, InternalServerError.Send(ctx, fmt.Sprintf("error finding aliases for project: %s", err.Error()))
}
res := []*restModel.APIProjectAlias{}
for _, alias := range aliases {
apiAlias := restModel.APIProjectAlias{}
if err = apiAlias.BuildFromService(alias); err != nil {
return nil, InternalServerError.Send(ctx, fmt.Sprintf("problem building APIProjectAlias %s from service: %s",
alias.Alias, err.Error()))
}
res = append(res, &apiAlias)
}
return res, nil
}
func getAPISubscriptionsForProject(ctx context.Context, projectId string) ([]*restModel.APISubscription, error) {
subscriptions, err := event.FindSubscriptionsByOwner(projectId, event.OwnerTypeProject)
if err != nil {
return nil, InternalServerError.Send(ctx, fmt.Sprintf("error finding subscription for project: %s", err.Error()))
}
res := []*restModel.APISubscription{}
for _, sub := range subscriptions {
apiSubscription := restModel.APISubscription{}
if err = apiSubscription.BuildFromService(sub); err != nil {
return nil, InternalServerError.Send(ctx, fmt.Sprintf("problem building APISubscription %s from service: %s",
sub.ID, err.Error()))
}
res = append(res, &apiSubscription)
}
return res, nil
}
func getPointerEventList(events []restModel.APIProjectEvent) []*restModel.APIProjectEvent {
res := make([]*restModel.APIProjectEvent, len(events))
for i := range events {
res[i] = &events[i]
}
return res
}
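// Note (added commentary): getPointerEventList indexes into the slice (&events[i]) rather than
// taking the address of a range loop variable; with the classic Go loop-variable semantics the
// latter would leave every pointer referring to the same element.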
// GroupProjects takes a list of projects and groups them by their repo. If onlyDefaultedToRepo is true,
// it groups projects that defaulted to the repo under that repo and groups the rest under "".
func GroupProjects(projects []model.ProjectRef, onlyDefaultedToRepo bool) ([]*GroupedProjects, error) {
groupsMap := make(map[string][]*restModel.APIProjectRef)
for _, p := range projects {
groupName := fmt.Sprintf("%s/%s", p.Owner, p.Repo)
if onlyDefaultedToRepo && !p.UseRepoSettings() {
groupName = ""
}
apiProjectRef := restModel.APIProjectRef{}
if err := apiProjectRef.BuildFromService(p); err != nil {
return nil, errors.Wrap(err, "error building APIProjectRef from service")
}
if projs, ok := groupsMap[groupName]; ok {
groupsMap[groupName] = append(projs, &apiProjectRef)
} else {
groupsMap[groupName] = []*restModel.APIProjectRef{&apiProjectRef}
}
}
groupsArr := []*GroupedProjects{}
for groupName, groupedProjects := range groupsMap {
gp := GroupedProjects{
Name: groupName, //deprecated
GroupDisplayName: groupName,
Projects: groupedProjects,
}
project := groupedProjects[0]
if utility.FromBoolPtr(project.UseRepoSettings) {
repoRefId := utility.FromStringPtr(project.RepoRefId)
repoRef, err := model.FindOneRepoRef(repoRefId)
if err != nil {
return nil, err
}
if repoRef == nil {
grip.Error(message.Fields{
"message": "repoRef not found",
"repo_ref_id": repoRefId,
"project": project,
})
} else {
apiRepoRef := restModel.APIProjectRef{}
if err := apiRepoRef.BuildFromService(repoRef.ProjectRef); err != nil {
return nil, errors.Wrap(err, "error building the repo's ProjectRef from service")
}
gp.Repo = &apiRepoRef
if repoRef.ProjectRef.DisplayName != "" {
gp.GroupDisplayName = repoRef.ProjectRef.DisplayName
}
}
}
groupsArr = append(groupsArr, &gp)
}
sort.SliceStable(groupsArr, func(i, j int) bool {
return groupsArr[i].GroupDisplayName < groupsArr[j].GroupDisplayName
})
return groupsArr, nil
}
|
[
"\"SETTINGS_OVERRIDE\""
] |
[] |
[
"SETTINGS_OVERRIDE"
] |
[]
|
["SETTINGS_OVERRIDE"]
|
go
| 1 | 0 | |
pkg/protocol/grpc/server.go
|
package grpc
import (
"context"
"github.com/103cuong/go_grpc/pkg/api/v1"
"google.golang.org/grpc"
"log"
"net"
"os"
"os/signal"
)
func RunServer(ctx context.Context, v1API v1.ToDoServiceServer, port string) error {
listen, err := net.Listen("tcp", ":" + port)
if err != nil {
return err
}
// register service
server := grpc.NewServer()
v1.RegisterToDoServiceServer(server, v1API)
// graceful shutdown
c := make(chan os.Signal, 1)
signal.Notify(c, os.Interrupt)
go func() {
for range c {
// sig is a ^C, handle it
log.Println("shutting down gRPC server...")
server.GracefulStop()
<-ctx.Done()
}
}()
// start gRPC server
log.Printf("💅 server ready at 0.0.0.0:%s", port)
return server.Serve(listen)
}
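// Usage sketch (added for illustration; toDoServer is a hypothetical v1.ToDoServiceServer
// implementation, not defined in this snippet):
//
//	if err := RunServer(context.Background(), &toDoServer{}, "9090"); err != nil {
//		log.Fatal(err)
//	}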
|
[] |
[] |
[] |
[]
|
[]
|
go
| null | null | null |
examples/service/api/sip_credential/create/credential_create_example.go
|
package main
import (
"log"
"os"
"github.com/RJPearson94/twilio-sdk-go"
v2010 "github.com/RJPearson94/twilio-sdk-go/service/api/v2010"
sipCredentials "github.com/RJPearson94/twilio-sdk-go/service/api/v2010/account/sip/credential_list/credentials"
"github.com/RJPearson94/twilio-sdk-go/session/credentials"
"github.com/google/uuid"
)
var apiClient *v2010.V2010
func init() {
creds, err := credentials.New(credentials.Account{
Sid: os.Getenv("TWILIO_ACCOUNT_SID"),
AuthToken: os.Getenv("TWILIO_AUTH_TOKEN"),
})
if err != nil {
log.Panicf("%s", err.Error())
}
apiClient = twilio.NewWithCredentials(creds).API.V2010
}
func main() {
resp, err := apiClient.
Account("ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").
Sip.
CredentialList("CLXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").
Credentials.
Create(&sipCredentials.CreateCredentialInput{
Username: uuid.New().String()[0:32],
Password: "Test" + uuid.New().String(),
})
if err != nil {
log.Panicf("%s", err.Error())
}
log.Printf("SID: %s", resp.Sid)
}
|
[
"\"TWILIO_ACCOUNT_SID\"",
"\"TWILIO_AUTH_TOKEN\""
] |
[] |
[
"TWILIO_AUTH_TOKEN",
"TWILIO_ACCOUNT_SID"
] |
[]
|
["TWILIO_AUTH_TOKEN", "TWILIO_ACCOUNT_SID"]
|
go
| 2 | 0 | |
train-app/helper_functions.py
|
import json
import os
import pandas
import redis
import types
def json2redis(data,r):
if isinstance(data, types.ListType):
for row in data:
channel = row['channel']
data_type = row['data_type']
rkey = 'channel_{}_{}'.format(channel,data_type)
r.lpush(rkey,row)
else:
channel = data['channel']
data_type = data['data_type']
rkey = 'channel_{}_{}'.format(channel,data_type)
r.lpush(rkey,data)
# initialize redis connection for local and CF deployment
def connect_redis_db(redis_service_name = None):
if os.getenv('NODE_ENV') == 'micropcf':
DB_HOST = os.getenv('REDIS_HOST')
DB_PORT = os.getenv('REDIS_PORT')
DB_PW = os.getenv('REDIS_PASSWORD')
REDIS_DB = 0
elif os.environ.get('VCAP_SERVICES') is None: # running locally
DB_HOST = 'localhost'
DB_PORT = 6379
DB_PW = ''
REDIS_DB = 1
else: # running on CF
env_vars = os.environ['VCAP_SERVICES']
rediscloud_service = json.loads(env_vars)[redis_service_name][0]
credentials = rediscloud_service['credentials']
DB_HOST = credentials['host']
DB_PORT = credentials['port']
DB_PW = credentials['password']
REDIS_DB = 0
return redis.StrictRedis(host=DB_HOST,
port=DB_PORT,
password=DB_PW,
db=REDIS_DB)
|
[] |
[] |
[
"REDIS_PORT",
"VCAP_SERVICES",
"REDIS_PASSWORD",
"REDIS_HOST",
"NODE_ENV"
] |
[]
|
["REDIS_PORT", "VCAP_SERVICES", "REDIS_PASSWORD", "REDIS_HOST", "NODE_ENV"]
|
python
| 5 | 0 | |
examples/url/shorten/go/shortenALongUrl.go
|
package example
import (
"fmt"
"os"
"github.com/micro/services/clients/go/url"
)
// Shortens a destination URL and returns a full short URL.
func ShortenAlongUrl() {
urlService := url.NewUrlService(os.Getenv("MICRO_API_TOKEN"))
rsp, err := urlService.Shorten(&url.ShortenRequest{})
fmt.Println(rsp, err)
}
|
[
"\"MICRO_API_TOKEN\""
] |
[] |
[
"MICRO_API_TOKEN"
] |
[]
|
["MICRO_API_TOKEN"]
|
go
| 1 | 0 | |
contrib/spendfrom/spendfrom.py
|
#!/usr/bin/env python
#
# Use the raw transactions API to spend B2Cs received on particular addresses,
# and send any change back to that same address.
#
# Example usage:
# spendfrom.py # Lists available funds
# spendfrom.py --from=ADDRESS --to=ADDRESS --amount=11.00
#
# Assumes it will talk to a blackbearcoind or blackbearcoin-qt running
# on localhost.
#
# Depends on jsonrpc
#
from decimal import *
import getpass
import math
import os
import os.path
import platform
import sys
import time
from jsonrpc import ServiceProxy, json
BASE_FEE=Decimal("0.001")
def check_json_precision():
"""Make sure json library being used does not lose precision converting BTC values"""
n = Decimal("20000000.00000003")
satoshis = int(json.loads(json.dumps(float(n)))*1.0e8)
if satoshis != 2000000000000003:
raise RuntimeError("JSON encode/decode loses precision")
def determine_db_dir():
"""Return the default location of the blackbearcoin data directory"""
if platform.system() == "Darwin":
return os.path.expanduser("~/Library/Application Support/B2C/")
elif platform.system() == "Windows":
return os.path.join(os.environ['APPDATA'], "B2C")
return os.path.expanduser("~/.blackbearcoin")
def read_bitcoin_config(dbdir):
"""Read the blackbearcoin.conf file from dbdir, returns dictionary of settings"""
from ConfigParser import SafeConfigParser
class FakeSecHead(object):
def __init__(self, fp):
self.fp = fp
self.sechead = '[all]\n'
def readline(self):
if self.sechead:
try: return self.sechead
finally: self.sechead = None
else:
s = self.fp.readline()
if s.find('#') != -1:
s = s[0:s.find('#')].strip() +"\n"
return s
config_parser = SafeConfigParser()
config_parser.readfp(FakeSecHead(open(os.path.join(dbdir, "blackbearcoin.conf"))))
return dict(config_parser.items("all"))
def connect_JSON(config):
"""Connect to a blackbearcoin JSON-RPC server"""
testnet = config.get('testnet', '0')
testnet = (int(testnet) > 0) # 0/1 in config file, convert to True/False
if not 'rpcport' in config:
config['rpcport'] = 31245 if testnet else 17791
connect = "http://%s:%s@127.0.0.1:%s"%(config['rpcuser'], config['rpcpassword'], config['rpcport'])
try:
result = ServiceProxy(connect)
# ServiceProxy is lazy-connect, so send an RPC command mostly to catch connection errors,
# but also make sure the blackbearcoind we're talking to is/isn't testnet:
if result.getmininginfo()['testnet'] != testnet:
sys.stderr.write("RPC server at "+connect+" testnet setting mismatch\n")
sys.exit(1)
return result
except:
sys.stderr.write("Error connecting to RPC server at "+connect+"\n")
sys.exit(1)
def unlock_wallet(blackbearcoind):
info = blackbearcoind.getinfo()
if 'unlocked_until' not in info:
return True # wallet is not encrypted
t = int(info['unlocked_until'])
if t <= time.time():
try:
passphrase = getpass.getpass("Wallet is locked; enter passphrase: ")
blackbearcoind.walletpassphrase(passphrase, 5)
except:
sys.stderr.write("Wrong passphrase\n")
info = blackbearcoind.getinfo()
return int(info['unlocked_until']) > time.time()
def list_available(blackbearcoind):
address_summary = dict()
address_to_account = dict()
for info in blackbearcoind.listreceivedbyaddress(0):
address_to_account[info["address"]] = info["account"]
unspent = blackbearcoind.listunspent(0)
for output in unspent:
# listunspent doesn't give addresses, so:
rawtx = blackbearcoind.getrawtransaction(output['txid'], 1)
vout = rawtx["vout"][output['vout']]
pk = vout["scriptPubKey"]
# This code only deals with ordinary pay-to-blackbearcoin-address
# or pay-to-script-hash outputs right now; anything exotic is ignored.
if pk["type"] != "pubkeyhash" and pk["type"] != "scripthash":
continue
address = pk["addresses"][0]
if address in address_summary:
address_summary[address]["total"] += vout["value"]
address_summary[address]["outputs"].append(output)
else:
address_summary[address] = {
"total" : vout["value"],
"outputs" : [output],
"account" : address_to_account.get(address, "")
}
return address_summary
def select_coins(needed, inputs):
# Feel free to improve this, this is good enough for my simple needs:
outputs = []
have = Decimal("0.0")
n = 0
while have < needed and n < len(inputs):
outputs.append({ "txid":inputs[n]["txid"], "vout":inputs[n]["vout"]})
have += inputs[n]["amount"]
n += 1
return (outputs, have-needed)
def create_tx(blackbearcoind, fromaddresses, toaddress, amount, fee):
all_coins = list_available(blackbearcoind)
total_available = Decimal("0.0")
needed = amount+fee
potential_inputs = []
for addr in fromaddresses:
if addr not in all_coins:
continue
potential_inputs.extend(all_coins[addr]["outputs"])
total_available += all_coins[addr]["total"]
if total_available < needed:
sys.stderr.write("Error, only %f BTC available, need %f\n"%(total_available, needed));
sys.exit(1)
#
# Note:
# Python's json/jsonrpc modules have inconsistent support for Decimal numbers.
# Instead of wrestling with getting json.dumps() (used by jsonrpc) to encode
# Decimals, I'm casting amounts to float before sending them to blackbearcoind.
#
outputs = { toaddress : float(amount) }
(inputs, change_amount) = select_coins(needed, potential_inputs)
if change_amount > BASE_FEE: # don't bother with zero or tiny change
change_address = fromaddresses[-1]
if change_address in outputs:
outputs[change_address] += float(change_amount)
else:
outputs[change_address] = float(change_amount)
rawtx = blackbearcoind.createrawtransaction(inputs, outputs)
signed_rawtx = blackbearcoind.signrawtransaction(rawtx)
if not signed_rawtx["complete"]:
sys.stderr.write("signrawtransaction failed\n")
sys.exit(1)
txdata = signed_rawtx["hex"]
return txdata
def compute_amount_in(blackbearcoind, txinfo):
result = Decimal("0.0")
for vin in txinfo['vin']:
in_info = blackbearcoind.getrawtransaction(vin['txid'], 1)
vout = in_info['vout'][vin['vout']]
result = result + vout['value']
return result
def compute_amount_out(txinfo):
result = Decimal("0.0")
for vout in txinfo['vout']:
result = result + vout['value']
return result
def sanity_test_fee(blackbearcoind, txdata_hex, max_fee):
class FeeError(RuntimeError):
pass
try:
txinfo = blackbearcoind.decoderawtransaction(txdata_hex)
total_in = compute_amount_in(blackbearcoind, txinfo)
total_out = compute_amount_out(txinfo)
if total_in-total_out > max_fee:
raise FeeError("Rejecting transaction, unreasonable fee of "+str(total_in-total_out))
tx_size = len(txdata_hex)/2
kb = tx_size/1000 # integer division rounds down
if kb > 1 and (total_in-total_out) < BASE_FEE:
raise FeeError("Rejecting no-fee transaction, larger than 1000 bytes")
if total_in < 0.01 and (total_in-total_out) < BASE_FEE:
raise FeeError("Rejecting no-fee, tiny-amount transaction")
# Exercise for the reader: compute transaction priority, and
# warn if this is a very-low-priority transaction
except FeeError as err:
sys.stderr.write((str(err)+"\n"))
sys.exit(1)
def main():
import optparse
parser = optparse.OptionParser(usage="%prog [options]")
parser.add_option("--from", dest="fromaddresses", default=None,
help="addresses to get B2Cs from")
parser.add_option("--to", dest="to", default=None,
help="address to get send B2Cs to")
parser.add_option("--amount", dest="amount", default=None,
help="amount to send")
parser.add_option("--fee", dest="fee", default="0.0",
help="fee to include")
parser.add_option("--datadir", dest="datadir", default=determine_db_dir(),
help="location of blackbearcoin.conf file with RPC username/password (default: %default)")
parser.add_option("--testnet", dest="testnet", default=False, action="store_true",
help="Use the test network")
parser.add_option("--dry_run", dest="dry_run", default=False, action="store_true",
help="Don't broadcast the transaction, just create and print the transaction data")
(options, args) = parser.parse_args()
check_json_precision()
config = read_bitcoin_config(options.datadir)
if options.testnet: config['testnet'] = True
blackbearcoind = connect_JSON(config)
if options.amount is None:
address_summary = list_available(blackbearcoind)
for address,info in address_summary.iteritems():
n_transactions = len(info['outputs'])
if n_transactions > 1:
print("%s %.8f %s (%d transactions)"%(address, info['total'], info['account'], n_transactions))
else:
print("%s %.8f %s"%(address, info['total'], info['account']))
else:
fee = Decimal(options.fee)
amount = Decimal(options.amount)
while unlock_wallet(blackbearcoind) == False:
pass # Keep asking for passphrase until they get it right
txdata = create_tx(blackbearcoind, options.fromaddresses.split(","), options.to, amount, fee)
sanity_test_fee(blackbearcoind, txdata, amount*Decimal("0.01"))
if options.dry_run:
print(txdata)
else:
txid = blackbearcoind.sendrawtransaction(txdata)
print(txid)
if __name__ == '__main__':
main()
|
[] |
[] |
[
"APPDATA"
] |
[]
|
["APPDATA"]
|
python
| 1 | 0 | |
vendor/github.com/elastic/beats/dev-tools/mage/settings.go
|
// Licensed to Elasticsearch B.V. under one or more contributor
// license agreements. See the NOTICE file distributed with
// this work for additional information regarding copyright
// ownership. Elasticsearch B.V. licenses this file to you under
// the Apache License, Version 2.0 (the "License"); you may
// not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package mage
import (
"fmt"
"go/build"
"io/ioutil"
"log"
"os"
"path/filepath"
"regexp"
"strconv"
"strings"
"sync"
"time"
"github.com/magefile/mage/sh"
"github.com/pkg/errors"
"golang.org/x/tools/go/vcs"
)
const (
fpmVersion = "1.10.0"
// Docker images. See https://github.com/elastic/golang-crossbuild.
beatsFPMImage = "docker.elastic.co/beats-dev/fpm"
// BeatsCrossBuildImage is the image used for crossbuilding Beats.
BeatsCrossBuildImage = "docker.elastic.co/beats-dev/golang-crossbuild"
elasticBeatsImportPath = "github.com/elastic/beats"
)
// Common settings with defaults derived from files, CWD, and environment.
var (
GOOS = build.Default.GOOS
GOARCH = build.Default.GOARCH
GOARM = EnvOr("GOARM", "")
Platform = MakePlatformAttributes(GOOS, GOARCH, GOARM)
BinaryExt = ""
RaceDetector = false
TestCoverage = false
BeatName = EnvOr("BEAT_NAME", filepath.Base(CWD()))
BeatServiceName = EnvOr("BEAT_SERVICE_NAME", BeatName)
BeatIndexPrefix = EnvOr("BEAT_INDEX_PREFIX", BeatName)
BeatDescription = EnvOr("BEAT_DESCRIPTION", "")
BeatVendor = EnvOr("BEAT_VENDOR", "Elastic")
BeatLicense = EnvOr("BEAT_LICENSE", "ASL 2.0")
BeatURL = EnvOr("BEAT_URL", "https://www.elastic.co/products/beats/"+BeatName)
Snapshot bool
versionQualified bool
versionQualifier string
FuncMap = map[string]interface{}{
"beat_doc_branch": BeatDocBranch,
"beat_version": BeatQualifiedVersion,
"commit": CommitHash,
"date": BuildDate,
"elastic_beats_dir": ElasticBeatsDir,
"go_version": GoVersion,
"repo": GetProjectRepoInfo,
"title": strings.Title,
"tolower": strings.ToLower,
}
)
func init() {
if GOOS == "windows" {
BinaryExt = ".exe"
}
var err error
RaceDetector, err = strconv.ParseBool(EnvOr("RACE_DETECTOR", "false"))
if err != nil {
panic(errors.Wrap(err, "failed to parse RACE_DETECTOR env value"))
}
TestCoverage, err = strconv.ParseBool(EnvOr("TEST_COVERAGE", "false"))
if err != nil {
panic(errors.Wrap(err, "failed to parse TEST_COVERAGE env value"))
}
Snapshot, err = strconv.ParseBool(EnvOr("SNAPSHOT", "false"))
if err != nil {
panic(errors.Wrap(err, "failed to parse SNAPSHOT env value"))
}
versionQualifier, versionQualified = os.LookupEnv("BEAT_VERSION_QUALIFIER")
}
// EnvMap returns map containing the common settings variables and all variables
// from the environment. args are appended to the output prior to adding the
// environment variables (so env vars have the highest precedence).
func EnvMap(args ...map[string]interface{}) map[string]interface{} {
envMap := varMap(args...)
// Add the environment (highest precedence).
for _, e := range os.Environ() {
env := strings.SplitN(e, "=", 2)
envMap[env[0]] = env[1]
}
return envMap
}
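// Example (added for illustration): because the process environment is applied last, it wins
// over any args passed in, so if GOOS is set in the environment then
//
//	m := EnvMap(map[string]interface{}{"GOOS": "plan9"})
//
// leaves m["GOOS"] equal to the environment's value rather than "plan9".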
func varMap(args ...map[string]interface{}) map[string]interface{} {
data := map[string]interface{}{
"GOOS": GOOS,
"GOARCH": GOARCH,
"GOARM": GOARM,
"Platform": Platform,
"BinaryExt": BinaryExt,
"BeatName": BeatName,
"BeatServiceName": BeatServiceName,
"BeatIndexPrefix": BeatIndexPrefix,
"BeatDescription": BeatDescription,
"BeatVendor": BeatVendor,
"BeatLicense": BeatLicense,
"BeatURL": BeatURL,
"Snapshot": Snapshot,
"Qualifier": versionQualifier,
}
// Add the extra args to the map.
for _, m := range args {
for k, v := range m {
data[k] = v
}
}
return data
}
func dumpVariables() (string, error) {
var dumpTemplate = `## Variables
GOOS = {{.GOOS}}
GOARCH = {{.GOARCH}}
GOARM = {{.GOARM}}
Platform = {{.Platform}}
BinaryExt = {{.BinaryExt}}
BeatName = {{.BeatName}}
BeatServiceName = {{.BeatServiceName}}
BeatIndexPrefix = {{.BeatIndexPrefix}}
BeatDescription = {{.BeatDescription}}
BeatVendor = {{.BeatVendor}}
BeatLicense = {{.BeatLicense}}
BeatURL = {{.BeatURL}}
VersionQualifier = {{.Qualifier}}
## Functions
beat_doc_branch = {{ beat_doc_branch }}
beat_version = {{ beat_version }}
commit = {{ commit }}
date = {{ date }}
elastic_beats_dir = {{ elastic_beats_dir }}
go_version = {{ go_version }}
repo.RootImportPath = {{ repo.RootImportPath }}
repo.RootDir = {{ repo.RootDir }}
repo.ImportPath = {{ repo.ImportPath }}
repo.SubDir = {{ repo.SubDir }}
`
return Expand(dumpTemplate)
}
// DumpVariables writes the template variables and values to stdout.
func DumpVariables() error {
out, err := dumpVariables()
if err != nil {
return err
}
fmt.Println(out)
return nil
}
var (
commitHash string
commitHashOnce sync.Once
)
// CommitHash returns the full length git commit hash.
func CommitHash() (string, error) {
var err error
commitHashOnce.Do(func() {
commitHash, err = sh.Output("git", "rev-parse", "HEAD")
})
return commitHash, err
}
var (
elasticBeatsDirValue string
elasticBeatsDirErr error
elasticBeatsDirLock sync.Mutex
)
// ElasticBeatsDir returns the path to Elastic beats dir.
func ElasticBeatsDir() (string, error) {
elasticBeatsDirLock.Lock()
defer elasticBeatsDirLock.Unlock()
if elasticBeatsDirValue != "" || elasticBeatsDirErr != nil {
return elasticBeatsDirValue, elasticBeatsDirErr
}
elasticBeatsDirValue, elasticBeatsDirErr = findElasticBeatsDir()
if elasticBeatsDirErr == nil {
log.Println("Found Elastic Beats dir at", elasticBeatsDirValue)
}
return elasticBeatsDirValue, elasticBeatsDirErr
}
// findElasticBeatsDir attempts to find the root of the Elastic Beats directory.
// It checks to see if the current project is elastic/beats, and then if not
// checks the vendor directory.
//
// If your project places the Beats files in a different location (specifically
// the dev-tools/ contents) then you can use SetElasticBeatsDir().
func findElasticBeatsDir() (string, error) {
repo, err := GetProjectRepoInfo()
if err != nil {
return "", err
}
if repo.IsElasticBeats() {
return repo.RootDir, nil
}
const devToolsImportPath = elasticBeatsImportPath + "/dev-tools/mage"
// Search in project vendor directories.
searchPaths := []string{
filepath.Join(repo.RootDir, repo.SubDir, "vendor", devToolsImportPath),
filepath.Join(repo.RootDir, "vendor", devToolsImportPath),
}
for _, path := range searchPaths {
if _, err := os.Stat(path); err == nil {
return filepath.Join(path, "../.."), nil
}
}
return "", errors.Errorf("failed to find %v in the project's vendor", devToolsImportPath)
}
// SetElasticBeatsDir explicitly sets the location of the Elastic Beats
// directory. If not set then it will attempt to locate it.
func SetElasticBeatsDir(dir string) {
elasticBeatsDirLock.Lock()
defer elasticBeatsDirLock.Unlock()
info, err := os.Stat(dir)
if err != nil {
panic(errors.Wrapf(err, "failed to read elastic beats dir at %v", dir))
}
if !info.IsDir() {
panic(errors.Errorf("elastic beats dir=%v is not a directory", dir))
}
elasticBeatsDirValue = filepath.Clean(dir)
}
var (
buildDate = time.Now().UTC().Format(time.RFC3339)
)
// BuildDate returns the time that the build started.
func BuildDate() string {
return buildDate
}
var (
goVersionValue string
goVersionErr error
goVersionOnce sync.Once
)
// GoVersion returns the version of Go defined in the project's .go-version
// file.
func GoVersion() (string, error) {
goVersionOnce.Do(func() {
goVersionValue = os.Getenv("BEAT_GO_VERSION")
if goVersionValue != "" {
return
}
goVersionValue, goVersionErr = getBuildVariableSources().GetGoVersion()
})
return goVersionValue, goVersionErr
}
var (
beatVersionRegex = regexp.MustCompile(`(?m)^const defaultBeatVersion = "(.+)"\r?$`)
beatVersionValue string
beatVersionErr error
beatVersionOnce sync.Once
)
// BeatQualifiedVersion returns the Beat's qualified version. The value can be overwritten by
// setting BEAT_VERSION_QUALIFIER in the environment.
func BeatQualifiedVersion() (string, error) {
version, err := beatVersion()
if err != nil {
return "", err
}
// version qualifier can intentionally be set to "" to override build time var
if !versionQualified || versionQualifier == "" {
return version, nil
}
return version + "-" + versionQualifier, nil
}
// BeatVersion returns the Beat's version. The value can be overridden by
// setting BEAT_VERSION in the environment.
func beatVersion() (string, error) {
beatVersionOnce.Do(func() {
beatVersionValue = os.Getenv("BEAT_VERSION")
if beatVersionValue != "" {
return
}
beatVersionValue, beatVersionErr = getBuildVariableSources().GetBeatVersion()
})
return beatVersionValue, beatVersionErr
}
var (
beatDocBranchRegex = regexp.MustCompile(`(?m)doc-branch:\s*([^\s]+)\r?$`)
beatDocBranchValue string
beatDocBranchErr error
beatDocBranchOnce sync.Once
)
// BeatDocBranch returns the documentation branch name associated with the
// Beat branch.
func BeatDocBranch() (string, error) {
beatDocBranchOnce.Do(func() {
beatDocBranchValue = os.Getenv("BEAT_DOC_BRANCH")
if beatDocBranchValue != "" {
return
}
beatDocBranchValue, beatDocBranchErr = getBuildVariableSources().GetDocBranch()
})
return beatDocBranchValue, beatDocBranchErr
}
// --- BuildVariableSources
var (
// DefaultBeatBuildVariableSources contains the default locations build
// variables are read from by Elastic Beats.
DefaultBeatBuildVariableSources = &BuildVariableSources{
BeatVersion: "{{ elastic_beats_dir }}/libbeat/version/version.go",
GoVersion: "{{ elastic_beats_dir }}/.go-version",
DocBranch: "{{ elastic_beats_dir }}/libbeat/docs/version.asciidoc",
}
buildVariableSources *BuildVariableSources
buildVariableSourcesLock sync.Mutex
)
// SetBuildVariableSources sets the BuildVariableSources that defines where
// certain build data should be sourced from. Community Beats must call this.
func SetBuildVariableSources(s *BuildVariableSources) {
buildVariableSourcesLock.Lock()
defer buildVariableSourcesLock.Unlock()
buildVariableSources = s
}
func getBuildVariableSources() *BuildVariableSources {
buildVariableSourcesLock.Lock()
defer buildVariableSourcesLock.Unlock()
if buildVariableSources != nil {
return buildVariableSources
}
repo, err := GetProjectRepoInfo()
if err != nil {
panic(err)
}
if repo.IsElasticBeats() {
buildVariableSources = DefaultBeatBuildVariableSources
return buildVariableSources
}
panic(errors.Errorf("magefile must call mage.SetBuildVariableSources() "+
"because it is not an elastic beat (repo=%+v)", repo.RootImportPath))
}
// BuildVariableSources is used to explicitly define what files contain build
// variables and how to parse the values from that file. This removes ambiguity
// about where the data is sourced from and allows a degree of customization for
// community Beats.
//
// Default parsers are used if one is not defined.
type BuildVariableSources struct {
// File containing the Beat version.
BeatVersion string
// Parses the Beat version from the BeatVersion file.
BeatVersionParser func(data []byte) (string, error)
// File containing the Go version to be used in cross-builds.
GoVersion string
// Parses the Go version from the GoVersion file.
GoVersionParser func(data []byte) (string, error)
// File containing the documentation branch.
DocBranch string
// Parses the documentation branch from the DocBranch file.
DocBranchParser func(data []byte) (string, error)
}
func (s *BuildVariableSources) expandVar(in string) (string, error) {
return expandTemplate("inline", in, map[string]interface{}{
"elastic_beats_dir": ElasticBeatsDir,
})
}
// GetBeatVersion reads the BeatVersion file and parses the version from it.
func (s *BuildVariableSources) GetBeatVersion() (string, error) {
file, err := s.expandVar(s.BeatVersion)
if err != nil {
return "", err
}
data, err := ioutil.ReadFile(file)
if err != nil {
return "", errors.Wrapf(err, "failed to read beat version file=%v", file)
}
if s.BeatVersionParser == nil {
s.BeatVersionParser = parseBeatVersion
}
return s.BeatVersionParser(data)
}
// GetGoVersion reads the GoVersion file and parses the version from it.
func (s *BuildVariableSources) GetGoVersion() (string, error) {
file, err := s.expandVar(s.GoVersion)
if err != nil {
return "", err
}
data, err := ioutil.ReadFile(file)
if err != nil {
return "", errors.Wrapf(err, "failed to read go version file=%v", file)
}
if s.GoVersionParser == nil {
s.GoVersionParser = parseGoVersion
}
return s.GoVersionParser(data)
}
// GetDocBranch reads the DocBranch file and parses the branch from it.
func (s *BuildVariableSources) GetDocBranch() (string, error) {
file, err := s.expandVar(s.DocBranch)
if err != nil {
return "", err
}
data, err := ioutil.ReadFile(file)
if err != nil {
return "", errors.Wrapf(err, "failed to read doc branch file=%v", file)
}
if s.DocBranchParser == nil {
s.DocBranchParser = parseDocBranch
}
return s.DocBranchParser(data)
}
func parseBeatVersion(data []byte) (string, error) {
matches := beatVersionRegex.FindSubmatch(data)
if len(matches) == 2 {
return string(matches[1]), nil
}
return "", errors.New("failed to parse beat version file")
}
func parseGoVersion(data []byte) (string, error) {
return strings.TrimSpace(string(data)), nil
}
func parseDocBranch(data []byte) (string, error) {
matches := beatDocBranchRegex.FindSubmatch(data)
if len(matches) == 2 {
return string(matches[1]), nil
}
return "", errors.New("failed to parse beat doc branch")
}
// --- ProjectRepoInfo
// ProjectRepoInfo contains information about the project's repo.
type ProjectRepoInfo struct {
RootImportPath string // Import path at the project root.
RootDir string // Root directory of the project.
ImportPath string // Import path of the current directory.
SubDir string // Relative path from the root dir to the current dir.
}
// IsElasticBeats returns true if the current project is
// github.com/elastic/beats.
func (r *ProjectRepoInfo) IsElasticBeats() bool {
return r.RootImportPath == elasticBeatsImportPath
}
var (
repoInfoValue *ProjectRepoInfo
repoInfoErr error
repoInfoOnce sync.Once
)
// GetProjectRepoInfo returns information about the repo including the root
// import path and the current directory's import path.
func GetProjectRepoInfo() (*ProjectRepoInfo, error) {
repoInfoOnce.Do(func() {
repoInfoValue, repoInfoErr = getProjectRepoInfo()
})
return repoInfoValue, repoInfoErr
}
func getProjectRepoInfo() (*ProjectRepoInfo, error) {
var (
cwd = CWD()
rootImportPath string
srcDir string
)
// Search upward from the CWD to determine the project root based on VCS.
var errs []string
for _, gopath := range filepath.SplitList(build.Default.GOPATH) {
gopath = filepath.Clean(gopath)
if !strings.HasPrefix(cwd, gopath) {
// Fixes an issue on macOS when /var is actually /private/var.
var err error
gopath, err = filepath.EvalSymlinks(gopath)
if err != nil {
errs = append(errs, err.Error())
continue
}
}
srcDir = filepath.Join(gopath, "src")
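// vcs.FromDir walks upward from cwd looking for a VCS checkout under srcDir and reports its root import path.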
_, root, err := vcs.FromDir(cwd, srcDir)
if err != nil {
// Try the next gopath.
errs = append(errs, err.Error())
continue
}
rootImportPath = root
break
}
if rootImportPath == "" {
return nil, errors.Errorf("failed to determine root import path (Did "+
"you git init?, Is the project in the GOPATH? GOPATH=%v, CWD=%v?): %v",
build.Default.GOPATH, cwd, errs)
}
rootDir := filepath.Join(srcDir, rootImportPath)
subDir, err := filepath.Rel(rootDir, cwd)
if err != nil {
return nil, errors.Wrap(err, "failed to get relative path to repo root")
}
importPath := filepath.ToSlash(filepath.Join(rootImportPath, subDir))
return &ProjectRepoInfo{
RootImportPath: rootImportPath,
RootDir: rootDir,
SubDir: subDir,
ImportPath: importPath,
}, nil
}
|
[
"\"BEAT_GO_VERSION\"",
"\"BEAT_VERSION\"",
"\"BEAT_DOC_BRANCH\""
] |
[] |
[
"BEAT_VERSION",
"BEAT_GO_VERSION",
"BEAT_DOC_BRANCH"
] |
[]
|
["BEAT_VERSION", "BEAT_GO_VERSION", "BEAT_DOC_BRANCH"]
|
go
| 3 | 0 | |
napari/_qt/dialogs/qt_notification.py
|
from __future__ import annotations
import os
from typing import Callable, Optional, Sequence, Tuple, Union
from qtpy.QtCore import (
QEasingCurve,
QObject,
QPoint,
QPropertyAnimation,
QRect,
QSize,
Qt,
QThread,
QTimer,
Signal,
)
from qtpy.QtWidgets import (
QApplication,
QDialog,
QGraphicsOpacityEffect,
QHBoxLayout,
QLabel,
QPushButton,
QSizePolicy,
QTextEdit,
QVBoxLayout,
QWidget,
)
from ...utils.notifications import Notification, NotificationSeverity
from ...utils.translations import trans
from ..widgets.qt_eliding_label import MultilineElidedLabel
ActionSequence = Sequence[Tuple[str, Callable[[], None]]]
class NotificationDispatcher(QObject):
"""
This is a helper class to allow the propagation of notifications
generated from exceptions or warnings inside threads.
"""
sig_notified = Signal(Notification)
class NapariQtNotification(QDialog):
"""Notification dialog frame, appears at the bottom right of the canvas.
By default, only the first line of the notification is shown, and the text
is elided. Double-clicking on the text (or clicking the chevron icon) will
expand to show the full notification. The dialog will automatically
disappear in ``DISMISS_AFTER`` milliseconds, unless hovered or clicked.
Parameters
----------
message : str
The message that will appear in the notification
severity : str or NotificationSeverity, optional
Severity level {'error', 'warning', 'info', 'none'}. Will determine
the icon associated with the message.
by default NotificationSeverity.WARNING.
source : str, optional
A source string for the notification (intended to show the module and
or package responsible for the notification), by default None
actions : list of tuple, optional
A sequence of 2-tuples, where each tuple is a string and a callable.
Each tuple will be used to create a button in the dialog, where the text
on the button is determined by the first item in the tuple, and a
callback function to call when the button is pressed is the second item
in the tuple. By default ().
"""
MAX_OPACITY = 0.9
FADE_IN_RATE = 220
FADE_OUT_RATE = 120
DISMISS_AFTER = 4000
MIN_WIDTH = 400
MIN_EXPANSION = 18
message: MultilineElidedLabel
source_label: QLabel
severity_icon: QLabel
def __init__(
self,
message: str,
severity: Union[str, NotificationSeverity] = 'WARNING',
source: Optional[str] = None,
actions: ActionSequence = (),
):
super().__init__()
from ..qt_main_window import _QtMainWindow
current_window = _QtMainWindow.current()
if current_window is not None:
canvas = current_window.qt_viewer._canvas_overlay
self.setParent(canvas)
canvas.resized.connect(self.move_to_bottom_right)
self.setupUi()
self.setAttribute(Qt.WA_DeleteOnClose)
self.setup_buttons(actions)
self.setMouseTracking(True)
self.severity_icon.setText(NotificationSeverity(severity).as_icon())
self.message.setText(message)
if source:
self.source_label.setText(
trans._('Source: {source}', source=source)
)
self.close_button.clicked.connect(self.close)
self.expand_button.clicked.connect(self.toggle_expansion)
self.timer = QTimer()
self.opacity = QGraphicsOpacityEffect()
self.setGraphicsEffect(self.opacity)
self.opacity_anim = QPropertyAnimation(self.opacity, b"opacity", self)
self.geom_anim = QPropertyAnimation(self, b"geometry", self)
self.move_to_bottom_right()
def move_to_bottom_right(self, offset=(8, 8)):
"""Position widget at the bottom right edge of the parent."""
if not self.parent():
return
sz = self.parent().size() - self.size() - QSize(*offset)
self.move(QPoint(sz.width(), sz.height()))
def slide_in(self):
"""Run animation that fades in the dialog with a slight slide up."""
geom = self.geometry()
self.geom_anim.setDuration(self.FADE_IN_RATE)
self.geom_anim.setStartValue(geom.translated(0, 20))
self.geom_anim.setEndValue(geom)
self.geom_anim.setEasingCurve(QEasingCurve.OutQuad)
# fade in
self.opacity_anim.setDuration(self.FADE_IN_RATE)
self.opacity_anim.setStartValue(0)
self.opacity_anim.setEndValue(self.MAX_OPACITY)
self.geom_anim.start()
self.opacity_anim.start()
def show(self):
"""Show the message with a fade and slight slide in from the bottom."""
super().show()
self.slide_in()
if self.DISMISS_AFTER > 0:
self.timer.setInterval(self.DISMISS_AFTER)
self.timer.setSingleShot(True)
self.timer.timeout.connect(self.close)
self.timer.start()
def mouseMoveEvent(self, event):
"""On hover, stop the self-destruct timer"""
self.timer.stop()
def mouseDoubleClickEvent(self, event):
"""Expand the notification on double click."""
self.toggle_expansion()
def close(self):
"""Fade out then close."""
self.opacity_anim.setDuration(self.FADE_OUT_RATE)
self.opacity_anim.setStartValue(self.MAX_OPACITY)
self.opacity_anim.setEndValue(0)
self.opacity_anim.start()
self.opacity_anim.finished.connect(super().close)
def toggle_expansion(self):
"""Toggle the expanded state of the notification frame."""
self.contract() if self.property('expanded') else self.expand()
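# stop the auto-dismiss timer so the notification stays visible while the user interacts with it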
self.timer.stop()
def expand(self):
"""Expanded the notification so that the full message is visible."""
curr = self.geometry()
self.geom_anim.setDuration(100)
self.geom_anim.setStartValue(curr)
new_height = self.sizeHint().height()
if new_height < curr.height():
# new height would shift notification down, ensure some expansion
new_height = curr.height() + self.MIN_EXPANSION
delta = new_height - curr.height()
self.geom_anim.setEndValue(
QRect(curr.x(), curr.y() - delta, curr.width(), new_height)
)
self.geom_anim.setEasingCurve(QEasingCurve.OutQuad)
self.geom_anim.start()
self.setProperty('expanded', True)
self.style().unpolish(self.expand_button)
self.style().polish(self.expand_button)
def contract(self):
"""Contract notification to a single elided line of the message."""
geom = self.geometry()
self.geom_anim.setDuration(100)
self.geom_anim.setStartValue(geom)
dlt = geom.height() - self.minimumHeight()
self.geom_anim.setEndValue(
QRect(geom.x(), geom.y() + dlt, geom.width(), geom.height() - dlt)
)
self.geom_anim.setEasingCurve(QEasingCurve.OutQuad)
self.geom_anim.start()
self.setProperty('expanded', False)
self.style().unpolish(self.expand_button)
self.style().polish(self.expand_button)
def setupUi(self):
"""Set up the UI during initialization."""
self.setWindowFlags(Qt.SubWindow)
self.setMinimumWidth(self.MIN_WIDTH)
self.setMaximumWidth(self.MIN_WIDTH)
self.setMinimumHeight(40)
self.setSizeGripEnabled(False)
self.setModal(False)
self.verticalLayout = QVBoxLayout(self)
self.verticalLayout.setContentsMargins(2, 2, 2, 2)
self.verticalLayout.setSpacing(0)
self.row1_widget = QWidget(self)
self.row1 = QHBoxLayout(self.row1_widget)
self.row1.setContentsMargins(12, 12, 12, 8)
self.row1.setSpacing(4)
self.severity_icon = QLabel(self.row1_widget)
self.severity_icon.setObjectName("severity_icon")
self.severity_icon.setMinimumWidth(30)
self.severity_icon.setMaximumWidth(30)
self.row1.addWidget(self.severity_icon, alignment=Qt.AlignTop)
self.message = MultilineElidedLabel(self.row1_widget)
self.message.setMinimumWidth(self.MIN_WIDTH - 200)
self.message.setSizePolicy(
QSizePolicy.Expanding, QSizePolicy.Expanding
)
self.row1.addWidget(self.message, alignment=Qt.AlignTop)
self.expand_button = QPushButton(self.row1_widget)
self.expand_button.setObjectName("expand_button")
self.expand_button.setCursor(Qt.PointingHandCursor)
self.expand_button.setMaximumWidth(20)
self.expand_button.setFlat(True)
self.row1.addWidget(self.expand_button, alignment=Qt.AlignTop)
self.close_button = QPushButton(self.row1_widget)
self.close_button.setObjectName("close_button")
self.close_button.setCursor(Qt.PointingHandCursor)
self.close_button.setMaximumWidth(20)
self.close_button.setFlat(True)
self.row1.addWidget(self.close_button, alignment=Qt.AlignTop)
self.verticalLayout.addWidget(self.row1_widget, 1)
self.row2_widget = QWidget(self)
self.row2_widget.hide()
self.row2 = QHBoxLayout(self.row2_widget)
self.source_label = QLabel(self.row2_widget)
self.source_label.setObjectName("source_label")
self.row2.addWidget(self.source_label, alignment=Qt.AlignBottom)
self.row2.addStretch()
self.row2.setContentsMargins(12, 2, 16, 12)
self.row2_widget.setMaximumHeight(34)
self.row2_widget.setStyleSheet(
'QPushButton{'
'padding: 4px 12px 4px 12px; '
'font-size: 11px;'
'min-height: 18px; border-radius: 0;}'
)
self.verticalLayout.addWidget(self.row2_widget, 0)
self.setProperty('expanded', False)
self.resize(self.MIN_WIDTH, 40)
def setup_buttons(self, actions: ActionSequence = ()):
"""Add buttons to the dialog.
Parameters
----------
actions : tuple, optional
A sequence of 2-tuples, where each tuple is a string and a
callable. Each tuple will be used to create a button in the dialog,
where the text on the button is determined by the first item in the
tuple, and a callback function to call when the button is pressed
is the second item in the tuple. By default ().
"""
if isinstance(actions, dict):
actions = list(actions.items())
for text, callback in actions:
btn = QPushButton(text)
def call_back_with_self(callback, self):
"""
We need a higher order function to capture the reference to self.
"""
def _inner():
return callback(self)
return _inner
btn.clicked.connect(call_back_with_self(callback, self))
btn.clicked.connect(self.close)
self.row2.addWidget(btn)
if actions:
self.row2_widget.show()
self.setMinimumHeight(
self.row2_widget.maximumHeight() + self.minimumHeight()
)
def sizeHint(self):
"""Return the size required to show the entire message."""
return QSize(
super().sizeHint().width(),
self.row2_widget.height() + self.message.sizeHint().height(),
)
@classmethod
def from_notification(
cls, notification: Notification
) -> NapariQtNotification:
from ...utils.notifications import ErrorNotification
actions = notification.actions
if isinstance(notification, ErrorNotification):
def show_tb(parent):
tbdialog = QDialog(parent=parent.parent())
tbdialog.setModal(True)
# this is about the minimum width needed to avoid rewrapping
# and the minimum height needed to avoid a scrollbar
tbdialog.resize(650, 270)
tbdialog.setLayout(QVBoxLayout())
text = QTextEdit()
text.setHtml(notification.as_html())
text.setReadOnly(True)
btn = QPushButton(trans._('Enter Debugger'))
def _enter_debug_mode():
btn.setText(
trans._(
'Now Debugging. Please quit debugger in console to continue'
)
)
_debug_tb(notification.exception.__traceback__)
btn.setText(trans._('Enter Debugger'))
btn.clicked.connect(_enter_debug_mode)
tbdialog.layout().addWidget(text)
tbdialog.layout().addWidget(btn, 0, Qt.AlignRight)
tbdialog.show()
actions = tuple(notification.actions) + (
(trans._('View Traceback'), show_tb),
)
else:
actions = notification.actions
return cls(
message=notification.message,
severity=notification.severity,
source=notification.source,
actions=actions,
)
@classmethod
def show_notification(cls, notification: Notification):
from ...utils.settings import get_settings
settings = get_settings()
# after https://github.com/napari/napari/issues/2370,
# the os.getenv can be removed (and NAPARI_CATCH_ERRORS retired)
if (
os.getenv("NAPARI_CATCH_ERRORS") not in ('0', 'False')
and notification.severity
>= settings.application.gui_notification_level
):
application_instance = QApplication.instance()
if application_instance:
# Check if this is running from a thread
if application_instance.thread() != QThread.currentThread():
dispatcher = getattr(
application_instance, "_dispatcher", None
)
if dispatcher:
dispatcher.sig_notified.emit(notification)
return
cls.from_notification(notification).show()
def _debug_tb(tb):
import pdb
from ..utils import event_hook_removed
QApplication.processEvents()
QApplication.processEvents()
with event_hook_removed():
print("Entering debugger. Type 'q' to return to napari.\n")
pdb.post_mortem(tb)
print("\nDebugging finished. Napari active again.")
|
[] |
[] |
[
"NAPARI_CATCH_ERRORS"
] |
[]
|
["NAPARI_CATCH_ERRORS"]
|
python
| 1 | 0 | |
examples/twitter/trends/getTheCurrentGlobalTrendingTopics/main.go
|
package main
import (
"fmt"
"os"
"go.m3o.com"
"go.m3o.com/twitter"
)
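// This example reads the API token from the M3O_API_TOKEN environment variable and prints the current global trending topics response.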
func main() {
client := m3o.New(os.Getenv("M3O_API_TOKEN"))
rsp, err := client.Twitter.Trends(&twitter.TrendsRequest{})
fmt.Println(rsp, err)
}
|
[
"\"M3O_API_TOKEN\""
] |
[] |
[
"M3O_API_TOKEN"
] |
[]
|
["M3O_API_TOKEN"]
|
go
| 1 | 0 | |
skimage/io/_plugins/freeimage_plugin.py
|
import ctypes
import numpy
import sys
import os
import os.path
from numpy.compat import asbytes, asstr
def _generate_candidate_libs():
# look for likely library files in the following dirs:
lib_dirs = [os.path.dirname(__file__),
'/lib',
'/usr/lib',
'/usr/local/lib',
'/opt/local/lib',
os.path.join(sys.prefix, 'lib'),
os.path.join(sys.prefix, 'DLLs')
]
if 'HOME' in os.environ:
lib_dirs.append(os.path.join(os.environ['HOME'], 'lib'))
lib_dirs = [ld for ld in lib_dirs if os.path.exists(ld)]
lib_names = ['libfreeimage', 'freeimage'] # should be lower-case!
# Now attempt to find libraries of that name in the given directory
# (case-insensitive and without regard for extension)
lib_paths = []
for lib_dir in lib_dirs:
for lib_name in lib_names:
files = os.listdir(lib_dir)
lib_paths += [os.path.join(lib_dir, lib) for lib in files
if lib.lower().startswith(lib_name) and not
os.path.splitext(lib)[1] in ('.py', '.pyc', '.ini')]
lib_paths = [lp for lp in lib_paths if os.path.exists(lp)]
return lib_dirs, lib_paths
if sys.platform == 'win32':
LOADER = ctypes.windll
FUNCTYPE = ctypes.WINFUNCTYPE
else:
LOADER = ctypes.cdll
FUNCTYPE = ctypes.CFUNCTYPE
def handle_errors():
global FT_ERROR_STR
if FT_ERROR_STR:
tmp = FT_ERROR_STR
FT_ERROR_STR = None
raise RuntimeError(tmp)
FT_ERROR_STR = None
# This MUST happen in module scope, or the function pointer is garbage
# collected, leading to a segfault when error_handler is called.
@FUNCTYPE(None, ctypes.c_int, ctypes.c_char_p)
def c_error_handler(fif, message):
global FT_ERROR_STR
FT_ERROR_STR = 'FreeImage error: %s' % message
def load_freeimage():
freeimage = None
errors = []
# First try a few bare library names that ctypes might be able to find
# in the default locations for each platform. Win DLL names don't need the
# extension, but other platforms do.
bare_libs = ['FreeImage', 'libfreeimage.dylib', 'libfreeimage.so',
'libfreeimage.so.3']
lib_dirs, lib_paths = _generate_candidate_libs()
lib_paths = bare_libs + lib_paths
for lib in lib_paths:
try:
freeimage = LOADER.LoadLibrary(lib)
break
except Exception:
if lib not in bare_libs:
# Don't record errors when it couldn't load the library from
# a bare name -- this fails often, and doesn't provide any
# useful debugging information anyway, beyond "couldn't find
# library..."
# Get exception instance in Python 2.x/3.x compatible manner
e_type, e_value, e_tb = sys.exc_info()
del e_tb
errors.append((lib, e_value))
if freeimage is None:
if errors:
# No freeimage library loaded, and load-errors reported for some
# candidate libs
err_txt = ['%s:\n%s' % (l, str(e)) for l, e in errors]
raise RuntimeError('One or more FreeImage libraries were found, but '
'could not be loaded due to the following errors:\n' +
'\n\n'.join(err_txt))
else:
# No errors, because no potential libraries found at all!
raise RuntimeError('Could not find a FreeImage library in any of:\n' +
'\n'.join(lib_dirs))
# FreeImage found
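# Route FreeImage error messages through c_error_handler so handle_errors() can re-raise them as RuntimeErrors.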
freeimage.FreeImage_SetOutputMessage(c_error_handler)
return freeimage
_FI = load_freeimage()
API = {
# All we're doing here is telling ctypes that some of the FreeImage
# functions return pointers instead of integers. (On 64-bit systems,
# without this information the pointers get truncated and crashes result).
# There's no need to list functions that return ints, or the types of the
# parameters to these or other functions -- that's fine to do implicitly.
# Note that the ctypes immediately converts the returned void_p back to a
# python int again! This is really not helpful, because then passing it
# back to another library call will cause truncation-to-32-bits on 64-bit
# systems. Thanks, ctypes! So after these calls one must immediately
# re-wrap the int as a c_void_p if it is to be passed back into FreeImage.
'FreeImage_AllocateT': (ctypes.c_void_p, None),
'FreeImage_FindFirstMetadata': (ctypes.c_void_p, None),
'FreeImage_GetBits': (ctypes.c_void_p, None),
'FreeImage_GetPalette': (ctypes.c_void_p, None),
'FreeImage_GetTagKey': (ctypes.c_char_p, None),
'FreeImage_GetTagValue': (ctypes.c_void_p, None),
'FreeImage_Load': (ctypes.c_void_p, None),
'FreeImage_LockPage': (ctypes.c_void_p, None),
'FreeImage_OpenMultiBitmap': (ctypes.c_void_p, None)
}
# Albert's ctypes pattern
def register_api(lib, api):
for f, (restype, argtypes) in api.items():
func = getattr(lib, f)
func.restype = restype
func.argtypes = argtypes
register_api(_FI, API)
class FI_TYPES(object):
FIT_UNKNOWN = 0
FIT_BITMAP = 1
FIT_UINT16 = 2
FIT_INT16 = 3
FIT_UINT32 = 4
FIT_INT32 = 5
FIT_FLOAT = 6
FIT_DOUBLE = 7
FIT_COMPLEX = 8
FIT_RGB16 = 9
FIT_RGBA16 = 10
FIT_RGBF = 11
FIT_RGBAF = 12
dtypes = {
FIT_BITMAP: numpy.uint8,
FIT_UINT16: numpy.uint16,
FIT_INT16: numpy.int16,
FIT_UINT32: numpy.uint32,
FIT_INT32: numpy.int32,
FIT_FLOAT: numpy.float32,
FIT_DOUBLE: numpy.float64,
FIT_COMPLEX: numpy.complex128,
FIT_RGB16: numpy.uint16,
FIT_RGBA16: numpy.uint16,
FIT_RGBF: numpy.float32,
FIT_RGBAF: numpy.float32
}
fi_types = {
(numpy.dtype('uint8'), 1): FIT_BITMAP,
(numpy.dtype('uint8'), 3): FIT_BITMAP,
(numpy.dtype('uint8'), 4): FIT_BITMAP,
(numpy.dtype('uint16'), 1): FIT_UINT16,
(numpy.dtype('int16'), 1): FIT_INT16,
(numpy.dtype('uint32'), 1): FIT_UINT32,
(numpy.dtype('int32'), 1): FIT_INT32,
(numpy.dtype('float32'), 1): FIT_FLOAT,
(numpy.dtype('float64'), 1): FIT_DOUBLE,
(numpy.dtype('complex128'), 1): FIT_COMPLEX,
(numpy.dtype('uint16'), 3): FIT_RGB16,
(numpy.dtype('uint16'), 4): FIT_RGBA16,
(numpy.dtype('float32'), 3): FIT_RGBF,
(numpy.dtype('float32'), 4): FIT_RGBAF
}
extra_dims = {
FIT_UINT16: [],
FIT_INT16: [],
FIT_UINT32: [],
FIT_INT32: [],
FIT_FLOAT: [],
FIT_DOUBLE: [],
FIT_COMPLEX: [],
FIT_RGB16: [3],
FIT_RGBA16: [4],
FIT_RGBF: [3],
FIT_RGBAF: [4]
}
@classmethod
def get_type_and_shape(cls, bitmap):
w = _FI.FreeImage_GetWidth(bitmap)
handle_errors()
h = _FI.FreeImage_GetHeight(bitmap)
handle_errors()
fi_type = _FI.FreeImage_GetImageType(bitmap)
handle_errors()
if not fi_type:
raise ValueError('Unknown image pixel type')
dtype = cls.dtypes[fi_type]
if fi_type == cls.FIT_BITMAP:
bpp = _FI.FreeImage_GetBPP(bitmap)
handle_errors()
if bpp == 8:
extra_dims = []
elif bpp == 24:
extra_dims = [3]
elif bpp == 32:
extra_dims = [4]
else:
raise ValueError('Cannot convert %d BPP bitmap' % bpp)
else:
extra_dims = cls.extra_dims[fi_type]
return numpy.dtype(dtype), extra_dims + [w, h]
class IO_FLAGS(object):
FIF_LOAD_NOPIXELS = 0x8000 # loading: load the image header only
# (not supported by all plugins)
BMP_DEFAULT = 0
BMP_SAVE_RLE = 1
CUT_DEFAULT = 0
DDS_DEFAULT = 0
EXR_DEFAULT = 0 # save data as half with piz-based wavelet compression
EXR_FLOAT = 0x0001 # save data as float instead of as half (not recommended)
EXR_NONE = 0x0002 # save with no compression
EXR_ZIP = 0x0004 # save with zlib compression, in blocks of 16 scan lines
EXR_PIZ = 0x0008 # save with piz-based wavelet compression
EXR_PXR24 = 0x0010 # save with lossy 24-bit float compression
EXR_B44 = 0x0020 # save with lossy 44% float compression
# - goes to 22% when combined with EXR_LC
EXR_LC = 0x0040 # save images with one luminance and two chroma channels,
# rather than as RGB (lossy compression)
FAXG3_DEFAULT = 0
GIF_DEFAULT = 0
GIF_LOAD256 = 1 # Load the image as a 256 color image with unused
# palette entries, if it's 16 or 2 color
GIF_PLAYBACK = 2 # 'Play' the GIF to generate each frame (as 32bpp)
# instead of returning raw frame data when loading
HDR_DEFAULT = 0
ICO_DEFAULT = 0
ICO_MAKEALPHA = 1 # convert to 32bpp and create an alpha channel from the
# AND-mask when loading
IFF_DEFAULT = 0
J2K_DEFAULT = 0 # save with a 16:1 rate
JP2_DEFAULT = 0 # save with a 16:1 rate
JPEG_DEFAULT = 0 # loading (see JPEG_FAST);
# saving (see JPEG_QUALITYGOOD|JPEG_SUBSAMPLING_420)
JPEG_FAST = 0x0001 # load the file as fast as possible,
# sacrificing some quality
JPEG_ACCURATE = 0x0002 # load the file with the best quality,
# sacrificing some speed
JPEG_CMYK = 0x0004 # load separated CMYK "as is"
# (use | to combine with other load flags)
JPEG_EXIFROTATE = 0x0008 # load and rotate according to
# Exif 'Orientation' tag if available
JPEG_QUALITYSUPERB = 0x80 # save with superb quality (100:1)
JPEG_QUALITYGOOD = 0x0100 # save with good quality (75:1)
JPEG_QUALITYNORMAL = 0x0200 # save with normal quality (50:1)
JPEG_QUALITYAVERAGE = 0x0400 # save with average quality (25:1)
JPEG_QUALITYBAD = 0x0800 # save with bad quality (10:1)
JPEG_PROGRESSIVE = 0x2000 # save as a progressive-JPEG
# (use | to combine with other save flags)
JPEG_SUBSAMPLING_411 = 0x1000 # save with high 4x1 chroma
# subsampling (4:1:1)
JPEG_SUBSAMPLING_420 = 0x4000 # save with medium 2x2 medium chroma
# subsampling (4:2:0) - default value
JPEG_SUBSAMPLING_422 = 0x8000 # save with low 2x1 chroma subsampling (4:2:2)
JPEG_SUBSAMPLING_444 = 0x10000 # save with no chroma subsampling (4:4:4)
JPEG_OPTIMIZE = 0x20000 # on saving, compute optimal Huffman coding tables
# (can reduce a few percent of file size)
JPEG_BASELINE = 0x40000 # save basic JPEG, without metadata or any markers
KOALA_DEFAULT = 0
LBM_DEFAULT = 0
MNG_DEFAULT = 0
PCD_DEFAULT = 0
PCD_BASE = 1 # load the bitmap sized 768 x 512
PCD_BASEDIV4 = 2 # load the bitmap sized 384 x 256
PCD_BASEDIV16 = 3 # load the bitmap sized 192 x 128
PCX_DEFAULT = 0
PFM_DEFAULT = 0
PICT_DEFAULT = 0
PNG_DEFAULT = 0
PNG_IGNOREGAMMA = 1 # loading: avoid gamma correction
PNG_Z_BEST_SPEED = 0x0001 # save using ZLib level 1 compression flag
# (default value is 6)
PNG_Z_DEFAULT_COMPRESSION = 0x0006 # save using ZLib level 6 compression
# flag (default recommended value)
PNG_Z_BEST_COMPRESSION = 0x0009 # save using ZLib level 9 compression flag
# (default value is 6)
PNG_Z_NO_COMPRESSION = 0x0100 # save without ZLib compression
PNG_INTERLACED = 0x0200 # save using Adam7 interlacing (use | to combine
# with other save flags)
PNM_DEFAULT = 0
PNM_SAVE_RAW = 0 # Writer saves in RAW format (i.e. P4, P5 or P6)
PNM_SAVE_ASCII = 1 # Writer saves in ASCII format (i.e. P1, P2 or P3)
PSD_DEFAULT = 0
PSD_CMYK = 1 # reads tags for separated CMYK (default is conversion to RGB)
PSD_LAB = 2 # reads tags for CIELab (default is conversion to RGB)
RAS_DEFAULT = 0
RAW_DEFAULT = 0 # load the file as linear RGB 48-bit
RAW_PREVIEW = 1 # try to load the embedded JPEG preview with included
# Exif Data or default to RGB 24-bit
RAW_DISPLAY = 2 # load the file as RGB 24-bit
SGI_DEFAULT = 0
TARGA_DEFAULT = 0
TARGA_LOAD_RGB888 = 1 # Convert RGB555 and ARGB8888 -> RGB888.
TARGA_SAVE_RLE = 2 # Save with RLE compression
TIFF_DEFAULT = 0
TIFF_CMYK = 0x0001 # reads/stores tags for separated CMYK
# (use | to combine with compression flags)
TIFF_PACKBITS = 0x0100 # save using PACKBITS compression
TIFF_DEFLATE = 0x0200 # save using DEFLATE (a.k.a. ZLIB) compression
TIFF_ADOBE_DEFLATE = 0x0400 # save using ADOBE DEFLATE compression
TIFF_NONE = 0x0800 # save without any compression
TIFF_CCITTFAX3 = 0x1000 # save using CCITT Group 3 fax encoding
TIFF_CCITTFAX4 = 0x2000 # save using CCITT Group 4 fax encoding
TIFF_LZW = 0x4000 # save using LZW compression
TIFF_JPEG = 0x8000 # save using JPEG compression
TIFF_LOGLUV = 0x10000 # save using LogLuv compression
WBMP_DEFAULT = 0
XBM_DEFAULT = 0
XPM_DEFAULT = 0
class METADATA_MODELS(object):
FIMD_COMMENTS = 0
FIMD_EXIF_MAIN = 1
FIMD_EXIF_EXIF = 2
FIMD_EXIF_GPS = 3
FIMD_EXIF_MAKERNOTE = 4
FIMD_EXIF_INTEROP = 5
FIMD_IPTC = 6
FIMD_XMP = 7
FIMD_GEOTIFF = 8
FIMD_ANIMATION = 9
class METADATA_DATATYPE(object):
FIDT_BYTE = 1 # 8-bit unsigned integer
FIDT_ASCII = 2 # 8-bit bytes w/ last byte null
FIDT_SHORT = 3 # 16-bit unsigned integer
FIDT_LONG = 4 # 32-bit unsigned integer
FIDT_RATIONAL = 5 # 64-bit unsigned fraction
FIDT_SBYTE = 6 # 8-bit signed integer
FIDT_UNDEFINED = 7 # 8-bit untyped data
FIDT_SSHORT = 8 # 16-bit signed integer
FIDT_SLONG = 9 # 32-bit signed integer
FIDT_SRATIONAL = 10 # 64-bit signed fraction
FIDT_FLOAT = 11 # 32-bit IEEE floating point
FIDT_DOUBLE = 12 # 64-bit IEEE floating point
FIDT_IFD = 13 # 32-bit unsigned integer (offset)
FIDT_PALETTE = 14 # 32-bit RGBQUAD
FIDT_LONG8 = 16 # 64-bit unsigned integer
FIDT_SLONG8 = 17 # 64-bit signed integer
FIDT_IFD8 = 18 # 64-bit unsigned integer (offset)
dtypes = {
FIDT_BYTE: numpy.uint8,
FIDT_SHORT: numpy.uint16,
FIDT_LONG: numpy.uint32,
FIDT_RATIONAL: [('numerator', numpy.uint32),
('denominator', numpy.uint32)],
FIDT_SBYTE: numpy.int8,
FIDT_UNDEFINED: numpy.uint8,
FIDT_SSHORT: numpy.int16,
FIDT_SLONG: numpy.int32,
FIDT_SRATIONAL: [('numerator', numpy.int32),
('denominator', numpy.int32)],
FIDT_FLOAT: numpy.float32,
FIDT_DOUBLE: numpy.float64,
FIDT_IFD: numpy.uint32,
FIDT_PALETTE: [('R', numpy.uint8), ('G', numpy.uint8),
('B', numpy.uint8), ('A', numpy.uint8)],
FIDT_LONG8: numpy.uint64,
FIDT_SLONG8: numpy.int64,
FIDT_IFD8: numpy.uint64
}
def _process_bitmap(filename, flags, process_func):
filename = asbytes(filename)
ftype = _FI.FreeImage_GetFileType(filename, 0)
handle_errors()
if ftype == -1:
raise ValueError('Cannot determine type of file %s' % filename)
bitmap = _FI.FreeImage_Load(ftype, filename, flags)
handle_errors()
bitmap = ctypes.c_void_p(bitmap)
if not bitmap:
raise ValueError('Could not load file %s' % filename)
try:
return process_func(bitmap)
finally:
_FI.FreeImage_Unload(bitmap)
handle_errors()
def read(filename, flags=0):
"""Read an image to a numpy array of shape (height, width) for
greyscale images, or shape (height, width, nchannels) for RGB or
RGBA images.
The `flags` parameter should be one or more values from the IO_FLAGS
class defined in this module, or-ed together with | as appropriate.
(See the source-code comments for more details.)
"""
return _process_bitmap(filename, flags, _array_from_bitmap)
def read_metadata(filename):
"""Return a dict containing all image metadata.
Returned dict maps (metadata_model, tag_name) keys to tag values, where
metadata_model is a string name based on the FreeImage "metadata models"
defined in the class METADATA_MODELS.
"""
flags = IO_FLAGS.FIF_LOAD_NOPIXELS
return _process_bitmap(filename, flags, _read_metadata)
def _process_multipage(filename, flags, process_func):
filename = asbytes(filename)
ftype = _FI.FreeImage_GetFileType(filename, 0)
handle_errors()
if ftype == -1:
raise ValueError('Cannot determine type of file %s' % filename)
create_new = False
read_only = True
keep_cache_in_memory = True
multibitmap = _FI.FreeImage_OpenMultiBitmap(ftype, filename, create_new,
read_only, keep_cache_in_memory,
flags)
handle_errors()
multibitmap = ctypes.c_void_p(multibitmap)
if not multibitmap:
raise ValueError('Could not open %s as multi-page image.' % filename)
try:
pages = _FI.FreeImage_GetPageCount(multibitmap)
handle_errors()
out = []
for i in range(pages):
bitmap = _FI.FreeImage_LockPage(multibitmap, i)
handle_errors()
bitmap = ctypes.c_void_p(bitmap)
if not bitmap:
raise ValueError('Could not open %s as a multi-page image.'
% filename)
try:
out.append(process_func(bitmap))
finally:
_FI.FreeImage_UnlockPage(multibitmap, bitmap, False)
handle_errors()
return out
finally:
_FI.FreeImage_CloseMultiBitmap(multibitmap, 0)
handle_errors()
def read_multipage(filename, flags=0):
"""Read a multipage image to a list of numpy arrays, where each
array is of shape (height, width) for greyscale images, or shape
(height, width, nchannels) for RGB or RGBA images.
The `flags` parameter should be one or more values from the IO_FLAGS
class defined in this module, or-ed together with | as appropriate.
(See the source-code comments for more details.)
"""
return _process_multipage(filename, flags, _array_from_bitmap)
def read_multipage_metadata(filename):
"""Read a multipage image to a list of metadata dicts, one dict for each
page. The dict format is as in read_metadata().
"""
flags = IO_FLAGS.FIF_LOAD_NOPIXELS
return _process_multipage(filename, flags, _read_metadata)
def _wrap_bitmap_bits_in_array(bitmap, shape, dtype):
"""Return an ndarray view on the data in a FreeImage bitmap. Only
valid for as long as the bitmap is loaded (if single page) / locked
in memory (if multipage).
"""
pitch = _FI.FreeImage_GetPitch(bitmap)
handle_errors()
height = shape[-1]
byte_size = height * pitch
itemsize = dtype.itemsize
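# FreeImage pads each scanline to 'pitch' bytes, so the scanline axis strides by pitch rather than by width * itemsize.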
if len(shape) == 3:
strides = (itemsize, shape[0] * itemsize, pitch)
else:
strides = (itemsize, pitch)
bits = _FI.FreeImage_GetBits(bitmap)
handle_errors()
array = numpy.ndarray(shape, dtype=dtype,
buffer=(ctypes.c_char * byte_size).from_address(bits),
strides=strides)
return array
def _array_from_bitmap(bitmap):
"""Convert a FreeImage bitmap pointer to a numpy array.
"""
dtype, shape = FI_TYPES.get_type_and_shape(bitmap)
array = _wrap_bitmap_bits_in_array(bitmap, shape, dtype)
# swizzle the color components and flip the scanlines to go from
# FreeImage's BGR[A] and upside-down internal memory format to something
# more normal
def n(arr):
return arr[..., ::-1].T
if len(shape) == 3 and _FI.FreeImage_IsLittleEndian() and \
dtype.type == numpy.uint8:
b = n(array[0])
g = n(array[1])
r = n(array[2])
if shape[0] == 3:
handle_errors()
return numpy.dstack((r, g, b))
elif shape[0] == 4:
a = n(array[3])
return numpy.dstack((r, g, b, a))
else:
raise ValueError('Cannot handle images of shape %s' % shape)
# We need to copy because array does *not* own its memory
# after bitmap is freed.
return n(array).copy()
def _read_metadata(bitmap):
metadata = {}
models = [(name[5:], number) for name, number in
METADATA_MODELS.__dict__.items() if name.startswith('FIMD_')]
tag = ctypes.c_void_p()
for model_name, number in models:
mdhandle = _FI.FreeImage_FindFirstMetadata(number, bitmap,
ctypes.byref(tag))
handle_errors()
mdhandle = ctypes.c_void_p(mdhandle)
if mdhandle:
more = True
while more:
tag_name = asstr(_FI.FreeImage_GetTagKey(tag))
tag_type = _FI.FreeImage_GetTagType(tag)
byte_size = _FI.FreeImage_GetTagLength(tag)
handle_errors()
char_ptr = ctypes.c_char * byte_size
tag_str = char_ptr.from_address(_FI.FreeImage_GetTagValue(tag))
handle_errors()
if tag_type == METADATA_DATATYPE.FIDT_ASCII:
tag_val = asstr(tag_str.value)
else:
tag_val = numpy.fromstring(tag_str,
dtype=METADATA_DATATYPE.dtypes[tag_type])
if len(tag_val) == 1:
tag_val = tag_val[0]
metadata[(model_name, tag_name)] = tag_val
more = _FI.FreeImage_FindNextMetadata(mdhandle, ctypes.byref(tag))
handle_errors()
_FI.FreeImage_FindCloseMetadata(mdhandle)
handle_errors()
return metadata
def write(array, filename, flags=0):
"""Write a (height, width) or (height, width, nchannels) array to
a greyscale, RGB, or RGBA image, with file type deduced from the
filename.
The `flags` parameter should be one or more values from the IO_FLAGS
class defined in this module, or-ed together with | as appropriate.
(See the source-code comments for more details.)
"""
array = numpy.asarray(array)
filename = asbytes(filename)
ftype = _FI.FreeImage_GetFIFFromFilename(filename)
handle_errors()
if ftype == -1:
raise ValueError('Cannot determine type for %s' % filename)
bitmap, fi_type = _array_to_bitmap(array)
try:
if fi_type == FI_TYPES.FIT_BITMAP:
can_write = _FI.FreeImage_FIFSupportsExportBPP(ftype,
_FI.FreeImage_GetBPP(bitmap))
handle_errors()
else:
can_write = _FI.FreeImage_FIFSupportsExportType(ftype, fi_type)
handle_errors()
if not can_write:
raise TypeError('Cannot save image of this format '
'to this file type')
res = _FI.FreeImage_Save(ftype, bitmap, filename, flags)
handle_errors()
if not res:
raise RuntimeError('Could not save image properly.')
finally:
_FI.FreeImage_Unload(bitmap)
handle_errors()
def write_multipage(arrays, filename, flags=0):
"""Write a list of (height, width) or (height, width, nchannels)
arrays to a multipage greyscale, RGB, or RGBA image, with file type
deduced from the filename.
The `flags` parameter should be one or more values from the IO_FLAGS
class defined in this module, or-ed together with | as appropriate.
(See the source-code comments for more details.)
"""
filename = asbytes(filename)
ftype = _FI.FreeImage_GetFIFFromFilename(filename)
if ftype == -1:
raise ValueError('Cannot determine type of file %s' % filename)
create_new = True
read_only = False
keep_cache_in_memory = True
multibitmap = _FI.FreeImage_OpenMultiBitmap(ftype, filename,
create_new, read_only,
keep_cache_in_memory, 0)
multibitmap = ctypes.c_void_p(multibitmap)
if not multibitmap:
raise ValueError('Could not open %s for writing multi-page image.' %
filename)
try:
for array in arrays:
array = numpy.asarray(array)
bitmap, fi_type = _array_to_bitmap(array)
_FI.FreeImage_AppendPage(multibitmap, bitmap)
finally:
_FI.FreeImage_CloseMultiBitmap(multibitmap, flags)
# 4-byte quads of 0,v,v,v from 0,0,0,0 to 0,255,255,255
_GREY_PALETTE = numpy.arange(0, 0x01000000, 0x00010101, dtype=numpy.uint32)
def _array_to_bitmap(array):
"""Allocate a FreeImage bitmap and copy a numpy array into it.
"""
shape = array.shape
dtype = array.dtype
r, c = shape[:2]
if len(shape) == 2:
n_channels = 1
w_shape = (c, r)
elif len(shape) == 3:
n_channels = shape[2]
w_shape = (n_channels, c, r)
else:
n_channels = shape[0]
try:
fi_type = FI_TYPES.fi_types[(dtype, n_channels)]
except KeyError:
raise ValueError('Cannot write arrays of given type and shape.')
itemsize = array.dtype.itemsize
bpp = 8 * itemsize * n_channels
bitmap = _FI.FreeImage_AllocateT(fi_type, c, r, bpp, 0, 0, 0)
bitmap = ctypes.c_void_p(bitmap)
if not bitmap:
raise RuntimeError('Could not allocate image for storage')
try:
def n(arr): # normalise to freeimage's in-memory format
return arr.T[..., ::-1]
wrapped_array = _wrap_bitmap_bits_in_array(bitmap, w_shape, dtype)
# swizzle the color components and flip the scanlines to go to
# FreeImage's BGR[A] and upside-down internal memory format
if len(shape) == 3 and _FI.FreeImage_IsLittleEndian():
R = array[:, :, 0]
G = array[:, :, 1]
B = array[:, :, 2]
if dtype.type == numpy.uint8:
wrapped_array[0] = n(B)
wrapped_array[1] = n(G)
wrapped_array[2] = n(R)
elif dtype.type == numpy.uint16:
wrapped_array[0] = n(R)
wrapped_array[1] = n(G)
wrapped_array[2] = n(B)
if shape[2] == 4:
A = array[:, :, 3]
wrapped_array[3] = n(A)
else:
wrapped_array[:] = n(array)
if len(shape) == 2 and dtype.type == numpy.uint8:
palette = _FI.FreeImage_GetPalette(bitmap)
palette = ctypes.c_void_p(palette)
if not palette:
raise RuntimeError('Could not get image palette')
ctypes.memmove(palette, _GREY_PALETTE.ctypes.data, 1024)
return bitmap, fi_type
except:
_FI.FreeImage_Unload(bitmap)
raise
def imread(filename):
"""
img = imread(filename)
Reads an image from file `filename`
Parameters
----------
filename : file name
Returns
-------
img : ndarray
"""
img = read(filename)
return img
def imsave(filename, img):
'''
imsave(filename, img)
Save image to disk
Image type is inferred from filename
Parameters
----------
filename : file name
img : image to be saved as nd array
'''
write(img, filename)
|
[] |
[] |
[
"HOME"
] |
[]
|
["HOME"]
|
python
| 1 | 0 | |
muz/frontend/curses/main.py
|
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
import os
import time
import locale
import curses
import logging
import pygame
import muz
import muz.config
from . import Sound, Music
from .gamerenderer import GameRenderer
from . import __name__ as __parentname__
log = logging.getLogger(__name__)
class Clock(muz.frontend.Clock):
def __init__(self, t=0):
self._time = t
self._oldtime = t
self._delta = 0
self._fps = 0
@property
def time(self):
return self._time
@time.setter
def time(self, t):
self._delta = t - self._time
self._oldtime = self._time
self._time = t
try:
f = 1000.0 / self._delta
except ZeroDivisionError:
pass
else:
self._fps = f
@property
def deltaTime(self):
return self._delta
@property
def fps(self):
return self._fps
class Frontend(muz.frontend.Frontend):
def __init__(self, args, namespace):
self.config = self.makeDefaultConfig()
self._music = Music()
@property
def supportedMusicFormats(self):
return "ogg", "mp3", "wav"
@property
def supportedSoundFormats(self):
return "ogg", "wav"
def initAudio(self):
a = self.config["audio"]
if a["driver"] != "default":
os.environ["SDL_AUDIODRIVER"] = a["driver"]
pygame.mixer.init(
frequency=a["frequency"],
size=a["size"],
channels=a["channels"],
buffer=a["buffer"]
)
def postInit(self):
locale.setlocale(locale.LC_ALL, '')
self.initAudio()
def shutdown(self):
log.info("shutting down")
def loadSound(self, node):
s = Sound(node)
s.volume = self.config["audio"]["sound-effect-volume"]
return s
def loadMusic(self, node):
self._music._paused = False
pygame.mixer.music.load(node.realPath)
pygame.mixer.music.set_volume(self.config["audio"]["music-volume"])
return self._music
def makeDefaultConfigAudio(self):
return {
"frequency" : 44100,
"buffer" : 1024,
"size" : -16,
"channels" : 2,
"driver" : "default",
"sound-effect-volume" : 0.7,
"music-volume" : 0.5,
}
def makeDefaultConfigRoot(self):
return {
"audio" : self.makeDefaultConfigAudio(),
"use-default-colors" : True,
}
def makeDefaultConfig(self):
return muz.config.get(__parentname__, self.makeDefaultConfigRoot())
def cursesGameLoop(self, scr):
curses.start_color()
if self.config["use-default-colors"]:
curses.use_default_colors()
bg = -1
else:
bg = curses.COLOR_BLACK
for c in xrange(8):
curses.init_pair(c + 1, c, bg)
curses.curs_set(0)
mainwin = scr
win = mainwin
#win = curses.newwin(30, 100, 10, 10)
win.nodelay(True)
game = self.activity
while True:
game.clock.time = time.time() * 1000
game.update()
if game.paused:
game.resume()
game.renderer.draw(win)
def gameLoop(self, game):
self.activity = game
game.clock = Clock(int(time.time() * 1000))
game.renderer = GameRenderer(game)
curses.wrapper(self.cursesGameLoop)
def initKeymap(self, submap=None):
pass
@property
def title(self):
return ""
@title.setter
def title(self, v):
pass
|
[] |
[] |
[
"SDL_AUDIODRIVER"
] |
[]
|
["SDL_AUDIODRIVER"]
|
python
| 1 | 0 | |
examples/logical_enclosures.go
|
package main
import (
"fmt"
"github.com/HewlettPackard/oneview-golang/ov"
"github.com/HewlettPackard/oneview-golang/utils"
"os"
"strconv"
"strings"
)
func main() {
var (
ClientOV *ov.OVClient
logical_enclosure = "TestLE"
logical_enclosure_1 = "TestLE-Renamed"
scope_name = "Auto-Scope"
eg_name = "Auto-TestEG"
// li_name = "<logical_interconnect_name>"
)
apiversion, _ := strconv.Atoi(os.Getenv("ONEVIEW_APIVERSION"))
ovc := ClientOV.NewOVClient(
os.Getenv("ONEVIEW_OV_USER"),
os.Getenv("ONEVIEW_OV_PASSWORD"),
os.Getenv("ONEVIEW_OV_DOMAIN"),
os.Getenv("ONEVIEW_OV_ENDPOINT"),
false,
apiversion,
"*")
fmt.Println("#................... Create Logical Enclosure ...............#")
enclosureUris := new([]utils.Nstring)
*enclosureUris = append(*enclosureUris, utils.NewNstring("/rest/enclosures/0000000000A66101"))
*enclosureUris = append(*enclosureUris, utils.NewNstring("/rest/enclosures/0000000000A66102"))
*enclosureUris = append(*enclosureUris, utils.NewNstring("/rest/enclosures/0000000000A66103"))
enc_grp, err := ovc.GetEnclosureGroupByName(eg_name)
logicalEnclosure := ov.LogicalEnclosure{Name: logical_enclosure,
EnclosureUris: *enclosureUris,
EnclosureGroupUri: enc_grp.URI}
er := ovc.CreateLogicalEnclosure(logicalEnclosure)
if er != nil {
fmt.Println("............... Logical Enclosure Creation Failed:", er)
} else {
fmt.Println(".... Logical Enclosure Created Success")
}
fmt.Println("#................... Logical Enclosure by Name ...............#")
log_en, err := ovc.GetLogicalEnclosureByName(logical_enclosure)
if err != nil {
fmt.Println(err)
} else {
fmt.Println(log_en)
}
logicalInterconnect, _ := ovc.GetLogicalInterconnects("", "", "")
li := ov.LogicalInterconnect{}
for i := 0; i < len(logicalInterconnect.Members); i++ {
if logicalInterconnect.Members[i].URI == log_en.LogicalInterconnectUris[0] {
li = logicalInterconnect.Members[i]
}
}
fmt.Println("#................... Create Logical Enclosure Support Dumps ...............#")
supportdmp := ov.SupportDumps{ErrorCode: "MyDump16",
ExcludeApplianceDump: false,
LogicalInterconnectUris: []utils.Nstring{li.URI}}
le_id := strings.Replace(string(log_en.URI), "/rest/logical-enclosures/", "", 1)
data, er := ovc.CreateSupportDump(supportdmp, le_id)
if er != nil {
fmt.Println("............... Logical Enclosure Support Dump Creation Failed:", er)
} else {
fmt.Println(".... Logical Enclosure Support Dump Created Successfully", data)
fmt.Println(data["URI"])
id := strings.Trim(data["URI"], "/rest/tasks/")
task, err := ovc.GetTasksById("", "", "", "", id)
if err != nil {
fmt.Println("Error getting the task details ", err)
}
fmt.Println(task)
}
// Update Logical Enclosure From Logical Interconnect Group
err = ovc.UpdateFromGroupLogicalEnclosure(log_en)
if err != nil {
fmt.Println(err)
} else {
fmt.Println("#............. Update From Group Logical Enclosure Successfully .....#")
}
scope1, err := ovc.GetScopeByName(scope_name)
scope_uri := scope1.URI
scope_Uris := new([]string)
*scope_Uris = append(*scope_Uris, scope_uri.String())
// Update Logical Enclosure
log_enc, _ := ovc.GetLogicalEnclosureByName(logical_enclosure)
log_enc.Name = logical_enclosure_1
log_enc.ScopesUri = scope_uri
err = ovc.UpdateLogicalEnclosure(log_enc)
sort := "name:desc"
if err != nil {
fmt.Println(err)
} else {
fmt.Println("#.................... Logical Enclosure after Updating ...........#")
log_en_after_update, err := ovc.GetLogicalEnclosures("", "", "", *scope_Uris, sort)
if err != nil {
fmt.Println(err)
} else {
for i := 0; i < len(log_en_after_update.Members); i++ {
fmt.Println(log_en_after_update.Members[i].Name)
}
}
}
// Filtering Logical Enclosure with Scope
log_en_list, err := ovc.GetLogicalEnclosures("", "", "", *scope_Uris, sort)
if err != nil {
fmt.Println(err)
} else {
fmt.Println("# ................... Logical Enclosures List .................#")
for i := 0; i < len(log_en_list.Members); i++ {
fmt.Println(log_en_list.Members[i].Name)
}
}
// Deleting Logical Enclosure
/* err = ovc.DeleteLogicalEnclosure(logical_enclosure_1)
if err != nil {
fmt.Println(err)
} else {
fmt.Println("#...................... Deleted Logical Enclosure Successfully .....#")
}
*/
}
|
[
"\"ONEVIEW_APIVERSION\"",
"\"ONEVIEW_OV_USER\"",
"\"ONEVIEW_OV_PASSWORD\"",
"\"ONEVIEW_OV_DOMAIN\"",
"\"ONEVIEW_OV_ENDPOINT\""
] |
[] |
[
"ONEVIEW_OV_ENDPOINT",
"ONEVIEW_OV_DOMAIN",
"ONEVIEW_APIVERSION",
"ONEVIEW_OV_PASSWORD",
"ONEVIEW_OV_USER"
] |
[]
|
["ONEVIEW_OV_ENDPOINT", "ONEVIEW_OV_DOMAIN", "ONEVIEW_APIVERSION", "ONEVIEW_OV_PASSWORD", "ONEVIEW_OV_USER"]
|
go
| 5 | 0 | |
providers/seatalk/seatalk_test.go
|
package seatalk_test
import (
"github.com/a93h/goth"
"github.com/a93h/goth/providers/seatalk"
"github.com/stretchr/testify/assert"
"os"
"testing"
)
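// provider() is configured from the SEATALK_KEY and SEATALK_SECRET environment variables; the assertions below compare against the same values.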
func Test_New(t *testing.T) {
t.Parallel()
a := assert.New(t)
p := provider()
a.Equal(p.ClientKey, os.Getenv("SEATALK_KEY"))
a.Equal(p.Secret, os.Getenv("SEATALK_SECRET"))
a.Equal(p.CallbackURL, "/foo")
}
func Test_Implements_Provider(t *testing.T) {
t.Parallel()
a := assert.New(t)
a.Implements((*goth.Provider)(nil), provider())
}
func Test_BeginAuth(t *testing.T) {
t.Parallel()
a := assert.New(t)
p := provider()
session, err := p.BeginAuth("test_state")
s := session.(*seatalk.Session)
a.NoError(err)
a.Contains(s.AuthURL, "seatalkweb.com/webapp/oauth2/authorize")
}
func Test_SessionFromJSON(t *testing.T) {
t.Parallel()
a := assert.New(t)
p := provider()
session, err := p.UnmarshalSession(`{"AuthURL":"https://seatalkweb.com/webapp/oauth2/authorize","AccessToken":"1234567890"}`)
a.NoError(err)
s := session.(*seatalk.Session)
a.Equal(s.AuthURL, "https://seatalkweb.com/webapp/oauth2/authorize")
a.Equal(s.AccessToken, "1234567890")
}
func provider() *seatalk.Provider {
return seatalk.New(os.Getenv("SEATALK_KEY"), os.Getenv("SEATALK_SECRET"), "/foo")
}
|
[
"\"SEATALK_KEY\"",
"\"SEATALK_SECRET\"",
"\"SEATALK_KEY\"",
"\"SEATALK_SECRET\""
] |
[] |
[
"SEATALK_SECRET",
"SEATALK_KEY"
] |
[]
|
["SEATALK_SECRET", "SEATALK_KEY"]
|
go
| 2 | 0 | |
python_modules/dagster/dagster/core/instance/__init__.py
|
import inspect
import logging
import os
import sys
import tempfile
import time
import warnings
import weakref
from collections import defaultdict
from enum import Enum
from typing import TYPE_CHECKING, Any, Dict, Iterable, List, Union
import yaml
from dagster import check
from dagster.core.definitions.events import AssetKey
from dagster.core.definitions.pipeline import PipelineDefinition, PipelineSubsetDefinition
from dagster.core.definitions.pipeline_base import InMemoryPipeline
from dagster.core.errors import (
DagsterHomeNotSetError,
DagsterInvariantViolationError,
DagsterNoStepsToExecuteException,
DagsterRunAlreadyExists,
DagsterRunConflict,
)
from dagster.core.storage.pipeline_run import (
PipelineRun,
PipelineRunStatus,
PipelineRunsFilter,
RunRecord,
)
from dagster.core.storage.tags import MEMOIZED_RUN_TAG
from dagster.core.system_config.objects import ResolvedRunConfig
from dagster.core.utils import str_format_list
from dagster.serdes import ConfigurableClass
from dagster.seven import get_current_datetime_in_utc
from dagster.utils.error import serializable_error_info_from_exc_info
from .config import DAGSTER_CONFIG_YAML_FILENAME, is_dagster_home_set
from .ref import InstanceRef
# 'airflow_execution_date' and 'is_airflow_ingest_pipeline' are hardcoded tags used in the
# airflow ingestion logic (see: dagster_pipeline_factory.py). 'airflow_execution_date' stores the
# 'execution_date' used in Airflow operator execution and 'is_airflow_ingest_pipeline' determines
# whether 'airflow_execution_date' is needed.
# https://github.com/dagster-io/dagster/issues/2403
AIRFLOW_EXECUTION_DATE_STR = "airflow_execution_date"
IS_AIRFLOW_INGEST_PIPELINE_STR = "is_airflow_ingest_pipeline"
if TYPE_CHECKING:
from dagster.core.events import DagsterEvent, DagsterEventType
from dagster.core.host_representation import HistoricalPipeline
from dagster.core.snap import PipelineSnapshot
from dagster.daemon.types import DaemonHeartbeat
def is_memoized_run(tags):
return tags is not None and MEMOIZED_RUN_TAG in tags and tags.get(MEMOIZED_RUN_TAG) == "true"
def _check_run_equality(pipeline_run, candidate_run):
check.inst_param(pipeline_run, "pipeline_run", PipelineRun)
check.inst_param(candidate_run, "candidate_run", PipelineRun)
field_diff = {}
for field in pipeline_run._fields:
expected_value = getattr(pipeline_run, field)
candidate_value = getattr(candidate_run, field)
if expected_value != candidate_value:
field_diff[field] = (expected_value, candidate_value)
return field_diff
def _format_field_diff(field_diff):
return "\n".join(
[
(
" {field_name}:\n"
+ " Expected: {expected_value}\n"
+ " Received: {candidate_value}"
).format(
field_name=field_name,
expected_value=expected_value,
candidate_value=candidate_value,
)
for field_name, (
expected_value,
candidate_value,
) in field_diff.items()
]
)
class _EventListenerLogHandler(logging.Handler):
def __init__(self, instance):
self._instance = instance
super(_EventListenerLogHandler, self).__init__()
def emit(self, record):
from dagster.core.events.log import construct_event_record, StructuredLoggerMessage
try:
event = construct_event_record(
StructuredLoggerMessage(
name=record.name,
message=record.msg,
level=record.levelno,
meta=record.dagster_meta,
record=record,
)
)
self._instance.handle_new_event(event)
except Exception as e: # pylint: disable=W0703
logging.critical("Error during instance event listen")
logging.exception(str(e))
raise
class InstanceType(Enum):
PERSISTENT = "PERSISTENT"
EPHEMERAL = "EPHEMERAL"
class MayHaveInstanceWeakref:
"""Mixin for classes that can have a weakref back to a Dagster instance."""
def __init__(self):
self._instance_weakref: weakref.ref = None
@property
def _instance(self):
return (
self._instance_weakref()
# Backcompat with custom subclasses that don't call super().__init__()
# in their own __init__ implementations
if (hasattr(self, "_instance_weakref") and self._instance_weakref is not None)
else None
)
def register_instance(self, instance):
check.inst_param(instance, "instance", DagsterInstance)
check.invariant(
# Backcompat with custom subclasses that don't call super().__init__()
# in their own __init__ implementations
(not hasattr(self, "_instance_weakref") or self._instance_weakref is None),
"Must only call initialize once",
)
# Store a weakref to avoid a circular reference / enable GC
self._instance_weakref = weakref.ref(instance)
class DagsterInstance:
"""Core abstraction for managing Dagster's access to storage and other resources.
Use DagsterInstance.get() to grab the current DagsterInstance which will load based on
the values in the ``dagster.yaml`` file in ``$DAGSTER_HOME``.
Alternatively, DagsterInstance.ephemeral() can be used, which provides a set of
transient in-memory components.
Configuration of this class should be done by setting values in ``$DAGSTER_HOME/dagster.yaml``.
For example, to use Postgres for run and event log storage, you can write a ``dagster.yaml``
such as the following:
.. literalinclude:: ../../../../docs/sections/deploying/postgres_dagster.yaml
:caption: dagster.yaml
:language: YAML
Args:
instance_type (InstanceType): Indicates whether the instance is ephemeral or persistent.
Users should not attempt to set this value directly or in their ``dagster.yaml`` files.
local_artifact_storage (LocalArtifactStorage): The local artifact storage is used to
configure storage for any artifacts that require a local disk, such as schedules, or
when using the filesystem system storage to manage files and intermediates. By default,
this will be a :py:class:`dagster.core.storage.root.LocalArtifactStorage`. Configurable
in ``dagster.yaml`` using the :py:class:`~dagster.serdes.ConfigurableClass`
machinery.
run_storage (RunStorage): The run storage is used to store metadata about ongoing and past
pipeline runs. By default, this will be a
:py:class:`dagster.core.storage.runs.SqliteRunStorage`. Configurable in ``dagster.yaml``
using the :py:class:`~dagster.serdes.ConfigurableClass` machinery.
event_storage (EventLogStorage): Used to store the structured event logs generated by
pipeline runs. By default, this will be a
:py:class:`dagster.core.storage.event_log.SqliteEventLogStorage`. Configurable in
``dagster.yaml`` using the :py:class:`~dagster.serdes.ConfigurableClass` machinery.
compute_log_manager (ComputeLogManager): The compute log manager handles stdout and stderr
logging for solid compute functions. By default, this will be a
:py:class:`dagster.core.storage.local_compute_log_manager.LocalComputeLogManager`.
Configurable in ``dagster.yaml`` using the
:py:class:`~dagster.serdes.ConfigurableClass` machinery.
run_coordinator (RunCoordinator): A runs coordinator may be used to manage the execution
of pipeline runs.
run_launcher (Optional[RunLauncher]): Optionally, a run launcher may be used to enable
a Dagster instance to launch pipeline runs, e.g. on a remote Kubernetes cluster, in
addition to running them locally.
settings (Optional[Dict]): Specifies certain per-instance settings,
such as feature flags. These are set in the ``dagster.yaml`` under a set of whitelisted
keys.
ref (Optional[InstanceRef]): Used by internal machinery to pass instances across process
boundaries.
"""
_PROCESS_TEMPDIR = None
def __init__(
self,
instance_type,
local_artifact_storage,
run_storage,
event_storage,
compute_log_manager,
schedule_storage=None,
scheduler=None,
run_coordinator=None,
run_launcher=None,
settings=None,
ref=None,
):
from dagster.core.storage.compute_log_manager import ComputeLogManager
from dagster.core.storage.event_log import EventLogStorage
from dagster.core.storage.root import LocalArtifactStorage
from dagster.core.storage.runs import RunStorage
from dagster.core.storage.schedules import ScheduleStorage
from dagster.core.scheduler import Scheduler
from dagster.core.run_coordinator import RunCoordinator
from dagster.core.launcher import RunLauncher
self._instance_type = check.inst_param(instance_type, "instance_type", InstanceType)
self._local_artifact_storage = check.inst_param(
local_artifact_storage, "local_artifact_storage", LocalArtifactStorage
)
self._event_storage = check.inst_param(event_storage, "event_storage", EventLogStorage)
self._event_storage.register_instance(self)
self._run_storage = check.inst_param(run_storage, "run_storage", RunStorage)
self._run_storage.register_instance(self)
self._compute_log_manager = check.inst_param(
compute_log_manager, "compute_log_manager", ComputeLogManager
)
self._compute_log_manager.register_instance(self)
self._scheduler = check.opt_inst_param(scheduler, "scheduler", Scheduler)
self._schedule_storage = check.opt_inst_param(
schedule_storage, "schedule_storage", ScheduleStorage
)
if self._schedule_storage:
self._schedule_storage.register_instance(self)
self._run_coordinator = check.inst_param(run_coordinator, "run_coordinator", RunCoordinator)
self._run_coordinator.register_instance(self)
if hasattr(self._run_coordinator, "initialize") and inspect.ismethod(
getattr(self._run_coordinator, "initialize")
):
warnings.warn(
"The initialize method on RunCoordinator has been deprecated as of 0.11.0 and will "
"no longer be called during DagsterInstance init. Instead, the DagsterInstance "
"will be made automatically available on any run coordinator associated with a "
"DagsterInstance. In test, you may need to call RunCoordinator.register_instance() "
"(mixed in from MayHaveInstanceWeakref). If you need to make use of the instance "
"to set up your custom RunCoordinator, you should override "
"RunCoordintor.register_instance(). This warning will be removed in 0.12.0."
)
self._run_launcher = check.inst_param(run_launcher, "run_launcher", RunLauncher)
self._run_launcher.register_instance(self)
if hasattr(self._run_launcher, "initialize") and inspect.ismethod(
getattr(self._run_launcher, "initialize")
):
warnings.warn(
"The initialize method on RunLauncher has been deprecated as of 0.11.0 and will "
"no longer be called during DagsterInstance init. Instead, the DagsterInstance "
"will be made automatically available on any run launcher associated with a "
"DagsterInstance. In test, you may need to call RunLauncher.register_instance() "
"(mixed in from MayHaveInstanceWeakref). If you need to make use of the instance "
"to set up your custom RunLauncher, you should override "
"RunLauncher.register_instance(). This warning will be removed in 0.12.0."
)
self._settings = check.opt_dict_param(settings, "settings")
self._ref = check.opt_inst_param(ref, "ref", InstanceRef)
self._subscribers = defaultdict(list)
# ctors
@staticmethod
def ephemeral(tempdir=None, preload=None):
from dagster.core.run_coordinator import DefaultRunCoordinator
from dagster.core.launcher.sync_in_memory_run_launcher import SyncInMemoryRunLauncher
from dagster.core.storage.event_log import InMemoryEventLogStorage
from dagster.core.storage.root import LocalArtifactStorage
from dagster.core.storage.runs import InMemoryRunStorage
from dagster.core.storage.noop_compute_log_manager import NoOpComputeLogManager
if tempdir is None:
tempdir = DagsterInstance.temp_storage()
return DagsterInstance(
InstanceType.EPHEMERAL,
local_artifact_storage=LocalArtifactStorage(tempdir),
run_storage=InMemoryRunStorage(preload=preload),
event_storage=InMemoryEventLogStorage(preload=preload),
compute_log_manager=NoOpComputeLogManager(),
run_coordinator=DefaultRunCoordinator(),
run_launcher=SyncInMemoryRunLauncher(),
)
@staticmethod
def get():
dagster_home_path = os.getenv("DAGSTER_HOME")
if not dagster_home_path:
raise DagsterHomeNotSetError(
(
"The environment variable $DAGSTER_HOME is not set. \n"
"Dagster requires this environment variable to be set to an existing directory in your filesystem. "
"This directory is used to store metadata across sessions, or load the dagster.yaml "
"file which can configure storing metadata in an external database.\n"
"You can resolve this error by exporting the environment variable. For example, you can run the following command in your shell or include it in your shell configuration file:\n"
'\texport DAGSTER_HOME="~/dagster_home"\n'
"Alternatively, DagsterInstance.ephemeral() can be used for a transient instance.\n"
)
)
dagster_home_path = os.path.expanduser(dagster_home_path)
if not os.path.isabs(dagster_home_path):
raise DagsterInvariantViolationError(
(
'$DAGSTER_HOME "{}" must be an absolute path. Dagster requires this '
"environment variable to be set to an existing directory in your filesystem."
).format(dagster_home_path)
)
if not (os.path.exists(dagster_home_path) and os.path.isdir(dagster_home_path)):
raise DagsterInvariantViolationError(
(
'$DAGSTER_HOME "{}" is not a directory or does not exist. Dagster requires this '
"environment variable to be set to an existing directory in your filesystem"
).format(dagster_home_path)
)
return DagsterInstance.from_config(dagster_home_path)
@staticmethod
def local_temp(tempdir=None, overrides=None):
if tempdir is None:
tempdir = DagsterInstance.temp_storage()
return DagsterInstance.from_ref(InstanceRef.from_dir(tempdir, overrides=overrides))
@staticmethod
def from_config(
config_dir,
config_filename=DAGSTER_CONFIG_YAML_FILENAME,
):
instance_ref = InstanceRef.from_dir(config_dir, config_filename=config_filename)
return DagsterInstance.from_ref(instance_ref)
@staticmethod
def from_ref(instance_ref):
check.inst_param(instance_ref, "instance_ref", InstanceRef)
# DagsterInstance doesn't implement ConfigurableClass, but we may still sometimes want to
# have custom subclasses of DagsterInstance. This machinery allows for those custom
# subclasses to receive additional keyword arguments passed through the config YAML. Note
# that unlike a ConfigurableClass, these additional arguments are not type checked -- the
# raw Python dict returned by yaml.load is just splatted into kwargs.
klass = instance_ref.custom_instance_class or DagsterInstance
kwargs = instance_ref.custom_instance_class_config
return klass(
instance_type=InstanceType.PERSISTENT,
local_artifact_storage=instance_ref.local_artifact_storage,
run_storage=instance_ref.run_storage,
event_storage=instance_ref.event_storage,
compute_log_manager=instance_ref.compute_log_manager,
schedule_storage=instance_ref.schedule_storage,
scheduler=instance_ref.scheduler,
run_coordinator=instance_ref.run_coordinator,
run_launcher=instance_ref.run_launcher,
settings=instance_ref.settings,
ref=instance_ref,
**kwargs,
)
# flags
@property
def is_persistent(self):
return self._instance_type == InstanceType.PERSISTENT
@property
def is_ephemeral(self):
return self._instance_type == InstanceType.EPHEMERAL
def get_ref(self):
if self._ref:
return self._ref
check.failed(
"Attempted to prepare an ineligible DagsterInstance ({inst_type}) for cross "
"process communication.{dagster_home_msg}".format(
inst_type=self._instance_type,
dagster_home_msg="\nDAGSTER_HOME environment variable is not set, set it to "
"a directory on the filesystem for dagster to use for storage and cross "
"process coordination."
if os.getenv("DAGSTER_HOME") is None
else "",
)
)
@property
def root_directory(self):
return self._local_artifact_storage.base_dir
@staticmethod
def temp_storage():
if DagsterInstance._PROCESS_TEMPDIR is None:
DagsterInstance._PROCESS_TEMPDIR = tempfile.TemporaryDirectory()
return DagsterInstance._PROCESS_TEMPDIR.name
def _info(self, component):
# ConfigurableClass may not have inst_data if it's a direct instantiation
# which happens for ephemeral instances
if isinstance(component, ConfigurableClass) and component.inst_data:
return component.inst_data.info_dict()
if type(component) is dict:
return component
return component.__class__.__name__
def _info_str_for_component(self, component_name, component):
return yaml.dump(
{component_name: self._info(component)}, default_flow_style=False, sort_keys=False
)
def info_dict(self):
settings = self._settings if self._settings else {}
ret = {
"local_artifact_storage": self._info(self._local_artifact_storage),
"run_storage": self._info(self._run_storage),
"event_log_storage": self._info(self._event_storage),
"compute_logs": self._info(self._compute_log_manager),
"schedule_storage": self._info(self._schedule_storage),
"scheduler": self._info(self._scheduler),
"run_coordinator": self._info(self._run_coordinator),
"run_launcher": self._info(self._run_launcher),
}
ret.update(
{
settings_key: self._info(settings_value)
for settings_key, settings_value in settings.items()
}
)
return ret
def info_str(self):
return yaml.dump(self.info_dict(), default_flow_style=False, sort_keys=False)
@property
def run_storage(self):
return self._run_storage
@property
def event_log_storage(self):
return self._event_storage
# schedule storage
@property
def schedule_storage(self):
return self._schedule_storage
@property
def scheduler(self):
return self._scheduler
@property
def scheduler_class(self):
return self.scheduler.__class__.__name__ if self.scheduler else None
# run coordinator
@property
def run_coordinator(self):
return self._run_coordinator
# run launcher
@property
def run_launcher(self):
return self._run_launcher
# compute logs
@property
def compute_log_manager(self):
return self._compute_log_manager
def get_settings(self, settings_key):
check.str_param(settings_key, "settings_key")
if self._settings and settings_key in self._settings:
return self._settings.get(settings_key)
return {}
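# Illustrative dagster.yaml snippet for the "telemetry" settings key consumed by
# telemetry_enabled below:
#   telemetry:
#     enabled: false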
@property
def telemetry_enabled(self):
if self.is_ephemeral:
return False
dagster_telemetry_enabled_default = True
telemetry_settings = self.get_settings("telemetry")
if not telemetry_settings:
return dagster_telemetry_enabled_default
if "enabled" in telemetry_settings:
return telemetry_settings["enabled"]
else:
return dagster_telemetry_enabled_default
def upgrade(self, print_fn=None):
from dagster.core.storage.migration.utils import upgrading_instance
with upgrading_instance(self):
if print_fn:
print_fn("Updating run storage...")
self._run_storage.upgrade()
self._run_storage.build_missing_indexes(print_fn=print_fn)
if print_fn:
print_fn("Updating event storage...")
self._event_storage.upgrade()
if print_fn:
print_fn("Updating schedule storage...")
self._schedule_storage.upgrade()
def optimize_for_dagit(self, statement_timeout):
if self._schedule_storage:
self._schedule_storage.validate_stored_schedules(self.scheduler_class)
self._schedule_storage.optimize_for_dagit(statement_timeout=statement_timeout)
self._run_storage.optimize_for_dagit(statement_timeout=statement_timeout)
self._event_storage.optimize_for_dagit(statement_timeout=statement_timeout)
def reindex(self, print_fn=lambda _: None):
print_fn("Checking for reindexing...")
self._event_storage.reindex(print_fn)
self._run_storage.reindex(print_fn)
print_fn("Done.")
def dispose(self):
self._run_storage.dispose()
self.run_coordinator.dispose()
self._run_launcher.dispose()
self._event_storage.dispose()
self._compute_log_manager.dispose()
# run storage
def get_run_by_id(self, run_id: str) -> PipelineRun:
return self._run_storage.get_run_by_id(run_id)
def get_pipeline_snapshot(self, snapshot_id: str) -> "PipelineSnapshot":
return self._run_storage.get_pipeline_snapshot(snapshot_id)
def has_pipeline_snapshot(self, snapshot_id: str) -> bool:
return self._run_storage.has_pipeline_snapshot(snapshot_id)
def get_historical_pipeline(self, snapshot_id: str) -> "HistoricalPipeline":
from dagster.core.host_representation import HistoricalPipeline
snapshot = self._run_storage.get_pipeline_snapshot(snapshot_id)
parent_snapshot = (
self._run_storage.get_pipeline_snapshot(snapshot.lineage_snapshot.parent_snapshot_id)
if snapshot.lineage_snapshot
else None
)
return HistoricalPipeline(
self._run_storage.get_pipeline_snapshot(snapshot_id), snapshot_id, parent_snapshot
)
def has_historical_pipeline(self, snapshot_id):
return self._run_storage.has_pipeline_snapshot(snapshot_id)
def get_execution_plan_snapshot(self, snapshot_id):
return self._run_storage.get_execution_plan_snapshot(snapshot_id)
def get_run_stats(self, run_id):
return self._event_storage.get_stats_for_run(run_id)
def get_run_step_stats(self, run_id, step_keys=None):
return self._event_storage.get_step_stats_for_run(run_id, step_keys)
def get_run_tags(self):
return self._run_storage.get_run_tags()
def get_run_group(self, run_id):
return self._run_storage.get_run_group(run_id)
def create_run_for_pipeline(
self,
pipeline_def,
execution_plan=None,
run_id=None,
run_config=None,
mode=None,
solids_to_execute=None,
step_keys_to_execute=None,
status=None,
tags=None,
root_run_id=None,
parent_run_id=None,
solid_selection=None,
):
from dagster.core.execution.plan.plan import ExecutionPlan
from dagster.core.snap import snapshot_from_execution_plan
check.inst_param(pipeline_def, "pipeline_def", PipelineDefinition)
check.opt_inst_param(execution_plan, "execution_plan", ExecutionPlan)
# note that solids_to_execute is required to execute the solid subset, which is the
# frozenset version of the previous solid_subset.
# solid_selection is not required and will not be converted to solids_to_execute here.
# i.e. this function doesn't handle solid queries.
# solid_selection is only used to pass the user queries further down.
check.opt_set_param(solids_to_execute, "solids_to_execute", of_type=str)
check.opt_list_param(solid_selection, "solid_selection", of_type=str)
if solids_to_execute:
if isinstance(pipeline_def, PipelineSubsetDefinition):
# for the case when pipeline_def is created by IPipeline or ExternalPipeline
check.invariant(
solids_to_execute == pipeline_def.solids_to_execute,
"Cannot create a PipelineRun from pipeline subset {pipeline_solids_to_execute} "
"that conflicts with solids_to_execute arg {solids_to_execute}".format(
pipeline_solids_to_execute=str_format_list(pipeline_def.solids_to_execute),
solids_to_execute=str_format_list(solids_to_execute),
),
)
else:
# for cases when `create_run_for_pipeline` is directly called
pipeline_def = pipeline_def.get_pipeline_subset_def(
solids_to_execute=solids_to_execute
)
if execution_plan:
final_execution_plan = execution_plan
check.invariant(
step_keys_to_execute is None,
"Should not pass execution_plan and step_keys_to_execute to create_run",
)
step_keys_to_execute = execution_plan.step_keys_to_execute
else:
resolved_run_config = ResolvedRunConfig.build(pipeline_def, run_config, mode)
full_execution_plan = ExecutionPlan.build(
InMemoryPipeline(pipeline_def),
resolved_run_config,
)
if is_memoized_run(tags):
from dagster.core.execution.resolve_versions import resolve_memoized_execution_plan
if step_keys_to_execute:
raise DagsterInvariantViolationError(
"step_keys_to_execute parameter cannot be used in conjunction with memoized "
"pipeline runs."
)
final_execution_plan = resolve_memoized_execution_plan(
full_execution_plan,
pipeline_def,
run_config,
self,
resolved_run_config,
) # TODO: tighter integration with existing step_keys_to_execute functionality
step_keys_to_execute = final_execution_plan.step_keys_to_execute
if not step_keys_to_execute:
raise DagsterNoStepsToExecuteException(
"No steps found to execute. "
"This is because every step in the plan has already been memoized."
)
elif step_keys_to_execute:
final_execution_plan = full_execution_plan.build_subset_plan(
step_keys_to_execute, pipeline_def, resolved_run_config
)
else:
final_execution_plan = full_execution_plan
return self.create_run(
pipeline_name=pipeline_def.name,
run_id=run_id,
run_config=run_config,
mode=check.opt_str_param(mode, "mode", default=pipeline_def.get_default_mode_name()),
solid_selection=solid_selection,
solids_to_execute=solids_to_execute,
step_keys_to_execute=step_keys_to_execute,
status=status,
tags=tags,
root_run_id=root_run_id,
parent_run_id=parent_run_id,
pipeline_snapshot=pipeline_def.get_pipeline_snapshot(),
execution_plan_snapshot=snapshot_from_execution_plan(
final_execution_plan,
pipeline_def.get_pipeline_snapshot_id(),
),
parent_pipeline_snapshot=pipeline_def.get_parent_pipeline_snapshot(),
)
def _construct_run_with_snapshots(
self,
pipeline_name,
run_id,
run_config,
mode,
solids_to_execute,
step_keys_to_execute,
status,
tags,
root_run_id,
parent_run_id,
pipeline_snapshot,
execution_plan_snapshot,
parent_pipeline_snapshot,
solid_selection=None,
external_pipeline_origin=None,
):
# https://github.com/dagster-io/dagster/issues/2403
if tags and IS_AIRFLOW_INGEST_PIPELINE_STR in tags:
if AIRFLOW_EXECUTION_DATE_STR not in tags:
tags[AIRFLOW_EXECUTION_DATE_STR] = get_current_datetime_in_utc().isoformat()
check.invariant(
not (not pipeline_snapshot and execution_plan_snapshot),
"It is illegal to have an execution plan snapshot and not have a pipeline snapshot. "
"It is possible to have no execution plan snapshot since we persist runs "
"that do not successfully compile execution plans in the scheduled case.",
)
pipeline_snapshot_id = (
self._ensure_persisted_pipeline_snapshot(pipeline_snapshot, parent_pipeline_snapshot)
if pipeline_snapshot
else None
)
execution_plan_snapshot_id = (
self._ensure_persisted_execution_plan_snapshot(
execution_plan_snapshot, pipeline_snapshot_id, step_keys_to_execute
)
if execution_plan_snapshot and pipeline_snapshot_id
else None
)
return PipelineRun(
pipeline_name=pipeline_name,
run_id=run_id,
run_config=run_config,
mode=mode,
solid_selection=solid_selection,
solids_to_execute=solids_to_execute,
step_keys_to_execute=step_keys_to_execute,
status=status,
tags=tags,
root_run_id=root_run_id,
parent_run_id=parent_run_id,
pipeline_snapshot_id=pipeline_snapshot_id,
execution_plan_snapshot_id=execution_plan_snapshot_id,
external_pipeline_origin=external_pipeline_origin,
)
def _ensure_persisted_pipeline_snapshot(self, pipeline_snapshot, parent_pipeline_snapshot):
from dagster.core.snap import create_pipeline_snapshot_id, PipelineSnapshot
check.inst_param(pipeline_snapshot, "pipeline_snapshot", PipelineSnapshot)
check.opt_inst_param(parent_pipeline_snapshot, "parent_pipeline_snapshot", PipelineSnapshot)
if pipeline_snapshot.lineage_snapshot:
if not self._run_storage.has_pipeline_snapshot(
pipeline_snapshot.lineage_snapshot.parent_snapshot_id
):
check.invariant(
create_pipeline_snapshot_id(parent_pipeline_snapshot)
== pipeline_snapshot.lineage_snapshot.parent_snapshot_id,
"Parent pipeline snapshot id out of sync with passed parent pipeline snapshot",
)
returned_pipeline_snapshot_id = self._run_storage.add_pipeline_snapshot(
parent_pipeline_snapshot
)
check.invariant(
pipeline_snapshot.lineage_snapshot.parent_snapshot_id
== returned_pipeline_snapshot_id
)
pipeline_snapshot_id = create_pipeline_snapshot_id(pipeline_snapshot)
if not self._run_storage.has_pipeline_snapshot(pipeline_snapshot_id):
returned_pipeline_snapshot_id = self._run_storage.add_pipeline_snapshot(
pipeline_snapshot
)
check.invariant(pipeline_snapshot_id == returned_pipeline_snapshot_id)
return pipeline_snapshot_id
def _ensure_persisted_execution_plan_snapshot(
self, execution_plan_snapshot, pipeline_snapshot_id, step_keys_to_execute
):
from dagster.core.snap.execution_plan_snapshot import (
ExecutionPlanSnapshot,
create_execution_plan_snapshot_id,
)
check.inst_param(execution_plan_snapshot, "execution_plan_snapshot", ExecutionPlanSnapshot)
check.str_param(pipeline_snapshot_id, "pipeline_snapshot_id")
check.opt_list_param(step_keys_to_execute, "step_keys_to_execute", of_type=str)
check.invariant(
execution_plan_snapshot.pipeline_snapshot_id == pipeline_snapshot_id,
(
"Snapshot mismatch: Snapshot ID in execution plan snapshot is "
'"{ep_pipeline_snapshot_id}" and snapshot_id created in memory is '
'"{pipeline_snapshot_id}"'
).format(
ep_pipeline_snapshot_id=execution_plan_snapshot.pipeline_snapshot_id,
pipeline_snapshot_id=pipeline_snapshot_id,
),
)
check.invariant(
set(step_keys_to_execute) == set(execution_plan_snapshot.step_keys_to_execute)
if step_keys_to_execute
else set(execution_plan_snapshot.step_keys_to_execute)
== set([step.key for step in execution_plan_snapshot.steps]),
"We encode step_keys_to_execute twice in our stack, unfortunately. This check "
"ensures that they are consistent. We check that step_keys_to_execute in the plan "
"matches the step_keys_to_execute params if it is set. If it is not, this indicates "
"a full execution plan, and so we verify that.",
)
execution_plan_snapshot_id = create_execution_plan_snapshot_id(execution_plan_snapshot)
if not self._run_storage.has_execution_plan_snapshot(execution_plan_snapshot_id):
returned_execution_plan_snapshot_id = self._run_storage.add_execution_plan_snapshot(
execution_plan_snapshot
)
check.invariant(execution_plan_snapshot_id == returned_execution_plan_snapshot_id)
return execution_plan_snapshot_id
def create_run(
self,
pipeline_name,
run_id,
run_config,
mode,
solids_to_execute,
step_keys_to_execute,
status,
tags,
root_run_id,
parent_run_id,
pipeline_snapshot,
execution_plan_snapshot,
parent_pipeline_snapshot,
solid_selection=None,
external_pipeline_origin=None,
):
pipeline_run = self._construct_run_with_snapshots(
pipeline_name=pipeline_name,
run_id=run_id,
run_config=run_config,
mode=mode,
solid_selection=solid_selection,
solids_to_execute=solids_to_execute,
step_keys_to_execute=step_keys_to_execute,
status=status,
tags=tags,
root_run_id=root_run_id,
parent_run_id=parent_run_id,
pipeline_snapshot=pipeline_snapshot,
execution_plan_snapshot=execution_plan_snapshot,
parent_pipeline_snapshot=parent_pipeline_snapshot,
external_pipeline_origin=external_pipeline_origin,
)
return self._run_storage.add_run(pipeline_run)
def register_managed_run(
self,
pipeline_name,
run_id,
run_config,
mode,
solids_to_execute,
step_keys_to_execute,
tags,
root_run_id,
parent_run_id,
pipeline_snapshot,
execution_plan_snapshot,
parent_pipeline_snapshot,
solid_selection=None,
):
# The usage of this method is limited to dagster-airflow, specifically in Dagster
# Operators that are executed in Airflow. Because a common workflow in Airflow is to
# retry dags from arbitrary tasks, we need any node to be capable of creating a
# PipelineRun.
#
# The try-except DagsterRunAlreadyExists block handles the race when multiple "root" tasks
# simultaneously execute self._run_storage.add_run(pipeline_run). When this happens, only
# one task succeeds in creating the run, while the others get DagsterRunAlreadyExists
# error; at this point, the failed tasks try again to fetch the existing run.
# https://github.com/dagster-io/dagster/issues/2412
pipeline_run = self._construct_run_with_snapshots(
pipeline_name=pipeline_name,
run_id=run_id,
run_config=run_config,
mode=mode,
solid_selection=solid_selection,
solids_to_execute=solids_to_execute,
step_keys_to_execute=step_keys_to_execute,
status=PipelineRunStatus.MANAGED,
tags=tags,
root_run_id=root_run_id,
parent_run_id=parent_run_id,
pipeline_snapshot=pipeline_snapshot,
execution_plan_snapshot=execution_plan_snapshot,
parent_pipeline_snapshot=parent_pipeline_snapshot,
)
def get_run():
candidate_run = self.get_run_by_id(pipeline_run.run_id)
field_diff = _check_run_equality(pipeline_run, candidate_run)
if field_diff:
raise DagsterRunConflict(
"Found conflicting existing run with same id {run_id}. Runs differ in:"
"\n{field_diff}".format(
run_id=pipeline_run.run_id,
field_diff=_format_field_diff(field_diff),
),
)
return candidate_run
if self.has_run(pipeline_run.run_id):
return get_run()
try:
return self._run_storage.add_run(pipeline_run)
except DagsterRunAlreadyExists:
return get_run()
def add_run(self, pipeline_run: PipelineRun):
return self._run_storage.add_run(pipeline_run)
def handle_run_event(self, run_id: str, event: "DagsterEvent"):
return self._run_storage.handle_run_event(run_id, event)
def add_run_tags(self, run_id: str, new_tags: Dict[str, str]):
return self._run_storage.add_run_tags(run_id, new_tags)
def has_run(self, run_id: str) -> bool:
return self._run_storage.has_run(run_id)
def get_runs(
self, filters: PipelineRunsFilter = None, cursor: str = None, limit: int = None
) -> Iterable[PipelineRun]:
return self._run_storage.get_runs(filters, cursor, limit)
def get_runs_count(self, filters: PipelineRunsFilter = None) -> int:
return self._run_storage.get_runs_count(filters)
def get_run_groups(
self, filters: PipelineRunsFilter = None, cursor: str = None, limit: int = None
) -> Dict[str, Dict[str, Union[Iterable[PipelineRun], int]]]:
return self._run_storage.get_run_groups(filters=filters, cursor=cursor, limit=limit)
def get_run_records(
self,
filters: PipelineRunsFilter = None,
limit: int = None,
order_by: str = None,
ascending: bool = False,
) -> List[RunRecord]:
"""Return a list of run records stored in the run storage, sorted by the given column in given order.
Args:
filters (Optional[PipelineRunsFilter]): the filter by which to filter runs.
limit (Optional[int]): Number of results to get. Defaults to infinite.
order_by (Optional[str]): Name of the column to sort by. Defaults to id.
ascending (Optional[bool]): Sort the result in ascending order if True, descending
otherwise. Defaults to descending.
Returns:
List[RunRecord]: List of run records stored in the run storage.
"""
return self._run_storage.get_run_records(filters, limit, order_by, ascending)
def wipe(self):
self._run_storage.wipe()
self._event_storage.wipe()
def delete_run(self, run_id: str):
self._run_storage.delete_run(run_id)
self._event_storage.delete_events(run_id)
# event storage
def logs_after(self, run_id, cursor, of_type: "DagsterEventType" = None):
return self._event_storage.get_logs_for_run(run_id, cursor=cursor, of_type=of_type)
def all_logs(self, run_id, of_type: "DagsterEventType" = None):
return self._event_storage.get_logs_for_run(run_id, of_type=of_type)
def watch_event_logs(self, run_id, cursor, cb):
return self._event_storage.watch(run_id, cursor, cb)
# asset storage
def all_asset_keys(self):
return self._event_storage.all_asset_keys()
def has_asset_key(self, asset_key: AssetKey) -> bool:
return self._event_storage.has_asset_key(asset_key)
def events_for_asset_key(
self,
asset_key,
partitions=None,
before_cursor=None,
after_cursor=None,
cursor=None,
before_timestamp=None,
limit=None,
ascending=False,
):
check.inst_param(asset_key, "asset_key", AssetKey)
return self._event_storage.get_asset_events(
asset_key,
partitions,
before_cursor,
after_cursor,
limit,
before_timestamp=before_timestamp,
ascending=ascending,
include_cursor=True,
cursor=cursor,
)
def run_ids_for_asset_key(self, asset_key):
check.inst_param(asset_key, "asset_key", AssetKey)
return self._event_storage.get_asset_run_ids(asset_key)
def all_asset_tags(self):
return self._event_storage.all_asset_tags()
def get_asset_tags(self, asset_key):
check.inst_param(asset_key, "asset_key", AssetKey)
return self._event_storage.get_asset_tags(asset_key)
def wipe_assets(self, asset_keys):
check.list_param(asset_keys, "asset_keys", of_type=AssetKey)
for asset_key in asset_keys:
self._event_storage.wipe_asset(asset_key)
# event subscriptions
def get_logger(self):
logger = logging.Logger("__event_listener")
logger.addHandler(_EventListenerLogHandler(self))
logger.setLevel(logging.DEBUG)
return logger
def handle_new_event(self, event):
run_id = event.run_id
self._event_storage.store_event(event)
if event.is_dagster_event and event.dagster_event.is_pipeline_event:
self._run_storage.handle_run_event(run_id, event.dagster_event)
for sub in self._subscribers[run_id]:
sub(event)
def add_event_listener(self, run_id, cb):
self._subscribers[run_id].append(cb)
def report_engine_event(
self,
message,
pipeline_run,
engine_event_data=None,
cls=None,
step_key=None,
):
"""
Report an EngineEvent that occurred outside of a pipeline execution context.
"""
from dagster.core.events import EngineEventData, DagsterEvent, DagsterEventType
from dagster.core.events.log import EventLogEntry
check.class_param(cls, "cls")
check.str_param(message, "message")
check.inst_param(pipeline_run, "pipeline_run", PipelineRun)
engine_event_data = check.opt_inst_param(
engine_event_data,
"engine_event_data",
EngineEventData,
EngineEventData([]),
)
if cls:
message = "[{}] {}".format(cls.__name__, message)
log_level = logging.INFO
if engine_event_data and engine_event_data.error:
log_level = logging.ERROR
dagster_event = DagsterEvent(
event_type_value=DagsterEventType.ENGINE_EVENT.value,
pipeline_name=pipeline_run.pipeline_name,
message=message,
event_specific_data=engine_event_data,
)
event_record = EventLogEntry(
message=message,
user_message=message,
level=log_level,
pipeline_name=pipeline_run.pipeline_name,
run_id=pipeline_run.run_id,
error_info=None,
timestamp=time.time(),
step_key=step_key,
dagster_event=dagster_event,
)
self.handle_new_event(event_record)
return dagster_event
def report_run_canceling(self, run, message=None):
from dagster.core.events import DagsterEvent, DagsterEventType
from dagster.core.events.log import EventLogEntry
check.inst_param(run, "run", PipelineRun)
message = check.opt_str_param(
message,
"message",
"Sending pipeline termination request.",
)
canceling_event = DagsterEvent(
event_type_value=DagsterEventType.PIPELINE_CANCELING.value,
pipeline_name=run.pipeline_name,
message=message,
)
event_record = EventLogEntry(
message=message,
user_message="",
level=logging.INFO,
pipeline_name=run.pipeline_name,
run_id=run.run_id,
error_info=None,
timestamp=time.time(),
dagster_event=canceling_event,
)
self.handle_new_event(event_record)
def report_run_canceled(
self,
pipeline_run,
message=None,
):
from dagster.core.events import DagsterEvent, DagsterEventType
from dagster.core.events.log import EventLogEntry
check.inst_param(pipeline_run, "pipeline_run", PipelineRun)
message = check.opt_str_param(
message,
"mesage",
"This pipeline run has been marked as canceled from outside the execution context.",
)
dagster_event = DagsterEvent(
event_type_value=DagsterEventType.PIPELINE_CANCELED.value,
pipeline_name=pipeline_run.pipeline_name,
message=message,
)
event_record = EventLogEntry(
message=message,
user_message=message,
level=logging.ERROR,
pipeline_name=pipeline_run.pipeline_name,
run_id=pipeline_run.run_id,
error_info=None,
timestamp=time.time(),
dagster_event=dagster_event,
)
self.handle_new_event(event_record)
return dagster_event
def report_run_failed(self, pipeline_run, message=None):
from dagster.core.events import DagsterEvent, DagsterEventType
from dagster.core.events.log import EventLogEntry
check.inst_param(pipeline_run, "pipeline_run", PipelineRun)
message = check.opt_str_param(
message,
"message",
"This pipeline run has been marked as failed from outside the execution context.",
)
dagster_event = DagsterEvent(
event_type_value=DagsterEventType.PIPELINE_FAILURE.value,
pipeline_name=pipeline_run.pipeline_name,
message=message,
)
event_record = EventLogEntry(
message=message,
user_message=message,
level=logging.ERROR,
pipeline_name=pipeline_run.pipeline_name,
run_id=pipeline_run.run_id,
error_info=None,
timestamp=time.time(),
dagster_event=dagster_event,
)
self.handle_new_event(event_record)
return dagster_event
# directories
def file_manager_directory(self, run_id):
return self._local_artifact_storage.file_manager_dir(run_id)
def intermediates_directory(self, run_id):
return self._local_artifact_storage.intermediates_dir(run_id)
def storage_directory(self):
return self._local_artifact_storage.storage_dir
def schedules_directory(self):
return self._local_artifact_storage.schedules_dir
# Run coordinator
def submit_run(self, run_id, external_pipeline):
"""Submit a pipeline run to the coordinator.
This method delegates to the ``RunCoordinator``, configured on the instance, and will
call its implementation of ``RunCoordinator.submit_run()`` to send the run to the
coordinator for execution. Runs should be created in the instance (e.g., by calling
``DagsterInstance.create_run()``) *before* this method is called, and
should be in the ``PipelineRunStatus.NOT_STARTED`` state. They also must have a non-null
ExternalPipelineOrigin.
Args:
run_id (str): The id of the run.
"""
from dagster.core.host_representation import ExternalPipelineOrigin
run = self.get_run_by_id(run_id)
check.inst(
run.external_pipeline_origin,
ExternalPipelineOrigin,
"External pipeline origin must be set for submitted runs",
)
try:
submitted_run = self._run_coordinator.submit_run(
run, external_pipeline=external_pipeline
)
except:
from dagster.core.events import EngineEventData
error = serializable_error_info_from_exc_info(sys.exc_info())
self.report_engine_event(
error.message,
run,
EngineEventData.engine_error(error),
)
self.report_run_failed(run)
raise
return submitted_run
# Run launcher
def launch_run(self, run_id, external_pipeline):
"""Launch a pipeline run.
This method is typically called using `instance.submit_run` rather than being invoked
directly. This method delegates to the ``RunLauncher``, if any, configured on the instance,
and will call its implementation of ``RunLauncher.launch_run()`` to begin the execution of
the specified run. Runs should be created in the instance (e.g., by calling
``DagsterInstance.create_run()``) *before* this method is called, and should be in the
``PipelineRunStatus.NOT_STARTED`` state.
Args:
run_id (str): The id of the run to launch.
"""
run = self.get_run_by_id(run_id)
from dagster.core.events import EngineEventData, DagsterEvent, DagsterEventType
from dagster.core.events.log import EventLogEntry
launch_started_event = DagsterEvent(
event_type_value=DagsterEventType.PIPELINE_STARTING.value,
pipeline_name=run.pipeline_name,
)
event_record = EventLogEntry(
message="",
user_message="",
level=logging.INFO,
pipeline_name=run.pipeline_name,
run_id=run.run_id,
error_info=None,
timestamp=time.time(),
dagster_event=launch_started_event,
)
self.handle_new_event(event_record)
run = self.get_run_by_id(run_id)
try:
self._run_launcher.launch_run(run, external_pipeline=external_pipeline)
except:
error = serializable_error_info_from_exc_info(sys.exc_info())
self.report_engine_event(
error.message,
run,
EngineEventData.engine_error(error),
)
self.report_run_failed(run)
raise
return run
# Scheduler
def reconcile_scheduler_state(self, external_repository):
return self._scheduler.reconcile_scheduler_state(self, external_repository)
def start_schedule_and_update_storage_state(self, external_schedule):
return self._scheduler.start_schedule_and_update_storage_state(self, external_schedule)
def stop_schedule_and_update_storage_state(self, schedule_origin_id):
return self._scheduler.stop_schedule_and_update_storage_state(self, schedule_origin_id)
def stop_schedule_and_delete_from_storage(self, schedule_origin_id):
return self._scheduler.stop_schedule_and_delete_from_storage(self, schedule_origin_id)
def running_schedule_count(self, schedule_origin_id):
if self._scheduler:
return self._scheduler.running_schedule_count(self, schedule_origin_id)
return 0
def scheduler_debug_info(self):
from dagster.core.scheduler import SchedulerDebugInfo
from dagster.core.definitions.run_request import JobType
from dagster.core.scheduler.job import JobStatus
errors = []
schedules = []
for schedule_state in self.all_stored_job_state(job_type=JobType.SCHEDULE):
if schedule_state.status == JobStatus.RUNNING and not self.running_schedule_count(
schedule_state.job_origin_id
):
errors.append(
"Schedule {schedule_name} is set to be running, but the scheduler is not "
"running the schedule.".format(schedule_name=schedule_state.job_name)
)
elif schedule_state.status == JobStatus.STOPPED and self.running_schedule_count(
schedule_state.job_origin_id
):
errors.append(
"Schedule {schedule_name} is set to be stopped, but the scheduler is still running "
"the schedule.".format(schedule_name=schedule_state.job_name)
)
if self.running_schedule_count(schedule_state.job_origin_id) > 1:
errors.append(
"Duplicate jobs found: More than one job for schedule {schedule_name} are "
"running on the scheduler.".format(schedule_name=schedule_state.job_name)
)
schedule_info = {
schedule_state.job_name: {
"status": schedule_state.status.value,
"cron_schedule": schedule_state.job_specific_data.cron_schedule,
"repository_pointer": schedule_state.origin.get_repo_cli_args(),
"schedule_origin_id": schedule_state.job_origin_id,
"repository_origin_id": schedule_state.repository_origin_id,
}
}
schedules.append(yaml.safe_dump(schedule_info, default_flow_style=False))
return SchedulerDebugInfo(
scheduler_config_info=self._info_str_for_component("Scheduler", self.scheduler),
scheduler_info=self.scheduler.debug_info(),
schedule_storage=schedules,
errors=errors,
)
# Schedule Storage
def start_sensor(self, external_sensor):
from dagster.core.scheduler.job import JobState, JobStatus, SensorJobData
from dagster.core.definitions.run_request import JobType
job_state = self.get_job_state(external_sensor.get_external_origin_id())
if not job_state:
self.add_job_state(
JobState(
external_sensor.get_external_origin(),
JobType.SENSOR,
JobStatus.RUNNING,
SensorJobData(min_interval=external_sensor.min_interval_seconds),
)
)
elif job_state.status != JobStatus.RUNNING:
self.update_job_state(job_state.with_status(JobStatus.RUNNING))
def stop_sensor(self, job_origin_id):
from dagster.core.scheduler.job import JobStatus
job_state = self.get_job_state(job_origin_id)
if job_state:
self.update_job_state(job_state.with_status(JobStatus.STOPPED))
def all_stored_job_state(self, repository_origin_id=None, job_type=None):
return self._schedule_storage.all_stored_job_state(repository_origin_id, job_type)
def get_job_state(self, job_origin_id):
return self._schedule_storage.get_job_state(job_origin_id)
def add_job_state(self, job_state):
return self._schedule_storage.add_job_state(job_state)
def update_job_state(self, job_state):
return self._schedule_storage.update_job_state(job_state)
def delete_job_state(self, job_origin_id):
return self._schedule_storage.delete_job_state(job_origin_id)
def get_job_ticks(self, job_origin_id, before=None, after=None, limit=None):
return self._schedule_storage.get_job_ticks(
job_origin_id, before=before, after=after, limit=limit
)
def get_latest_job_tick(self, job_origin_id):
return self._schedule_storage.get_latest_job_tick(job_origin_id)
def create_job_tick(self, job_tick_data):
return self._schedule_storage.create_job_tick(job_tick_data)
def update_job_tick(self, tick):
return self._schedule_storage.update_job_tick(tick)
def get_job_tick_stats(self, job_origin_id):
return self._schedule_storage.get_job_tick_stats(job_origin_id)
def purge_job_ticks(self, job_origin_id, tick_status, before):
self._schedule_storage.purge_job_ticks(job_origin_id, tick_status, before)
def wipe_all_schedules(self):
if self._scheduler:
self._scheduler.wipe(self)
self._schedule_storage.wipe()
def logs_path_for_schedule(self, schedule_origin_id):
return self._scheduler.get_logs_path(self, schedule_origin_id)
def __enter__(self):
return self
def __exit__(self, exception_type, exception_value, traceback):
self.dispose()
def get_addresses_for_step_output_versions(self, step_output_versions):
"""
For each given step output, finds whether an output exists with the given
version, and returns its address if it does.
Args:
step_output_versions (Dict[(str, StepOutputHandle), str]):
(pipeline name, step output handle) -> version.
Returns:
Dict[(str, StepOutputHandle), str]: (pipeline name, step output handle) -> address.
For each step output, an address if there is one and None otherwise.
"""
return self._event_storage.get_addresses_for_step_output_versions(step_output_versions)
# dagster daemon
def add_daemon_heartbeat(self, daemon_heartbeat: "DaemonHeartbeat"):
"""Called on a regular interval by the daemon"""
self._run_storage.add_daemon_heartbeat(daemon_heartbeat)
def get_daemon_heartbeats(self) -> Dict[str, "DaemonHeartbeat"]:
"""Latest heartbeats of all daemon types"""
return self._run_storage.get_daemon_heartbeats()
def wipe_daemon_heartbeats(self):
self._run_storage.wipe_daemon_heartbeats()
def get_required_daemon_types(self):
from dagster.core.run_coordinator import QueuedRunCoordinator
from dagster.core.scheduler import DagsterDaemonScheduler
from dagster.daemon.daemon import SchedulerDaemon, SensorDaemon, BackfillDaemon
from dagster.daemon.run_coordinator.queued_run_coordinator_daemon import (
QueuedRunCoordinatorDaemon,
)
if self.is_ephemeral:
return []
daemons = [SensorDaemon.daemon_type(), BackfillDaemon.daemon_type()]
if isinstance(self.scheduler, DagsterDaemonScheduler):
daemons.append(SchedulerDaemon.daemon_type())
if isinstance(self.run_coordinator, QueuedRunCoordinator):
daemons.append(QueuedRunCoordinatorDaemon.daemon_type())
return daemons
# backfill
def get_backfills(self, status=None, cursor=None, limit=None):
return self._run_storage.get_backfills(status=status, cursor=cursor, limit=limit)
def get_backfill(self, backfill_id):
return self._run_storage.get_backfill(backfill_id)
def add_backfill(self, partition_backfill):
self._run_storage.add_backfill(partition_backfill)
def update_backfill(self, partition_backfill):
return self._run_storage.update_backfill(partition_backfill)
|
[] |
[] |
[
"DAGSTER_HOME"
] |
[]
|
["DAGSTER_HOME"]
|
python
| 1 | 0 | |
src/beer/ingest/db_setup.py
|
import psycopg2
import dotenv
import os
class DBSetup:
"""
Methods to remove / create tables
This script may be run directly to remove existing tables and re-create new, empty tables.
"""
def __init__(self, host, user, password, dbname):
self.conn = psycopg2.connect(dbname=dbname, host=host, user=user, password=password)
self.cur = self.conn.cursor()
def remove_tables_if_present(self):
"""
Drop the transactions, inventory, products, names, timestamps, and current_inventory tables (if they exist)
:return: None
"""
self.cur.execute('DROP TABLE IF EXISTS transactions;')
self.cur.execute('DROP TABLE IF EXISTS inventory;')
self.cur.execute('DROP TABLE IF EXISTS products;')
self.cur.execute('DROP TABLE IF EXISTS names')
self.cur.execute('DROP TABLE IF EXISTS timestamps')
self.cur.execute('DROP TABLE IF EXISTS current_inventory')
def create_tables(self):
""""
Create the producdts, sizes, categories, and inventory tables.
"""
create_timestamps = """CREATE TABLE timestamps (
timestamp TIMESTAMP PRIMARY KEY
);
"""
create_names = """CREATE TABLE names (
name_id SERIAL PRIMARY KEY,
tanczos_name TEXT
);
"""
create_products = """
CREATE TABLE products (
product_id SERIAL PRIMARY KEY,
name_id SERIAL REFERENCES names(name_id),
container_quantity INTEGER,
quantity_in_case INTEGER,
container_volume TEXT,
container_type TEXT,
tanczos_category TEXT,
date_added TIMESTAMP REFERENCES timestamps(timestamp)
);
"""
# inventory_id is BIGSERIAL because we generate approximately 200 million per year
create_inventory = """CREATE TABLE inventory (
inventory_id BIGSERIAL PRIMARY KEY,
product_id INTEGER REFERENCES products(product_id),
quantity NUMERIC(8,3),
retail NUMERIC(8,4),
case_retail NUMERIC(8,4),
timestamp TIMESTAMP REFERENCES timestamps(timestamp),
UNIQUE(product_id, timestamp)
);
"""
# pre_inventory_id may be -1 if new product
create_transactions = """CREATE TABLE transactions (
transaction_id BIGSERIAL PRIMARY KEY,
pre_inventory_id BIGSERIAL,
post_inventory_id BIGSERIAL REFERENCES inventory(inventory_id),
product_id SERIAL REFERENCES products(product_id),
transaction_quantity NUMERIC(8,3),
retail NUMERIC(8,4),
case_retail NUMERIC(8,4),
timestamp TIMESTAMP REFERENCES timestamps(timestamp)
);
"""
create_current_inventory = """
CREATE TABLE current_inventory (
name TEXT,
size TEXT,
category TEXT,
quantity NUMERIC(8,3),
retail NUMERIC(8,4),
case_retail NUMERIC(8,4),
case_pack INTEGER,
timestamp TIMESTAMP
);
"""
self.cur.execute(create_timestamps)
self.cur.execute(create_names)
self.cur.execute(create_products)
self.cur.execute(create_inventory)
self.cur.execute(create_transactions)
self.cur.execute(create_current_inventory)
def create_indexes(self):
self.cur.execute('CREATE INDEX inventory_timestamp_index ON inventory(timestamp);')
def commit(self):
"""
Commit changes and close the connection
:return:
"""
self.conn.commit()
self.cur.close()
self.conn.close()
if __name__ == '__main__':
dotenv.load_dotenv()
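# Expected .env keys read below (values here are placeholders, not real credentials):
#   DB_HOST=localhost
#   DB_USER=beer_ingest
#   DB_PASSWORD=<password>
#   DB_NAME=beer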
host = os.getenv('DB_HOST')
user = os.getenv('DB_USER')
password = os.getenv('DB_PASSWORD')
database_name = os.getenv('DB_NAME')
dbs = DBSetup(host, user, password, database_name)
dbs.remove_tables_if_present()
dbs.create_tables()
dbs.create_indexes()
dbs.commit()
|
[] |
[] |
[
"DB_NAME",
"DB_USER",
"DB_PASSWORD",
"DB_HOST"
] |
[]
|
["DB_NAME", "DB_USER", "DB_PASSWORD", "DB_HOST"]
|
python
| 4 | 0 | |
clackbot.py
|
import discord
import emoji
import os
import random
import requests
from discord.ext import commands
from dotenv import load_dotenv
from frogtips import api as frogtips_api
from uuid import UUID
# Set version number
VERSION_NUMBER = "0.8"
# Load environment variables from .env file
load_dotenv()
# Set constants from environment variables
DISCORD_TOKEN = os.getenv('DISCORD_TOKEN')
API_HOST = os.getenv('API_HOST')
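# Example .env contents (placeholder values; API_HOST is host[:port] with no scheme,
# since it is interpolated into http:// URLs further down):
#   DISCORD_TOKEN=<bot token from the Discord developer portal>
#   API_HOST=localhost:8080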
# Set up bot
bot = commands.Bot(command_prefix='!')
# Set up bot commands
@bot.command(name='version')
async def say_version(context):
"""Display clackbot version information."""
await context.send(f'clackbot {VERSION_NUMBER} © 2020 <@!572963354902134787>')
@bot.command(name='source')
async def say_source_url(context):
"""Display the link to clackbot's source code on GitHub."""
await context.send('https://github.com/doopqoob/clackbot')
@bot.command(name='follow')
async def join_voice(context):
"""Follow a user into a voice channel."""
if context.author.voice is None:
await context.send('You need to be in a voice channel to run that command here (confusing, huh?)')
return
channel = context.author.voice.channel
await channel.connect()
@bot.command(name='leave')
async def leave_voice(context):
"""Leave the currently-joined voice channel."""
await context.voice_client.disconnect()
@bot.command(name='clack')
async def play_clacking(context):
"""Play a 'clacking' sound into the currently-joined voice channel."""
# Get list of filenames from 'clacks' directory
(_, _, filenames) = next(os.walk('clacks/'))
# randomize list
random.shuffle(filenames)
# set up voice channel
guild = context.guild
voice_client: discord.VoiceClient = discord.utils.get(bot.voice_clients, guild=guild)
if voice_client is None:
await context.send("I need to be in a voice channel to do that. Join the desired voice channel "
+ "then type !follow in this channel.")
return
# set up audio source
audio_source = discord.FFmpegPCMAudio(f'clacks/{filenames[0]}')
if not voice_client.is_playing():
voice_client.play(audio_source, after=None)
@bot.command(name='stop')
async def stop_clacking(context):
"""Stop playing the 'clacking' sound."""
guild = context.guild
voice_client: discord.VoiceClient = discord.utils.get(bot.voice_clients, guild=guild)
if voice_client is None:
return
if voice_client.is_playing():
voice_client.stop()
@bot.command(name='poll')
async def poll(context, *args):
"""Create a poll with up to twenty answers (good lord)."""
# Check input for common error conditions
if len(args) == 0:
await context.send("ERROR: No poll question or answers specified.")
return
if len(args) == 1:
await context.send("ERROR: No answers to poll question specified.")
return
# ...and perhaps a not-so-common error condition
if len(args) > 21:
await context.send("WHOA! That's just too many answers (maximum 20).")
return
# Comb out the input into question and answers (which we treat differently below)
question = None
answers = []
for counter, value in enumerate(args):
if counter == 0:
question = value
else:
answers.append(value)
# Start preparing the poll output
question += "\n----------\n"
for counter, answer in enumerate(answers):
char = chr(97 + counter)
emoji_desc = f':regional_indicator_{char}:'
question += f'{emoji_desc}: {answer}\n'
message = await context.send(question)
# Add reaction emoji for poll answers
for counter, answer in enumerate(answers):
char = chr(97 + counter)
emoji_desc = f':regional_indicator_{char}:'.capitalize()
emoji_unicode = emoji.emojize(emoji_desc, use_aliases=True)
await message.add_reaction(emoji_unicode)
@bot.command(name='frogtip')
async def frog_tip(context, tip_id=None):
"""Display a FROG Tip"""
if tip_id is None:
tip = frogtips_api.Tips().get_next_tip()
else:
tip_id = int(tip_id)
tip = frogtips_api.Tip(tip_id)
formatted_tip = tip.get_formatted_tip()
formatted_tip += '\n'
formatted_tip += "https://frog.tips/#" + str(tip.get_id())
await context.send(formatted_tip)
@bot.command(name='kbdb')
async def query_kb_db(context, part_num=None):
"""Query SharktasticA's IBM keyboard database by part number"""
# Make sure the user has entered a part number
if part_num is None:
await context.send("ERROR: No keyboard part number provided.")
return
# These are all the fields we're going to request from the database
# (and their long names for when we display the data in chat)
fields_dict = {'pn': "Part Number",
'fru': "FRU Part Number",
'name': "Full Name",
'type': "Type",
'shorthand': "Shorthand",
'nickname': "Nickname",
'model': "Marketing Name",
'oem': "OEM (Manufacturer)",
'switches': "Switches",
'date': "First Appeared",
'keys': "Key Count",
'formfactor': "Form Factor",
'keycaps': "Keycap Type",
'case': "Case Colour",
'branding': "Branding",
'feet': "Feet Type",
'protocol': "Protocol",
'connection': "Connection",
'cable': "Cable",
'layout': "Layout/Language",
'mouse': "Int. Pointing Device",
'price': "Price",
'notes': "Notes"}
# Extract just the field names from fields_dict
requested_fields = fields_dict.keys()
# Turn the list of fields into a comma-delimited string:
fields = ','.join(requested_fields)
# Build the url
url = f'https://sharktastica.co.uk/kb_db_req.php?pn={part_num}&dat=JSON&fields={fields}'
# Query the DB
result = requests.get(url)
# Convert JSON into a python data structure
result = result.json()
# Handle situation where no results are returned
if result['hits'] == 0:
message = f"ERROR: Part number {part_num} not found in database.\n"
message += "Would you like to add it to the database? Just visit\n"
message += f"https://sharktastica.co.uk/kb_db_sub.php?pn={part_num}"
await context.send(message)
return
# Build the response
response = f"Here's what I found about part {part_num}:\n\n"
for kb in result['results']:
link = kb.pop('link', None)
for key in kb.keys():
if kb[key] is not None:
response += f'**{fields_dict[key]}:** {kb[key]}\n'
if link is not None:
response += f'\n**Permalink:** <{link}>\n'
response += '\n'
response += 'Learn about where this data came from: https://sharktastica.co.uk/about.php#Sources'
# aaand send it off!
await context.send(response)
@bot.command(name='kbsearch')
async def search_kb_db(context, *args):
"""Search SharktasticA's IBM keyboard database."""
# The query is the first argument given
# (Shark's database takes care of figuring out
# what query type this is)
query = args[0]
result_count = 5
# Build the URL
url = f'https://sharktastica.co.uk/kb_db_req.php?q={query}&c={result_count}' \
f'&dat=JSON&fields=pn,name,shorthand,layout,date'
# Because the query takes a long time to run,
# indicate to the user that something is happening.
await context.send(f"Searching for _{query}_. Just a moment...")
# display the 'typing' indicator while searching for user's query
async with context.channel.typing():
# Query the DB
result = requests.get(url)
# Convert JSON into a python data structure
result = result.json()
# Handle situation where no results are returned
if result['success'] is False:
message = f'ERROR: {result["message"]}'
await context.send(message)
return
# Handle other situation where no results are returned
if result['hits'] == 0:
message = f'ERROR: No results for _{query}_ in database.'
await context.send(message)
return
# Build the response
response = f"Here's what I found for _{query}_:\n"
hits = result['hits']
# Iterate through each keyboard in result.
for index, kb in enumerate(result['results']):
hits -= 1
response += f"> {index + 1}:\n" \
f"> _Part number_: {kb['pn']}\n" \
f"> _Name_: {kb['name']}\n" \
f"> _Shorthand_: {kb['shorthand']}\n" \
f"> _Layout_: {kb['layout']}\n" \
f"> _Date First Seen_: {kb['date']}\n" \
f"\n"
if hits > 0:
response += f'Plus an additional {hits} results.\n\n'
response += "You can type `!kbdb [part number]` to find out more about a specific keyboard.\n"
await context.send(response)
response = 'To learn how to search efficiently, see https://sharktastica.co.uk/kb_db_help.php#SearchingGuide'
await context.send(response)
response = 'Learn about where this data came from: https://sharktastica.co.uk/about.php#Sources'
await context.send(response)
@bot.command(name='quote')
async def get_quote(context, *args):
"""Get a quote from the database."""
# Get quote ID, if any, as first argument
quote_id = None
if len(args) == 1:
quote_id = args[0]
headers = {'Content-type': 'application/json'}
response = None
if quote_id:
# Validate input as a valid version 4 UUID before continuing
try:
UUID(quote_id, version=4)
except ValueError:
await context.send("Invalid quote ID")
return
# Get quote
params = {"id": quote_id}
response = requests.get(f'http://{API_HOST}/getquote', params=params, headers=headers)
else:
response = requests.get(f'http://{API_HOST}/getquote', headers=headers)
if response.status_code == 404:
await context.send("Quote not found")
return
# parse the JSON into a python object
quote = response.json()
# assemble the text we're going to send to discord
quote_text = ""
for line in quote['quote']:
quote_text += "> " + line + "\n"
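 " + line">
# <@!id> is Discord's user-mention syntax, so the quoted and submitting users render as clickable mentions.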
quote_text += "said by <@!" + str(quote['said_by']['id']) + ">\n"
quote_text += "added " + quote['added_at'] + " by <@!" + str(quote['added_by']['id']) + ">\n"
quote_text += "id " + quote['id'] + "\n"
quote_text += "score: " + str(quote['score'])
# and send it off!
message = await context.send(quote_text)
# make note of the message id to track voting
params = {"message_id": message.id,
"quote_id": quote['id']}
response = requests.get(f'http://{API_HOST}/addvotemessage', params=params, headers=headers)
if response.status_code != 201:
await context.send("Something went wrong preparing this quote for voting.")
return
# add voting buttons
emoji_desc = ':up_arrow:'
emoji_unicode = emoji.emojize(emoji_desc, use_aliases=True)
await message.add_reaction(emoji_unicode)
emoji_desc = ':down_arrow:'
emoji_unicode = emoji.emojize(emoji_desc, use_aliases=True)
await message.add_reaction(emoji_unicode)
emoji_desc = ':keycap_0:'
emoji_unicode = emoji.emojize(emoji_desc, use_aliases=True)
await message.add_reaction(emoji_unicode)
# Add a quote with !addquote
@bot.command(name='addquote')
async def add_quote(context, *args):
"""Add a quote to the quote DB. Simply reply to a message you want to quote, then type !addquote and press enter.
!addquote must be at the start of the first line of your reply."""
raw_quote = None
quoted_user_id = None
if context.message.reference:
if context.message.reference.cached_message:
raw_quote = context.message.reference.cached_message.content
quoted_user_id = context.message.reference.cached_message.author.id
if raw_quote is None:
await context.send("No text found in the message you replied to. Please try again with a different message.")
return
# Split up the raw quote by linefeed
split_quote = raw_quote.split('\n')
# Build the quote object we will submit to the database
quote = {}
quote['added_by'] = {}
quote['added_by']['id'] = context.message.author.id
quote['added_by']['handle'] = context.message.author.name
quote['added_by']['discriminator'] = int(context.message.author.discriminator)
quoted_user = bot.get_user(quoted_user_id)
quote['said_by'] = {}
quote['said_by']['id'] = quoted_user.id
quote['said_by']['handle'] = quoted_user.name
quote['said_by']['discriminator'] = int(quoted_user.discriminator)
quote['quote'] = []
for line in split_quote:
if line != "":
quote['quote'].append(line)
if not quote['quote']:
await context.send("No text found in the message you replied to. Please try again with a different message.")
return
# Submit to the database
headers = {'Content-type': 'application/json'}
response = requests.post(f'http://{API_HOST}/addquote', json=quote, headers=headers)
# Was the quote added successfully?
if response.status_code != 201:
await context.send("Something went wrong adding your quote.")
return
# Decode response into python object
response = response.json()
# Is the response a valid uuid?
try:
UUID(response['id'], version=4)
except ValueError:
# If not, failure.
await context.send("Something went wrong adding your quote.")
return
# If so, success!
# Rebuild the quote
rebuilt_quote = ""
for line in quote['quote']:
rebuilt_quote += '> ' + line + '\n'
rebuilt_quote += "said by <@!" + str(quote['said_by']['id']) + ">\n"
rebuilt_quote += "added by <@!" + str(quote['added_by']['id']) + ">\n"
rebuilt_quote += "id " + response['id'] + "\n"
rebuilt_quote += "score: 0"
# Send the quote to channel and make note of the message ID
# so that voting buttons can be added
quote_message = await context.send(rebuilt_quote)
params = {"message_id": quote_message.id,
"quote_id": response['id']}
# Prepare quote for voting
response = requests.get(f'http://{API_HOST}/addvotemessage', params=params, headers=headers)
if response.status_code != 201:
await context.send("Something went wrong preparing this quote for voting.")
return
# add voting buttons
emoji_desc = ':up_arrow:'
emoji_unicode = emoji.emojize(emoji_desc, use_aliases=True)
await quote_message.add_reaction(emoji_unicode)
emoji_desc = ':down_arrow:'
emoji_unicode = emoji.emojize(emoji_desc, use_aliases=True)
await quote_message.add_reaction(emoji_unicode)
emoji_desc = ':keycap_0:'
emoji_unicode = emoji.emojize(emoji_desc, use_aliases=True)
await quote_message.add_reaction(emoji_unicode)
return
@bot.command(name='delquote')
async def del_quote(context, *args):
"""Delete a quote from the database."""
# Get quote ID, if any, as first argument
quote_id = None
if len(args) == 1:
quote_id = args[0]
if not quote_id:
await context.send("You must provide a quote ID.")
return
# Validate input as a valid version 4 UUID before continuing
try:
UUID(quote_id, version=4)
except ValueError:
await context.send("Invalid quote ID")
return
# Get quote
params = {"id": quote_id}
headers = {'Content-type': 'application/json'}
response = requests.get(f'http://{API_HOST}/getquote', params=params, headers=headers)
quote = response.json()
if 'added_by' not in quote:
await context.send("Invalid quote ID")
return
# Make sure the user is authorized to delete the quote
# print(context.author.top_role.name)
if context.author.top_role.name != "Keyboard Lords":
if quote['added_by']['id'] != context.author.id and quote['said_by']['id'] != context.author.id:
await context.send("Only the person who submitted the quote, the person named in the quote, " +
f"or an administrator may delete quote {quote_id}")
return
# Delete the quote
response = requests.get(f'http://{API_HOST}/delquote', params=params, headers=headers)
if response.status_code == 200:
await context.send("Successfully deleted quote " + quote_id)
return
else:
await context.send("Something went wrong deleting quote " + quote_id)
return
@bot.event
async def on_raw_reaction_add(payload):
"""Called when a discord user adds a reaction to a message"""
# If it's the bot adding a reaction, ignore it
if payload.member.id == bot.user.id:
return
# Convert emoji into something pronounceable
emoji_name = emoji.demojize(str(payload.emoji))
# If it's not an upvote, a downvote, or a 0 vote, ignore it
allowed_emoji = [':up_arrow:', ':down_arrow:', ':keycap_0:']
if emoji_name not in allowed_emoji:
return
# Build the ballot
ballot = {"message_id": payload.message_id,
"voter_id": {
"id": payload.member.id,
"handle": payload.member.display_name,
"discriminator": int(payload.member.discriminator)}}
# Determine the vote
if emoji_name == ':up_arrow:':
ballot['vote'] = 1
elif emoji_name == ':down_arrow:':
ballot['vote'] = -1
elif emoji_name == ':keycap_0:':
ballot['vote'] = 0
# Submit to the database
headers = {'Content-type': 'application/json'}
response = requests.post(f'http://{API_HOST}/vote', json=ballot, headers=headers)
if response.status_code != 201:
# TODO: respond with real error message
print(f"error recording vote for {ballot['voter_id']['handle']} ({ballot['voter_id']['id']})")
print(response.content)
return
# run the bot
bot.run(DISCORD_TOKEN)
|
[] |
[] |
[
"DISCORD_TOKEN",
"API_HOST"
] |
[]
|
["DISCORD_TOKEN", "API_HOST"]
|
python
| 2 | 0 | |
research/compression/distillation/resnet.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains definitions for the preactivation form of Residual Networks
(also known as ResNet v2).
Residual networks (ResNets) were originally proposed in:
[1] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun
Deep Residual Learning for Image Recognition. arXiv:1512.03385
The full preactivation 'v2' ResNet variant implemented in this module was
introduced by:
[2] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun
Identity Mappings in Deep Residual Networks. arXiv: 1603.05027
The key difference of the full preactivation 'v2' variant compared to the
'v1' variant in [1] is the use of batch normalization before every weight layer
rather than after.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os
import tensorflow as tf
_BATCH_NORM_DECAY = 0.997
_BATCH_NORM_EPSILON = 1e-5
################################################################################
# Functions for input processing.
################################################################################
def process_record_dataset(dataset, is_training, batch_size, shuffle_buffer,
parse_record_fn, num_epochs=1, num_parallel_calls=1):
"""Given a Dataset with raw records, parse each record into images and labels,
and return an iterator over the records.
Args:
dataset: A Dataset representing raw records
is_training: A boolean denoting whether the input is for training.
batch_size: The number of samples per batch.
shuffle_buffer: The buffer size to use when shuffling records. A larger
value results in better randomness, but smaller values reduce startup
time and use less memory.
parse_record_fn: A function that takes a raw record and returns the
corresponding (image, label) pair.
num_epochs: The number of epochs to repeat the dataset.
num_parallel_calls: The number of records that are processed in parallel.
This can be optimized per data set but for generally homogeneous data
sets, should be approximately the number of available CPU cores.
Returns:
Dataset of (image, label) pairs ready for iteration.
"""
# We prefetch a batch at a time. This can help smooth out the time taken to
# load input files as we go through shuffling and processing.
dataset = dataset.prefetch(buffer_size=batch_size)
if is_training:
# Shuffle the records. Note that we shuffle before repeating to ensure
# that the shuffling respects epoch boundaries.
dataset = dataset.shuffle(buffer_size=shuffle_buffer)
# If we are training over multiple epochs before evaluating, repeat the
# dataset for the appropriate number of epochs.
dataset = dataset.repeat(num_epochs)
# Parse the raw records into images and labels
dataset = dataset.map(lambda value: parse_record_fn(value, is_training),
num_parallel_calls=num_parallel_calls)
dataset = dataset.batch(batch_size)
# Operations between the final prefetch and the get_next call to the iterator
# will happen synchronously during run time. We prefetch here again to
# background all of the above processing work and keep it out of the
# critical training path.
dataset = dataset.prefetch(1)
return dataset
################################################################################
# Functions building the ResNet model.
################################################################################
def batch_norm_relu(inputs, training, data_format):
"""Performs a batch normalization followed by a ReLU."""
# We set fused=True for a significant performance boost. See
# https://www.tensorflow.org/performance/performance_guide#common_fused_ops
inputs = tf.layers.batch_normalization(
inputs=inputs, axis=1 if data_format == 'channels_first' else 3,
momentum=_BATCH_NORM_DECAY, epsilon=_BATCH_NORM_EPSILON, center=True,
scale=True, training=training, fused=True)
inputs = tf.nn.relu(inputs)
return inputs
def fixed_padding(inputs, kernel_size, data_format):
"""Pads the input along the spatial dimensions independently of input size.
Args:
inputs: A tensor of size [batch, channels, height_in, width_in] or
[batch, height_in, width_in, channels] depending on data_format.
kernel_size: The kernel to be used in the conv2d or max_pool2d operation.
Should be a positive integer.
data_format: The input format ('channels_last' or 'channels_first').
Returns:
A tensor with the same format as the input with the data either intact
(if kernel_size == 1) or padded (if kernel_size > 1).
"""
pad_total = kernel_size - 1
pad_beg = pad_total // 2
pad_end = pad_total - pad_beg
if data_format == 'channels_first':
padded_inputs = tf.pad(inputs, [[0, 0], [0, 0],
[pad_beg, pad_end], [pad_beg, pad_end]])
else:
padded_inputs = tf.pad(inputs, [[0, 0], [pad_beg, pad_end],
[pad_beg, pad_end], [0, 0]])
return padded_inputs
def conv2d_fixed_padding(inputs, filters, kernel_size, strides, data_format):
"""Strided 2-D convolution with explicit padding."""
# The padding is consistent and is based only on `kernel_size`, not on the
# dimensions of `inputs` (as opposed to using `tf.layers.conv2d` alone).
if strides > 1:
inputs = fixed_padding(inputs, kernel_size, data_format)
return tf.layers.conv2d(
inputs=inputs, filters=filters, kernel_size=kernel_size, strides=strides,
padding=('SAME' if strides == 1 else 'VALID'), use_bias=False,
kernel_initializer=tf.variance_scaling_initializer(),
data_format=data_format)
def building_block(inputs, filters, training, projection_shortcut, strides,
data_format):
"""Standard building block for residual networks with BN before convolutions.
Args:
inputs: A tensor of size [batch, channels, height_in, width_in] or
[batch, height_in, width_in, channels] depending on data_format.
filters: The number of filters for the convolutions.
training: A Boolean for whether the model is in training or inference
mode. Needed for batch normalization.
projection_shortcut: The function to use for projection shortcuts
(typically a 1x1 convolution when downsampling the input).
strides: The block's stride. If greater than 1, this block will ultimately
downsample the input.
data_format: The input format ('channels_last' or 'channels_first').
Returns:
The output tensor of the block.
"""
shortcut = inputs
inputs = batch_norm_relu(inputs, training, data_format)
# The projection shortcut should come after the first batch norm and ReLU
# since it performs a 1x1 convolution.
if projection_shortcut is not None:
shortcut = projection_shortcut(inputs)
inputs = conv2d_fixed_padding(
inputs=inputs, filters=filters, kernel_size=3, strides=strides,
data_format=data_format)
inputs = batch_norm_relu(inputs, training, data_format)
inputs = conv2d_fixed_padding(
inputs=inputs, filters=filters, kernel_size=3, strides=1,
data_format=data_format)
return inputs + shortcut
def bottleneck_block(inputs, filters, training, projection_shortcut,
strides, data_format):
"""Bottleneck block variant for residual networks with BN before convolutions.
Args:
inputs: A tensor of size [batch, channels, height_in, width_in] or
[batch, height_in, width_in, channels] depending on data_format.
filters: The number of filters for the first two convolutions. Note
that the third and final convolution will use 4 times as many filters.
training: A Boolean for whether the model is in training or inference
mode. Needed for batch normalization.
projection_shortcut: The function to use for projection shortcuts
(typically a 1x1 convolution when downsampling the input).
strides: The block's stride. If greater than 1, this block will ultimately
downsample the input.
data_format: The input format ('channels_last' or 'channels_first').
Returns:
The output tensor of the block.
"""
shortcut = inputs
inputs = batch_norm_relu(inputs, training, data_format)
# The projection shortcut should come after the first batch norm and ReLU
# since it performs a 1x1 convolution.
if projection_shortcut is not None:
shortcut = projection_shortcut(inputs)
inputs = conv2d_fixed_padding(
inputs=inputs, filters=filters, kernel_size=1, strides=1,
data_format=data_format)
inputs = batch_norm_relu(inputs, training, data_format)
inputs = conv2d_fixed_padding(
inputs=inputs, filters=filters, kernel_size=3, strides=strides,
data_format=data_format)
inputs = batch_norm_relu(inputs, training, data_format)
inputs = conv2d_fixed_padding(
inputs=inputs, filters=4 * filters, kernel_size=1, strides=1,
data_format=data_format)
return inputs + shortcut
def block_layer(inputs, filters, block_fn, blocks, strides, training, name,
data_format):
"""Creates one layer of blocks for the ResNet model.
Args:
inputs: A tensor of size [batch, channels, height_in, width_in] or
[batch, height_in, width_in, channels] depending on data_format.
filters: The number of filters for the first convolution of the layer.
block_fn: The block to use within the model, either `building_block` or
`bottleneck_block`.
blocks: The number of blocks contained in the layer.
strides: The stride to use for the first convolution of the layer. If
greater than 1, this layer will ultimately downsample the input.
training: Either True or False, whether we are currently training the
model. Needed for batch norm.
name: A string name for the tensor output of the block layer.
data_format: The input format ('channels_last' or 'channels_first').
Returns:
The output tensor of the block layer.
"""
# Bottleneck blocks end with 4x the number of filters as they start with
filters_out = 4 * filters if block_fn is bottleneck_block else filters
def projection_shortcut(inputs):
return conv2d_fixed_padding(
inputs=inputs, filters=filters_out, kernel_size=1, strides=strides,
data_format=data_format)
# Only the first block per block_layer uses projection_shortcut and strides
inputs = block_fn(inputs, filters, training, projection_shortcut, strides,
data_format)
for _ in range(1, blocks):
inputs = block_fn(inputs, filters, training, None, 1, data_format)
return tf.identity(inputs, name)
class Model(object):
"""Base class for building the Resnet v2 Model.
"""
def __init__(self, resnet_size, num_classes, num_filters, kernel_size,
conv_stride, first_pool_size, first_pool_stride, probe_pool_size,
second_pool_size, second_pool_stride, probe_pool_stride,
block_fn, block_sizes, pool_type, num_probes,
block_strides, final_size, data_format=None):
"""Creates a model for classifying an image.
Args:
resnet_size: A single integer for the size of the ResNet model.
probe_pool_size: Number to pool the probes by.
probe_pool_stride: stride size for the probe pooling layer
num_classes: The number of classes used as labels.
num_filters: The number of filters to use for the first block layer
of the model. This number is then doubled for each subsequent block
layer.
kernel_size: The kernel size to use for convolution.
conv_stride: stride size for the initial convolutional layer
first_pool_size: Pool size to be used for the first pooling layer.
If none, the first pooling layer is skipped.
first_pool_stride: stride size for the first pooling layer. Not used
if first_pool_size is None.
second_pool_size: Pool size to be used for the second pooling layer.
second_pool_stride: stride size for the final pooling layer
block_fn: Which block layer function should be used? Pass in one of
the two functions defined above: building_block or bottleneck_block
block_sizes: A list containing n values, where n is the number of sets of
block layers desired. Each value should be the number of blocks in the
i-th set.
pool_type: 'max' or 'mean'.
num_probes: The number of equally spaced probes to attach to the network.
block_strides: List of integers representing the desired stride size for
each of the sets of block layers. Should be same length as block_sizes.
final_size: The expected size of the model after the second pooling.
data_format: Input format ('channels_last', 'channels_first', or None).
If set to None, the format is dependent on whether a GPU is available.
"""
self.resnet_size = resnet_size
if not data_format:
data_format = (
'channels_first' if tf.test.is_built_with_cuda() else 'channels_last')
self.data_format = data_format
self.num_classes = num_classes
self.num_filters = num_filters
self.kernel_size = kernel_size
self.conv_stride = conv_stride
self.first_pool_size = first_pool_size
self.first_pool_stride = first_pool_stride
self.second_pool_size = second_pool_size
self.second_pool_stride = second_pool_stride
self.probe_pool_size = probe_pool_size
self.probe_pool_stride = probe_pool_stride
self.block_fn = block_fn
self.block_sizes = block_sizes
self.block_strides = block_strides
self.final_size = final_size
self.pool_type = pool_type
self.num_probes = num_probes
def __call__(self, inputs, training):
"""Add operations to classify a batch of input images.
Args:
inputs: A Tensor representing a batch of input images.
training: A boolean. Set to True to add operations required only when
training the classifier.
Returns:
A logits Tensor with shape [<batch_size>, self.num_classes].
"""
with tf.variable_scope('input_transforms'):
if self.data_format == 'channels_first':
# Convert the inputs from channels_last (NHWC) to channels_first (NCHW).
# This provides a large performance boost on GPU. See
# https://www.tensorflow.org/performance/performance_guide#data_formats
inputs = tf.transpose(inputs, [0, 3, 1, 2])
with tf.variable_scope('mentor') as scope:
# mentor
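# The mentor tower is built in its own variable scope so its weights can later be
# collected and optimized independently of the mentee.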
mentor = conv2d_fixed_padding(
inputs=inputs, filters=self.num_filters, kernel_size=self.kernel_size,
strides=self.conv_stride, data_format=self.data_format)
mentor = tf.identity(mentor, 'mentor_' + 'initial_conv')
if self.first_pool_size:
mentor = tf.layers.max_pooling2d(
inputs=mentor, pool_size=self.first_pool_size,
strides=self.first_pool_stride, padding='SAME',
data_format=self.data_format)
mentor = tf.identity(mentor, 'mentor_' + 'initial_max_pool')
mentor_probes = []
probe_count = 0
for i, num_blocks in enumerate(self.block_sizes[0]):
num_filters = self.num_filters * (2**i)
mentor = block_layer(
inputs=mentor, filters=num_filters, block_fn=self.block_fn,
blocks=num_blocks, strides=self.block_strides[i],
training=training, name='mentor_' + 'block_layer{}'.format(i + 1),
data_format=self.data_format)
if probe_count < self.num_probes:
if self.probe_pool_size > 0:
if self.pool_type == 'max':
mentor_probe = tf.layers.max_pooling2d(
inputs=mentor, pool_size=self.probe_pool_size,
strides=self.probe_pool_stride, padding='SAME',
data_format=self.data_format)
mentor_probe = tf.identity(mentor_probe, 'mentor_'+'probe_max_pool_' \
+ str(i))
elif self.pool_type == 'mean':
mentor_probe = tf.layers.average_pooling2d(
inputs=mentor, pool_size=self.probe_pool_size,
strides=self.probe_pool_stride, padding='SAME',
data_format=self.data_format)
mentor_probe = tf.identity(mentor_probe, 'mentor_'+'probe_mean_pool_' \
+ str(i))
else:
mentor_probe = mentor
mentor_probes.append(mentor_probe)
probe_count+=1
mentor = batch_norm_relu(mentor, training, self.data_format)
mentor = tf.layers.average_pooling2d(
inputs=mentor, pool_size=self.second_pool_size,
strides=self.second_pool_stride, padding='VALID',
data_format=self.data_format)
mentor = tf.identity(mentor, 'mentor_' + 'final_avg_pool')
mentor = tf.reshape(mentor, [-1, self.final_size])
mentor = tf.layers.dense(inputs=mentor, units=self.num_classes)
mentor = tf.identity(mentor, 'mentor_' + 'final_dense')
mentor_probes.append(mentor)
with tf.variable_scope('mentee') as scope:
# mentee
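# The mentee tower mirrors the mentor but uses block_sizes[1], which is typically a
# smaller configuration.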
mentee = conv2d_fixed_padding(
inputs=inputs, filters=self.num_filters, kernel_size=self.kernel_size,
strides=self.conv_stride, data_format=self.data_format)
mentee = tf.identity(mentee, 'mentee_' + 'initial_conv')
if self.first_pool_size:
mentee = tf.layers.max_pooling2d(
inputs=mentee, pool_size=self.first_pool_size,
strides=self.first_pool_stride, padding='SAME',
data_format=self.data_format)
mentee = tf.identity(mentee, 'mentee_' + 'initial_max_pool')
probe_count = 0
mentee_probes = []
for i, num_blocks in enumerate(self.block_sizes[1]):
num_filters = self.num_filters * (2**i)
mentee = block_layer(
inputs=mentee, filters=num_filters, block_fn=self.block_fn,
blocks=num_blocks, strides=self.block_strides[i],
training=training, name='mentee_' + 'block_layer{}'.format(i + 1),
data_format=self.data_format)
if probe_count < self.num_probes:
if self.probe_pool_size > 0:
if self.pool_type == 'max':
mentee_probe = tf.layers.max_pooling2d(
inputs=mentee, pool_size=self.probe_pool_size,
strides=self.probe_pool_stride, padding='SAME',
data_format=self.data_format)
mentee_probe = tf.identity(mentee_probe, 'mentee_'+'probe_max_pool_' \
+ str(i))
elif self.pool_type == 'mean':
mentee_probe = tf.layers.average_pooling2d(
inputs=mentee, pool_size=self.probe_pool_size,
strides=self.probe_pool_stride, padding='SAME',
data_format=self.data_format)
mentee_probe = tf.identity(mentee_probe, 'mentee_'+'probe_mean_pool_' \
+ str(i))
else:
mentee_probe=mentee
mentee_probes.append(mentee_probe)
probe_count+=1
mentee = batch_norm_relu(mentee, training, self.data_format)
mentee = tf.layers.average_pooling2d(
inputs=mentee, pool_size=self.second_pool_size,
strides=self.second_pool_stride, padding='VALID',
data_format=self.data_format)
mentee = tf.identity(mentee, 'mentee_' + 'final_avg_pool')
mentee = tf.reshape(mentee, [-1, self.final_size])
mentee = tf.layers.dense(inputs=mentee, units=self.num_classes)
mentee = tf.identity(mentee, 'mentee_' + 'final_dense')
mentee_probes.append(mentee)
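# The probe cost accumulates the mean-squared error between each pair of mentor/mentee
# probe activations (including the final logits appended above).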
probe_cost = tf.constant(0.)
for mentor_feat, mentee_feat in zip(mentor_probes, mentee_probes):
probe_cost = probe_cost + tf.losses.mean_squared_error (
mentor_feat, mentee_feat)
return (mentor, mentee, probe_cost)
################################################################################
# Functions for running training/eval/validation loops for the model.
################################################################################
def learning_rate_with_decay(
batch_size, batch_denom, num_images, boundary_epochs, decay_rates):
"""Get a learning rate that decays step-wise as training progresses.
Args:
batch_size: the number of examples processed in each training batch.
batch_denom: this value will be used to scale the base learning rate.
`0.01 * batch size` is divided by this number, such that when
batch_denom == batch_size, the initial learning rate will be 0.01.
num_images: total number of images that will be used for training.
boundary_epochs: list of ints representing the epochs at which we
decay the learning rate.
decay_rates: list of floats representing the decay rates to be used
for scaling the learning rate. Should be the same length as
boundary_epochs.
Returns:
A function that takes a single argument - the number of batches
trained so far (global_step) - and returns the learning rate to be used
for training the next batch.
"""
with tf.variable_scope('learning_rate'):
initial_learning_rate = 0.01 * batch_size / batch_denom
batches_per_epoch = num_images / batch_size
# Scale the learning rate at each boundary epoch by the corresponding decay rate.
boundaries = [int(batches_per_epoch * epoch) for epoch in boundary_epochs]
vals = [initial_learning_rate * decay for decay in decay_rates]
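# Piecewise-constant schedule: the rate switches to the next scaled value at each boundary step.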
def learning_rate_fn(global_step):
global_step = tf.cast(global_step, tf.int32)
rval = tf.train.piecewise_constant(global_step, boundaries, vals)
return rval
return learning_rate_fn
def learning_rate_with_decay_2( initial_learning_rate,
batch_size, batch_denom, num_images, boundary_epochs, decay_rates):
"""Get a learning rate that decays step-wise as training progresses.
Args:
initial_learning_rate: the starting learning rate before any decay is applied.
batch_size: the number of examples processed in each training batch.
batch_denom: kept for signature compatibility with learning_rate_with_decay;
it is not used here because the initial learning rate is supplied directly.
num_images: total number of images that will be used for training.
boundary_epochs: list of ints representing the epochs at which we
decay the learning rate.
decay_rates: list of floats representing the decay rates to be used
for scaling the learning rate. Should be the same length as
boundary_epochs.
Returns:
A function that takes a single argument - the number of batches
trained so far (global_step) - and returns the learning rate to be used
for training the next batch.
"""
with tf.variable_scope('learning_rate'):
batches_per_epoch = num_images / batch_size
boundaries = [int(batches_per_epoch * epoch) for epoch in boundary_epochs]
vals = [initial_learning_rate * decay for decay in decay_rates]
def learning_rate_fn(global_step):
global_step = tf.cast(global_step, tf.int32)
rval = tf.train.piecewise_constant(global_step, boundaries, vals)
return rval
return learning_rate_fn
def distillation_coeff_fn(intital_distillation, global_step):
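# Smoothly decays the distillation coefficient (staircase=False):
# initial * 0.55 ** (global_step / 100000).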
global_step = tf.cast(global_step, tf.int32)
rval = tf.train.exponential_decay (
intital_distillation,
global_step,
100000,
0.55,
staircase = False)
return rval
def resnet_model_fn(features, labels, mode, model_class, trainee,
distillation_coeff, probes_coeff, resnet_size, num_probes,
weight_decay_coeff, learning_rate_fn_mentor,
learning_rate_fn_mentee, learning_rate_fn_finetune,
momentum, data_format, pool_probes, pool_type,
temperature=1, optimizer='momentum',
loss_filter_fn=None):
"""Shared functionality for different resnet model_fns.
Initializes the ResnetModel representing the model layers
and uses that model to build the necessary EstimatorSpecs for
the `mode` in question. For training, this means building losses,
the optimizer, and the train op that get passed into the EstimatorSpec.
For evaluation and prediction, the EstimatorSpec is returned without
a train op, but with the necessary parameters for the given mode.
Args:
features: tensor representing input images
labels: tensor representing class labels for all input images
mode: current estimator mode; should be one of
`tf.estimator.ModeKeys.TRAIN`, `EVALUATE`, `PREDICT`
model_class: a class representing a TensorFlow model that has a __call__
function. We assume here that this is a subclass of ResnetModel.
trainee: A string, one of `'mentor'`, `'mentee'`, or `'finetune'`.
resnet_size: A list of two integers for the size of the ResNet model for
mentor followed by mentee.
weight_decay_coeff: weight decay rate used to regularize learned variables.
distillation_coeff: Weight for distillation.
probes_coeff: weight for probes.
learning_rate_fn_mentor: function that returns the current learning rate given
the current global_step
learning_rate_fn_mentee: function that returns the current learning rate given
the current global_step
learning_rate_fn_finetune: function that returns the current learning rate given
the current global_step
num_probes: How many equally spaced probes do we need.
momentum: momentum term used for optimization.
data_format: Input format ('channels_last', 'channels_first', or None).
If set to None, the format is dependent on whether a GPU is available.
temperature: A value of temperature to use for distillation. Defaults to 1
so that it will remain backward compatible.
loss_filter_fn: function that takes a string variable name and returns
True if the var should be included in loss calculation, and False
otherwise. If None, batch_normalization variables will be excluded
from the loss.
pool_probes: Downsampling for probes.
pool_type: 'max' or 'mean'.
optimizer: 'adam', 'adadelta' and 'momentum' are options.
Returns:
EstimatorSpec parameterized according to the input params and the
current mode.
"""
with tf.variable_scope('inputs'):
# Generate a summary node for the images
tf.summary.image('images', features, max_outputs=6)
model = model_class(resnet_size = resnet_size,
pool_probes = pool_probes,
pool_type = pool_type,
num_probes = num_probes,
data_format = data_format)
logits_mentor, logits_mentee, probe_cost = model(features,
mode == tf.estimator.ModeKeys.TRAIN)
predictions_mentor = {
'classes': tf.argmax(logits_mentor, axis=1),
'probabilities': tf.nn.softmax(logits_mentor,
name='softmax_tensor_mentor'),
}
predictions_mentee = {
'classes': tf.argmax(logits_mentee, axis=1),
'probabilities': tf.nn.softmax(logits_mentee,
name='softmax_tensor_mentee'),
}
if mode == tf.estimator.ModeKeys.PREDICT:
if trainee == 'mentor':
return tf.estimator.EstimatorSpec(mode=mode,
predictions=predictions_mentor)
elif trainee == 'mentee' or trainee == 'finetune':
return tf.estimator.EstimatorSpec(mode=mode,
predictions=predictions_mentee)
with tf.variable_scope('distillery'):
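# Hinton-style distillation: the mentor's temperature-softened probabilities act as
# soft targets for the mentee's temperature-scaled logits.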
temperature_softmax_mentor = tf.nn.softmax((tf.div(logits_mentor,
temperature)), name ='softmax_temperature_tensor_mentor')
distillation_loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(
logits = tf.div(logits_mentee,temperature),
labels = temperature_softmax_mentor))
tf.identity(distillation_loss, name='distillation_loss')
tf.summary.scalar('distillation_loss', distillation_loss)
tf.summary.scalar('scaled_distillation_loss', distillation_coeff *
distillation_loss)
with tf.variable_scope('cross_entropy'):
# Calculate loss, which includes softmax cross entropy and L2 regularization.
cross_entropy_mentor = tf.losses.softmax_cross_entropy(
logits=logits_mentor, onehot_labels=labels)
# Create a tensor named cross_entropy for logging purposes.
tf.identity(cross_entropy_mentor, name='cross_entropy_mentor')
tf.summary.scalar('cross_entropy_mentor', cross_entropy_mentor)
cross_entropy_mentee = tf.losses.softmax_cross_entropy(
logits=logits_mentee, onehot_labels=labels)
tf.identity(cross_entropy_mentee, name='cross_entropy_mentee')
tf.summary.scalar('cross_entropy_mentee', cross_entropy_mentee)
# If no loss_filter_fn is passed, assume we want the default behavior,
# which is that batch_normalization variables are excluded from loss.
if not loss_filter_fn:
def loss_filter_fn(name):
return 'batch_normalization' not in name
mentor_variables=tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,
scope='mentor')
mentee_variables=tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,
scope='mentee')
with tf.variable_scope('regularizers'):
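# L2 weight decay is computed separately for the mentor and mentee variable sets;
# batch-norm parameters are excluded via loss_filter_fn.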
if weight_decay_coeff > 0:
l2_mentor = weight_decay_coeff * tf.add_n(
[tf.nn.l2_loss(v) for v in mentor_variables
if loss_filter_fn(v.name)])
l2_mentee = weight_decay_coeff * tf.add_n(
[tf.nn.l2_loss(v) for v in mentee_variables
if loss_filter_fn(v.name)])
else:
l2_mentor = tf.constant(0.)
l2_mentee = tf.constant(0.)
if mode == tf.estimator.ModeKeys.TRAIN:
with tf.variable_scope('learning_rates'):
global_step = tf.train.get_or_create_global_step()
learning_rate_mentor = learning_rate_fn_mentor(global_step)
learning_rate_mentee = learning_rate_fn_mentee(global_step)
learning_rate_finetune = learning_rate_fn_finetune(global_step)
tf.identity(learning_rate_mentor, name='learning_rate_mentor' )
tf.summary.scalar('learning_rate_mentor', learning_rate_mentor)
tf.identity(learning_rate_mentee, name='learning_rate_mentee' )
tf.summary.scalar('learning_rate_mentee', learning_rate_mentee)
tf.identity(learning_rate_finetune, name='learning_rate_finetune' )
tf.summary.scalar('learning_rate_finetune', learning_rate_finetune)
with tf.variable_scope('mentor_cumulative_loss'):
# Add weight decay to the loss (distillation applies only to the mentee below).
loss_mentor = cross_entropy_mentor + l2_mentor
tf.summary.scalar('objective', loss_mentor)
with tf.variable_scope('mentee_cumulative_loss'):
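# Mentee objective = cross-entropy + L2 + (decayed distillation coefficient *
# distillation loss) + (scaled probe cost).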
distillation_coeff_decayed = distillation_coeff_fn(distillation_coeff,
global_step)
probe_scale = probes_coeff * distillation_coeff_decayed
tf.identity(probe_cost, name='probe_cost')
tf.summary.scalar('probe_loss', probe_cost)
tf.summary.scalar('scaled_probe_loss', probe_scale *
probe_cost)
tf.identity(distillation_coeff_decayed, name='distillation_coeff_decayed')
tf.summary.scalar('coeff',distillation_coeff_decayed)
loss_mentee = cross_entropy_mentee + l2_mentee + \
distillation_coeff_decayed * distillation_loss + \
probe_scale * probe_cost
tf.summary.scalar('objective', loss_mentee)
with tf.variable_scope('mentee_finetune'):
loss_finetune = cross_entropy_mentee + l2_mentee
tf.summary.scalar('objective', loss_finetune)
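# One optimizer per training phase: optimizer[0] drives the mentor, optimizer[1] the
# mentee, and optimizer[2] the finetuning pass.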
if optimizer[0] == 'momentum':
with tf.variable_scope('mentor_momentum_optimizer'):
optimizer_mentor = tf.train.MomentumOptimizer(
learning_rate=learning_rate_mentor,
momentum=momentum)
elif optimizer[0] == 'adam':
with tf.variable_scope('mentor_adam_optimizer'):
optimizer_mentor = tf.train.AdamOptimizer(
learning_rate=learning_rate_mentor)
elif optimizer[0] == 'adadelta':
with tf.variable_scope('mentor_adadelta_optimizer'):
optimizer_mentor = tf.train.AdadeltaOptimizer(
learning_rate=learning_rate_mentor)
if optimizer[1] == 'momentum':
with tf.variable_scope('mentee_momentum_optimizer'):
optimizer_mentee = tf.train.MomentumOptimizer(
learning_rate=learning_rate_mentee,
momentum=momentum)
elif optimizer[1] == 'adam':
with tf.variable_scope('mentee_adam_optimizer'):
optimizer_mentee = tf.train.AdamOptimizer(
learning_rate=learning_rate_mentee)
elif optimizer[1] == 'adadelta':
with tf.variable_scope('mentee_adadelta_optimizer'):
optimizer_mentee = tf.train.AdadeltaOptimizer(
learning_rate=learning_rate_mentee)
if optimizer[2] == 'momentum':
with tf.variable_scope('finetune_momentum_optimizer'):
optimizer_finetune = tf.train.MomentumOptimizer(
learning_rate=learning_rate_finetune,
momentum=momentum)
elif optimizer[2] == 'adam':
with tf.variable_scope('finetune_adam_optimizer'):
optimizer_finetune = tf.train.AdamOptimizer(
learning_rate=learning_rate_finetune)
elif optimizer[2] == 'adadelta':
with tf.variable_scope('finetune_adadelta_optimizer'):
optimizer_finetune = tf.train.AdadeltaOptimizer(
learning_rate=learning_rate_finetune)
# Batch norm requires update ops to be added as a dependency to train_op
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
with tf.variable_scope('optimizers'):
train_op_mentor = optimizer_mentor.minimize(loss_mentor,
global_step,
var_list = mentor_variables)
train_op_mentee = optimizer_mentee.minimize(loss_mentee,
global_step,
var_list = mentee_variables)
train_op_finetune = optimizer_finetune.minimize(loss_finetune,
global_step,
var_list = mentee_variables)
else:
with tf.variable_scope('mentor_cumulative_loss'):
# Add weight decay to the loss (l2_mentor already includes weight_decay_coeff).
loss_mentor = cross_entropy_mentor + l2_mentor
with tf.variable_scope('mentee_cumulative_loss'):
loss_mentee = cross_entropy_mentee + l2_mentee
with tf.variable_scope('mentee_finetune'):
loss_finetune = cross_entropy_mentee + l2_mentee
train_op_mentor = None
train_op_mentee = None
train_op_finetune = None
with tf.variable_scope('metrics'):
accuracy_mentor = tf.metrics.accuracy(
tf.argmax(labels, axis=1), predictions_mentor['classes'])
accuracy_mentee = tf.metrics.accuracy(
tf.argmax(labels, axis=1), predictions_mentee['classes'])
metrics = {'accuracy_mentor': accuracy_mentor,
'accuracy_mentee': accuracy_mentee}
# Create a tensor named train_accuracy for logging purposes
tf.identity(accuracy_mentor[1], name='train_accuracy_mentor')
tf.summary.scalar('train_accuracy_mentor', accuracy_mentor[1])
tf.identity(accuracy_mentee[1], name='train_accuracy_mentee')
tf.summary.scalar('train_accuracy_mentee', accuracy_mentee[1])
saver=tf.train.Saver(var_list = tf.global_variables())
if trainee == 'mentor':
return tf.estimator.EstimatorSpec(
mode=mode,
predictions=predictions_mentor,
loss=loss_mentor,
train_op=train_op_mentor,
scaffold=tf.train.Scaffold(saver=saver),
eval_metric_ops=metrics)
elif trainee == 'mentee':
return tf.estimator.EstimatorSpec(
mode=mode,
predictions=predictions_mentee,
loss=loss_mentee,
train_op=train_op_mentee,
scaffold=tf.train.Scaffold(saver=saver),
eval_metric_ops=metrics)
elif trainee == 'finetune':
return tf.estimator.EstimatorSpec(
mode=mode,
predictions=predictions_mentee,
loss=loss_finetune,
train_op=train_op_finetune,
scaffold=tf.train.Scaffold(saver=saver),
eval_metric_ops=metrics)
def resnet_main(flags, model_function, input_function):
# Using the Winograd non-fused algorithms provides a small performance boost.
os.environ['TF_ENABLE_WINOGRAD_NONFUSED'] = '1'
# Set up a RunConfig to only save checkpoints once per training cycle.
run_config = tf.estimator.RunConfig().replace(save_checkpoints_secs=1e9)
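# All three Estimators below share the same model_dir, so the mentee and finetune
# phases start from the checkpoints written by the preceding phase.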
mentor = tf.estimator.Estimator(
model_fn=model_function, model_dir=flags.model_dir,
config=run_config,
params={
'resnet_size': [flags.resnet_size_mentor, flags.resnet_size_mentee],
'data_format': flags.data_format,
'batch_size': flags.batch_size,
'distillation_coeff': flags.distillation_coeff,
'probes_coeff': flags.probes_coeff,
'weight_decay_coeff': flags.weight_decay_coeff,
'optimizer': [flags.mentor_optimizer,
flags.mentee_optimizer,
flags.finetune_optimizer],
'temperature': flags.temperature,
'num_probes': flags.num_probes,
'pool_probes': flags.pool_probes,
'train_epochs_mentor': flags.train_epochs_mentor,
'train_epochs_mentee': flags.train_epochs_mentee,
'train_epochs_finetune': flags.train_epochs_finetune,
'initial_learning_rate_mentor': flags.initial_learning_rate_mentor,
'initial_learning_rate_mentee': flags.initial_learning_rate_mentee,
'initial_learning_rate_finetune': flags.initial_learning_rate_finetune,
'pool_type': flags.pool_type,
'trainee': 'mentor'
})
for i in range(flags.train_epochs_mentor // flags.epochs_per_eval):
tensors_to_log = {
'learning_rate': 'learning_rates/learning_rate_mentor',
'cross_entropy': 'cross_entropy/cross_entropy_mentor' ,
'train_accuracy': 'metrics/train_accuracy_mentor'
}
logging_hook = tf.train.LoggingTensorHook(
tensors=tensors_to_log, every_n_iter=100)
def input_fn_train():
return input_function(True, flags.data_dir, flags.batch_size,
flags.epochs_per_eval, flags.num_parallel_calls)
print(' *********************** ' )
print(' Starting a mentor training cycle. [' + str(i) + '/'
+ str(flags.train_epochs_mentor // flags.epochs_per_eval) + ']')
print(' *********************** ' )
mentor.train(input_fn=input_fn_train, hooks=[logging_hook])
print('Starting to evaluate.')
# Evaluate the model and print results
def input_fn_eval():
return input_function(False, flags.data_dir, flags.batch_size,
1, flags.num_parallel_calls)
eval_results = mentor.evaluate(input_fn=input_fn_eval)
print(eval_results)
mentee = tf.estimator.Estimator(
model_fn=model_function, model_dir=flags.model_dir,
config=run_config,
params={
'resnet_size': [flags.resnet_size_mentor, flags.resnet_size_mentee],
'data_format': flags.data_format,
'batch_size': flags.batch_size,
'distillation_coeff': flags.distillation_coeff,
'probes_coeff': flags.probes_coeff,
'optimizer': [flags.mentor_optimizer,
flags.mentee_optimizer,
flags.finetune_optimizer],
'weight_decay_coeff': flags.weight_decay_coeff,
'temperature': flags.temperature,
'num_probes': flags.num_probes,
'pool_probes': flags.pool_probes,
'train_epochs_mentor': flags.train_epochs_mentor,
'train_epochs_mentee': flags.train_epochs_mentee,
'train_epochs_finetune': flags.train_epochs_finetune,
'initial_learning_rate_mentor': flags.initial_learning_rate_mentor,
'initial_learning_rate_mentee': flags.initial_learning_rate_mentee,
'initial_learning_rate_finetune': flags.initial_learning_rate_finetune,
'pool_type': flags.pool_type,
'trainee': 'mentee'
})
for i in range(flags.train_epochs_mentee // flags.epochs_per_eval):
tensors_to_log = {
'learning_rate': 'learning_rates/learning_rate_mentee',
'cross_entropy': 'cross_entropy/cross_entropy_mentee',
'train_accuracy': 'metrics/train_accuracy_mentee',
'distillation_loss': 'distillery/distillation_loss',
'distillation_coeff':'mentee_cumulative_loss/distillation_coeff_decayed'
}
logging_hook = tf.train.LoggingTensorHook(
tensors=tensors_to_log, every_n_iter=100)
def input_fn_train():
return input_function(True, flags.data_dir, flags.batch_size,
flags.epochs_per_eval, flags.num_parallel_calls)
print(' *********************** ' )
print(' Starting a mentee training cycle. [' + str(i) + '/'
+ str(flags.train_epochs_mentee // flags.epochs_per_eval) + ']')
print(' *********************** ' )
mentee.train(input_fn=input_fn_train, hooks=[logging_hook])
print('Starting to evaluate.')
# Evaluate the model and print results
def input_fn_eval():
return input_function(False, flags.data_dir, flags.batch_size,
1, flags.num_parallel_calls)
eval_results = mentee.evaluate(input_fn=input_fn_eval)
print(eval_results)
finetune = tf.estimator.Estimator(
model_fn=model_function, model_dir=flags.model_dir,
config=run_config,
params={
'resnet_size': [flags.resnet_size_mentor, flags.resnet_size_mentee],
'data_format': flags.data_format,
'batch_size': flags.batch_size,
'distillation_coeff': flags.distillation_coeff,
'probes_coeff': flags.probes_coeff,
'optimizer': [flags.mentor_optimizer,
flags.mentee_optimizer,
flags.finetune_optimizer],
'weight_decay_coeff': flags.weight_decay_coeff,
'temperature': flags.temperature,
'num_probes': flags.num_probes,
'pool_probes': flags.pool_probes,
'train_epochs_mentor': flags.train_epochs_mentor,
'train_epochs_mentee': flags.train_epochs_mentee,
'train_epochs_finetune': flags.train_epochs_finetune,
'initial_learning_rate_mentor': flags.initial_learning_rate_mentor,
'initial_learning_rate_mentee': flags.initial_learning_rate_mentee,
'initial_learning_rate_finetune': flags.initial_learning_rate_finetune,
'pool_type': flags.pool_type,
'trainee': 'finetune'
})
for i in range(flags.train_epochs_finetune // flags.epochs_per_eval):
tensors_to_log = {
'learning_rate': 'learning_rates/learning_rate_mentee',
'cross_entropy': 'cross_entropy/cross_entropy_mentee',
'train_accuracy': 'metrics/train_accuracy_mentee',
}
logging_hook = tf.train.LoggingTensorHook(
tensors=tensors_to_log, every_n_iter=100)
def input_fn_train():
return input_function(True, flags.data_dir, flags.batch_size,
flags.epochs_per_eval, flags.num_parallel_calls)
print(' *********************** ' )
print(' Starting a mentee finetune cycle. [' + str(i) + '/'
+ str(flags.train_epochs_finetune // flags.epochs_per_eval) + ']')
print(' *********************** ' )
finetune.train(input_fn=input_fn_train, hooks=[logging_hook])
print('Starting to evaluate.')
# Evaluate the model and print results
def input_fn_eval():
return input_function(False, flags.data_dir, flags.batch_size,
1, flags.num_parallel_calls)
eval_results = finetune.evaluate(input_fn=input_fn_eval)
print(eval_results)
class ResnetArgParser(argparse.ArgumentParser):
"""Arguments for configuring and running a Resnet Model.
"""
def __init__(self, resnet_size_choices=None):
super(ResnetArgParser, self).__init__()
self.add_argument(
'--data_dir', type=str, default='./resnet_data',
help='The directory where the input data is stored.')
self.add_argument(
'--num_parallel_calls', type=int, default=5,
help='The number of records that are processed in parallel '
'during input processing. This can be optimized per data set but '
'for generally homogeneous data sets, should be approximately the '
'number of available CPU cores.')
self.add_argument(
'--model_dir', type=str, default='./resnet_model',
help='The directory where the model will be stored.')
self.add_argument(
'--resnet_size_mentor', type=int, default=50,
choices=resnet_size_choices,
help='The size of the ResNet Mentor model to use.')
self.add_argument(
'--resnet_size_mentee', type=int, default=10,
choices=resnet_size_choices,
help='The size of the ResNet Mentee model to use.')
self.add_argument(
'--train_epochs_mentor', type=int, default=100,
help='The number of epochs to use for training.')
self.add_argument(
'--train_epochs_mentee', type=int, default=100,
help='The number of epochs to use for training.')
self.add_argument(
'--train_epochs_finetune', type=int, default=100,
help='The number of epochs to use for training.')
self.add_argument(
'--epochs_per_eval', type=int, default=1,
help='The number of training epochs to run between evaluations.')
self.add_argument(
'--batch_size', type=int, default=32,
help='Batch size for training and evaluation.')
self.add_argument(
'--mentor_optimizer', type=str, default='momentum',
help='Optimizer for training and evaluation.')
self.add_argument(
'--mentee_optimizer', type=str, default='momentum',
help='Optimizer for training and evaluation.')
self.add_argument(
'--finetune_optimizer', type=str, default='momentum',
help='Optimizer for training and evaluation.')
self.add_argument(
'--data_format', type=str, default=None,
choices=['channels_first', 'channels_last'],
help='A flag to override the data format used in the model. '
'channels_first provides a performance boost on GPU but '
'is not always compatible with CPU. If left unspecified, '
'the data format will be chosen automatically based on '
'whether TensorFlow was built for CPU or GPU.')
self.add_argument(
'--distillation_coeff', type=float, default=0.01,
help='Coefficient of distillation to be applied from parent to '
'child. This is only useful when performing distillation.')
self.add_argument(
'--probes_coeff', type=float, default=0.0001,
help='Coefficient applied to the probe loss passed from parent to '
'child. This is only useful when performing mentoring.')
self.add_argument(
'--weight_decay_coeff', type=float, default=0.0002,
help='Coefficient applied to the weight decay '
'regularizer.')
self.add_argument(
'--temperature', type=float, default=3,
help='Temperature to be used for the softmax layer')
self.add_argument(
'--num_probes', type=int, default=0,
help='Number of probes to be used')
self.add_argument(
'--pool_probes', type=int, default=2,
help='Pooling window size used to downsample the probes.')
self.add_argument(
'--initial_learning_rate_mentor', type=float, default=0.001,
help='Set initial learning rate for mentor')
self.add_argument(
'--initial_learning_rate_mentee', type=float, default=0.001,
help='Set initial learning rate for mentee')
self.add_argument(
'--initial_learning_rate_finetune', type=float, default=0.001,
help='Set the initial learning rate for finetuning')
self.add_argument(
'--pool_type', type=str, default='max',
help='Pool type for probes.')
|
[] |
[] |
[
"TF_ENABLE_WINOGRAD_NONFUSED"
] |
[]
|
["TF_ENABLE_WINOGRAD_NONFUSED"]
|
python
| 1 | 0 | |
tfx/components/evaluator/executor_test.py
|
# Lint as: python2, python3
# Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tfx.components.evaluator.executor."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from absl import logging
from absl.testing import parameterized
import tensorflow as tf
import tensorflow_model_analysis as tfma
from tfx.components.evaluator import constants
from tfx.components.evaluator import executor
from tfx.proto import evaluator_pb2
from tfx.types import artifact_utils
from tfx.types import standard_artifacts
from tfx.utils import json_utils
from google.protobuf import json_format
class ExecutorTest(tf.test.TestCase, parameterized.TestCase):
@parameterized.named_parameters(('evaluation_w_eval_config', {
'eval_config':
json_format.MessageToJson(
tfma.EvalConfig(slicing_specs=[
tfma.SlicingSpec(feature_keys=['trip_start_hour']),
tfma.SlicingSpec(
feature_keys=['trip_start_day', 'trip_miles']),
]),
preserving_proto_field_name=True)
}), ('evaluation_w_module_file', {
'eval_config':
json_format.MessageToJson(
tfma.EvalConfig(slicing_specs=[
tfma.SlicingSpec(feature_keys=['trip_start_hour']),
tfma.SlicingSpec(
feature_keys=['trip_start_day', 'trip_miles']),
]),
preserving_proto_field_name=True),
'module_file':
None
}))
def testEvaluation(self, exec_properties):
source_data_dir = os.path.join(
os.path.dirname(os.path.dirname(__file__)), 'testdata')
output_data_dir = os.path.join(
os.environ.get('TEST_UNDECLARED_OUTPUTS_DIR', self.get_temp_dir()),
self._testMethodName)
# Create input dict.
examples = standard_artifacts.Examples()
examples.uri = os.path.join(source_data_dir, 'csv_example_gen')
examples.split_names = artifact_utils.encode_split_names(['train', 'eval'])
model = standard_artifacts.Model()
baseline_model = standard_artifacts.Model()
model.uri = os.path.join(source_data_dir, 'trainer/current')
baseline_model.uri = os.path.join(source_data_dir, 'trainer/previous/')
schema = standard_artifacts.Schema()
schema.uri = os.path.join(source_data_dir, 'schema_gen')
input_dict = {
constants.EXAMPLES_KEY: [examples],
constants.MODEL_KEY: [model],
constants.SCHEMA_KEY: [schema],
}
# Create output dict.
eval_output = standard_artifacts.ModelEvaluation()
eval_output.uri = os.path.join(output_data_dir, 'eval_output')
blessing_output = standard_artifacts.ModelBlessing()
blessing_output.uri = os.path.join(output_data_dir, 'blessing_output')
output_dict = {
constants.EVALUATION_KEY: [eval_output],
constants.BLESSING_KEY: [blessing_output],
}
# Test multiple splits.
exec_properties[constants.EXAMPLE_SPLITS_KEY] = json_utils.dumps(
['train', 'eval'])
if 'module_file' in exec_properties:
exec_properties['module_file'] = os.path.join(source_data_dir,
'module_file',
'evaluator_module.py')
# Run executor.
evaluator = executor.Executor()
evaluator.Do(input_dict, output_dict, exec_properties)
# Check evaluator outputs.
self.assertTrue(
tf.io.gfile.exists(os.path.join(eval_output.uri, 'eval_config.json')))
self.assertTrue(
tf.io.gfile.exists(os.path.join(eval_output.uri, 'metrics')))
self.assertTrue(tf.io.gfile.exists(os.path.join(eval_output.uri, 'plots')))
self.assertFalse(
tf.io.gfile.exists(os.path.join(blessing_output.uri, 'BLESSED')))
@parameterized.named_parameters(('legacy_feature_slicing', {
'feature_slicing_spec':
json_format.MessageToJson(
evaluator_pb2.FeatureSlicingSpec(specs=[
evaluator_pb2.SingleSlicingSpec(
column_for_slicing=['trip_start_hour']),
evaluator_pb2.SingleSlicingSpec(
column_for_slicing=['trip_start_day', 'trip_miles']),
]),
preserving_proto_field_name=True),
}))
def testDoLegacySingleEvalSavedModelWFairness(self, exec_properties):
source_data_dir = os.path.join(
os.path.dirname(os.path.dirname(__file__)), 'testdata')
output_data_dir = os.path.join(
os.environ.get('TEST_UNDECLARED_OUTPUTS_DIR', self.get_temp_dir()),
self._testMethodName)
# Create input dict.
examples = standard_artifacts.Examples()
examples.uri = os.path.join(source_data_dir, 'csv_example_gen')
examples.split_names = artifact_utils.encode_split_names(['train', 'eval'])
model = standard_artifacts.Model()
model.uri = os.path.join(source_data_dir, 'trainer/current')
input_dict = {
constants.EXAMPLES_KEY: [examples],
constants.MODEL_KEY: [model],
}
# Create output dict.
eval_output = standard_artifacts.ModelEvaluation()
eval_output.uri = os.path.join(output_data_dir, 'eval_output')
blessing_output = standard_artifacts.ModelBlessing()
blessing_output.uri = os.path.join(output_data_dir, 'blessing_output')
output_dict = {
constants.EVALUATION_KEY: [eval_output],
constants.BLESSING_KEY: [blessing_output],
}
try:
# Need to import the following module so that the fairness indicator
# post-export metric is registered. This may raise an ImportError if the
# currently-installed version of TFMA does not support fairness
# indicators.
import tensorflow_model_analysis.addons.fairness.post_export_metrics.fairness_indicators # pylint: disable=g-import-not-at-top, unused-variable
exec_properties['fairness_indicator_thresholds'] = [
0.1, 0.3, 0.5, 0.7, 0.9
]
except ImportError:
logging.warning(
'Not testing fairness indicators because a compatible TFMA version '
'is not installed.')
# List needs to be serialized before being passed into Do function.
exec_properties[constants.EXAMPLE_SPLITS_KEY] = json_utils.dumps(None)
# Run executor.
evaluator = executor.Executor()
evaluator.Do(input_dict, output_dict, exec_properties)
# Check evaluator outputs.
self.assertTrue(
tf.io.gfile.exists(os.path.join(eval_output.uri, 'eval_config.json')))
self.assertTrue(
tf.io.gfile.exists(os.path.join(eval_output.uri, 'metrics')))
self.assertTrue(tf.io.gfile.exists(os.path.join(eval_output.uri, 'plots')))
self.assertFalse(
tf.io.gfile.exists(os.path.join(blessing_output.uri, 'BLESSED')))
@parameterized.named_parameters(
(
'eval_config_w_validation',
{
'eval_config':
json_format.MessageToJson(
tfma.EvalConfig(
model_specs=[
tfma.ModelSpec(label_key='tips'),
],
metrics_specs=[
tfma.MetricsSpec(metrics=[
tfma.config.MetricConfig(
class_name='ExampleCount',
# Count > 0, OK.
threshold=tfma.config.MetricThreshold(
value_threshold=tfma
.GenericValueThreshold(
lower_bound={'value': 0}))),
]),
],
slicing_specs=[tfma.SlicingSpec()]),
preserving_proto_field_name=True)
},
True,
True),
(
'eval_config_w_validation_fail',
{
'eval_config':
json_format.MessageToJson(
tfma.EvalConfig(
model_specs=[
tfma.ModelSpec(
name='baseline1',
label_key='tips',
is_baseline=True),
tfma.ModelSpec(
name='candidate1', label_key='tips'),
],
metrics_specs=[
tfma.MetricsSpec(metrics=[
tfma.config.MetricConfig(
class_name='ExampleCount',
# Count < -1, NOT OK.
threshold=tfma.config.MetricThreshold(
value_threshold=tfma
.GenericValueThreshold(
upper_bound={'value': -1}))),
]),
],
slicing_specs=[tfma.SlicingSpec()]),
preserving_proto_field_name=True)
},
False,
True),
(
'no_baseline_model_ignore_change_threshold_validation_pass',
{
'eval_config':
json_format.MessageToJson(
tfma.EvalConfig(
model_specs=[
tfma.ModelSpec(
name='baseline',
label_key='tips',
is_baseline=True),
tfma.ModelSpec(
name='candidate', label_key='tips'),
],
metrics_specs=[
tfma.MetricsSpec(metrics=[
tfma.config.MetricConfig(
class_name='ExampleCount',
# Count > 0, OK.
threshold=tfma.config.MetricThreshold(
value_threshold=tfma
.GenericValueThreshold(
lower_bound={'value': 0}))),
tfma.config.MetricConfig(
class_name='Accuracy',
# Should be ignored due to no baseline.
threshold=tfma.config.MetricThreshold(
change_threshold=tfma
.GenericChangeThreshold(
relative={'value': 0},
direction=tfma.MetricDirection
.LOWER_IS_BETTER))),
]),
],
slicing_specs=[tfma.SlicingSpec()]),
preserving_proto_field_name=True)
},
True,
False))
def testDoValidation(self, exec_properties, blessed, has_baseline):
source_data_dir = os.path.join(
os.path.dirname(os.path.dirname(__file__)), 'testdata')
output_data_dir = os.path.join(
os.environ.get('TEST_UNDECLARED_OUTPUTS_DIR', self.get_temp_dir()),
self._testMethodName)
# Create input dict.
examples = standard_artifacts.Examples()
examples.uri = os.path.join(source_data_dir, 'csv_example_gen')
examples.split_names = artifact_utils.encode_split_names(['train', 'eval'])
model = standard_artifacts.Model()
baseline_model = standard_artifacts.Model()
model.uri = os.path.join(source_data_dir, 'trainer/current')
baseline_model.uri = os.path.join(source_data_dir, 'trainer/previous/')
schema = standard_artifacts.Schema()
schema.uri = os.path.join(source_data_dir, 'schema_gen')
input_dict = {
constants.EXAMPLES_KEY: [examples],
constants.MODEL_KEY: [model],
constants.SCHEMA_KEY: [schema],
}
if has_baseline:
input_dict[constants.BASELINE_MODEL_KEY] = [baseline_model]
# Create output dict.
eval_output = standard_artifacts.ModelEvaluation()
eval_output.uri = os.path.join(output_data_dir, 'eval_output')
blessing_output = standard_artifacts.ModelBlessing()
blessing_output.uri = os.path.join(output_data_dir, 'blessing_output')
output_dict = {
constants.EVALUATION_KEY: [eval_output],
constants.BLESSING_KEY: [blessing_output],
}
# List needs to be serialized before being passed into Do function.
exec_properties[constants.EXAMPLE_SPLITS_KEY] = json_utils.dumps(None)
# Run executor.
evaluator = executor.Executor()
evaluator.Do(input_dict, output_dict, exec_properties)
# Check evaluator outputs.
self.assertTrue(
tf.io.gfile.exists(os.path.join(eval_output.uri, 'eval_config.json')))
self.assertTrue(
tf.io.gfile.exists(os.path.join(eval_output.uri, 'metrics')))
self.assertTrue(tf.io.gfile.exists(os.path.join(eval_output.uri, 'plots')))
self.assertTrue(
tf.io.gfile.exists(os.path.join(eval_output.uri, 'validations')))
if blessed:
self.assertTrue(
tf.io.gfile.exists(os.path.join(blessing_output.uri, 'BLESSED')))
else:
self.assertTrue(
tf.io.gfile.exists(os.path.join(blessing_output.uri, 'NOT_BLESSED')))
if __name__ == '__main__':
tf.compat.v1.enable_v2_behavior()
tf.test.main()
|
[] |
[] |
[
"TEST_UNDECLARED_OUTPUTS_DIR"
] |
[]
|
["TEST_UNDECLARED_OUTPUTS_DIR"]
|
python
| 1 | 0 | |
nuitka/build/inline_copy/lib/scons-3.1.2/SCons/Platform/darwin.py
|
"""engine.SCons.Platform.darwin
Platform-specific initialization for Mac OS X systems.
There normally shouldn't be any need to import this module directly. It
will usually be imported through the generic SCons.Platform.Platform()
selection method.
"""
#
# Copyright (c) 2001 - 2019 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Platform/darwin.py bee7caf9defd6e108fc2998a2520ddb36a967691 2019-12-17 02:07:09 bdeegan"
from . import posix
import os
def generate(env):
posix.generate(env)
env['SHLIBSUFFIX'] = '.dylib'
    # Put MacPorts paths at the front to override Apple's versions; the Fink path goes after.
    # For now, let people who want MacPorts or Fink tools add those paths themselves!
# env['ENV']['PATH'] = '/opt/local/bin:/opt/local/sbin:' + env['ENV']['PATH'] + ':/sw/bin'
# Store extra system paths in env['ENV']['PATHOSX']
filelist = ['/etc/paths',]
# make sure this works on Macs with Tiger or earlier
try:
dirlist = os.listdir('/etc/paths.d')
except:
dirlist = []
for file in dirlist:
filelist.append('/etc/paths.d/'+file)
for file in filelist:
if os.path.isfile(file):
with open(file, 'r') as f:
lines = f.readlines()
for line in lines:
if line:
env.AppendENVPath('PATHOSX', line.strip('\n'))
# Not sure why this wasn't the case all along?
if env['ENV'].get('PATHOSX', False) and os.environ.get('SCONS_USE_MAC_PATHS', False):
env.AppendENVPath('PATH',env['ENV']['PATHOSX'])
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
[] |
[] |
[
"SCONS_USE_MAC_PATHS"
] |
[]
|
["SCONS_USE_MAC_PATHS"]
|
python
| 1 | 0 | |
libs/gspread/auth.py
|
# -*- coding: utf-8 -*-
"""
gspread.auth
~~~~~~~~~~~~
Simple authentication with OAuth.
"""
import os
from google.oauth2.credentials import Credentials
from google.oauth2.service_account import (
Credentials as ServiceAccountCredentials,
)
from google_auth_oauthlib.flow import InstalledAppFlow
from .client import Client
DEFAULT_SCOPES = [
'https://www.googleapis.com/auth/spreadsheets',
'https://www.googleapis.com/auth/drive',
]
READONLY_SCOPES = [
'https://www.googleapis.com/auth/spreadsheets.readonly',
'https://www.googleapis.com/auth/drive.readonly',
]
def get_config_dir(config_dir_name='gspread', os_is_windows=os.name == 'nt'):
"""Construct a config dir path.
By default:
* `%APPDATA%\gspread` on Windows
* `~/.config/gspread` everywhere else
"""
if os_is_windows:
return os.path.join(os.environ["APPDATA"], config_dir_name)
else:
return os.path.join(
os.path.expanduser('~'), '.config', config_dir_name
)
DEFAULT_CONFIG_DIR = get_config_dir()
DEFAULT_CREDENTIALS_FILENAME = os.path.join(
DEFAULT_CONFIG_DIR, 'credentials.json'
)
DEFAULT_AUTHORIZED_USER_FILENAME = os.path.join(
DEFAULT_CONFIG_DIR, 'authorized_user.json'
)
DEFAULT_SERVICE_ACCOUNT_FILENAME = os.path.join(
DEFAULT_CONFIG_DIR, 'service_account.json'
)
def _create_installed_app_flow(scopes):
return InstalledAppFlow.from_client_secrets_file(
DEFAULT_CREDENTIALS_FILENAME, scopes
)
def local_server_flow(scopes, port=0):
flow = _create_installed_app_flow(scopes)
return flow.run_local_server(port=port)
def console_flow(scopes):
flow = _create_installed_app_flow(scopes)
return flow.run_console()
def load_credentials(filename=DEFAULT_AUTHORIZED_USER_FILENAME):
if os.path.exists(filename):
return Credentials.from_authorized_user_file(filename)
return None
def store_credentials(
creds, filename=DEFAULT_AUTHORIZED_USER_FILENAME, strip='token'
):
with open(filename, 'w') as f:
f.write(creds.to_json(strip))
def oauth(scopes=DEFAULT_SCOPES, flow=local_server_flow):
"""Authenticate with OAuth Client ID.
:param list scopes: The scopes used to obtain authorization.
:param function flow: OAuth flow to use for authentication.
:rtype: :class:`gspread.client.Client`
"""
creds = load_credentials()
if not creds:
creds = flow(scopes=scopes)
store_credentials(creds)
client = Client(auth=creds)
return client
def service_account(
filename=DEFAULT_SERVICE_ACCOUNT_FILENAME, scopes=DEFAULT_SCOPES
):
"""Authenticate using a service account.
:param str filename: The path to the service account json file.
:param list scopes: The scopes used to obtain authorization.
:rtype: :class:`gspread.client.Client`
"""
creds = ServiceAccountCredentials.from_service_account_file(
filename, scopes=scopes
)
return Client(auth=creds)
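# The sketch below is not part of gspread; it is a minimal, hypothetical
# illustration of the two entry points defined above. It assumes that
# credentials.json (for oauth) or service_account.json already exists in the
# config dir described in get_config_dir, and that a spreadsheet titled
# 'Example' (a placeholder name) is accessible to the authorized account.
if __name__ == '__main__':
    gc = oauth()                        # or: gc = service_account()
    sh = gc.open('Example')             # 'Example' is a placeholder title
    print(sh.sheet1.acell('A1').value)  # read a single cell as a smoke test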
|
[] |
[] |
[
"APPDATA"
] |
[]
|
["APPDATA"]
|
python
| 1 | 0 | |
tests/test_flair_encoder.py
|
import os
import unittest
from gnes.encoder.text.flair import FlairEncoder
class TestFlairEncoder(unittest.TestCase):
def setUp(self):
dirname = os.path.dirname(__file__)
self.dump_path = os.path.join(dirname, 'flair_encoder.bin')
self.test_str = []
with open(os.path.join(dirname, 'sonnets.txt')) as f:
for line in f:
line = line.strip()
if line:
self.test_str.append(line)
self.flair_encoder = FlairEncoder(model_name=os.environ.get('FLAIR_CI_MODEL'))
@unittest.SkipTest
def test_encoding(self):
vec = self.flair_encoder.encode(self.test_str[:2])
print(vec.shape)
self.assertEqual(vec.shape[0], 2)
self.assertEqual(vec.shape[1], 4196)
@unittest.SkipTest
def test_dump_load(self):
self.flair_encoder.dump(self.dump_path)
flair_encoder2 = FlairEncoder.load(self.dump_path)
vec = flair_encoder2.encode(self.test_str)
self.assertEqual(vec.shape[0], len(self.test_str))
self.assertEqual(vec.shape[1], 512)
def tearDown(self):
if os.path.exists(self.dump_path):
os.remove(self.dump_path)
|
[] |
[] |
[
"FLAIR_CI_MODEL"
] |
[]
|
["FLAIR_CI_MODEL"]
|
python
| 1 | 0 | |
ch09/qos.py
|
import os
import sys
import requests
base_url = "https://api.meraki.com/api/v1"
key = os.environ.get("MERAKI_DASHBOARD_API_KEY", None)
if key is None:
print("Please provide an API key. Aborting.")
sys.exit(-1)
sess = requests.Session()
sess.headers.update({
"X-Cisco-Meraki-API-Key": key
})
network_id = "L_783626335162466515"
url = f"{base_url}/networks/{network_id}/switch/qosRules"
resp = sess.get(url)
if resp.status_code == 200:
rules = resp.json()
for rule in rules:
url_del = f"{base_url}/networks/{network_id}/switch/qosRules/{rule['id']}"
resp_del = sess.delete(url_del)
if resp_del.status_code == 204:
print(f"Deleted QoS rule {rule['id']}")
else:
print(f"Failed on delete request. Status: {resp_del.status_code}")
else:
print(f"Failed to retrieve rules. Status: {resp.status_code}")
|
[] |
[] |
[
"MERAKI_DASHBOARD_API_KEY"
] |
[]
|
["MERAKI_DASHBOARD_API_KEY"]
|
python
| 1 | 0 | |
tests/test_ftx_private.py
|
import json
import os
import time
import ccxt
import pandas as pd
from unittest import TestCase, mock
from crypto_data_fetcher.ftx import FtxFetcher
def ftx_config():
path = os.getenv("HOME") + '/.ftx.json'
with open(path) as f:
return json.load(f)
def create_ccxt_client():
headers = {
'FTX-SUBACCOUNT': 'bottest'
}
return ccxt.ftx({
'apiKey': ftx_config()['key'],
'secret': ftx_config()['secret'],
'enableRateLimit': True,
'headers': headers,
})
class TestFtxPrivate(TestCase):
def test_fetch_my_trades(self):
ftx = create_ccxt_client()
fetcher = FtxFetcher(ccxt_client=ftx)
df = fetcher.fetch_my_trades(
market='BTC-PERP',
)
print(df)
|
[] |
[] |
[
"HOME"
] |
[]
|
["HOME"]
|
python
| 1 | 0 | |
cmd/ls.go
|
package cmd
import (
"io/ioutil"
"os"
"strings"
"github.com/gabrie30/ghorg/colorlog"
"github.com/spf13/cobra"
)
func init() {
rootCmd.AddCommand(lsCmd)
}
var lsCmd = &cobra.Command{
Use: "ls [dir]",
Short: "List contents of your ghorg home or ghorg directories",
Long: `If no dir is specified it will list contents of GHORG_ABSOLUTE_PATH_TO_CLONE_TO. When specifying a dir you can omit _ghorg`,
Run: lsFunc,
}
func lsFunc(cmd *cobra.Command, argz []string) {
if cmd.Flags().Changed("color") {
colorToggle := cmd.Flag("color").Value.String()
if colorToggle == "on" {
os.Setenv("GHORG_COLOR", colorToggle)
} else {
os.Setenv("GHORG_COLOR", "off")
}
}
if len(argz) == 0 {
listGhorgHome()
}
if len(argz) >= 1 {
for _, arg := range argz {
listGhorgDir(arg)
}
}
}
func listGhorgHome() {
ghorgDir := os.Getenv("GHORG_ABSOLUTE_PATH_TO_CLONE_TO")
files, err := ioutil.ReadDir(ghorgDir)
if err != nil {
colorlog.PrintError("No clones found. Please clone some and try again.")
}
for _, f := range files {
if f.IsDir() {
colorlog.PrintInfo(ghorgDir + f.Name())
}
}
}
func listGhorgDir(arg string) {
if !strings.HasSuffix(arg, "_ghorg") {
arg = arg + "_ghorg"
}
arg = strings.ReplaceAll(arg, "-", "_")
ghorgDir := os.Getenv("GHORG_ABSOLUTE_PATH_TO_CLONE_TO") + arg
files, err := ioutil.ReadDir(ghorgDir)
if err != nil {
colorlog.PrintError("No clone found with that name. Please check spelling or reclone.")
}
for _, f := range files {
if f.IsDir() {
colorlog.PrintSubtleInfo(ghorgDir + "/" + f.Name())
}
}
}
|
[
"\"GHORG_ABSOLUTE_PATH_TO_CLONE_TO\"",
"\"GHORG_ABSOLUTE_PATH_TO_CLONE_TO\""
] |
[] |
[
"GHORG_ABSOLUTE_PATH_TO_CLONE_TO"
] |
[]
|
["GHORG_ABSOLUTE_PATH_TO_CLONE_TO"]
|
go
| 1 | 0 | |
auth/jwt.go
|
package auth
import (
"fmt"
"os"
"time"
"github.com/dgrijalva/jwt-go"
)
// SigningKey is a secret key to store in jwt claims
var SigningKey = []byte(os.Getenv("MYCAP_JWT_TOKEN"))
// GenerateJWT creates JWT token from payload
func GenerateJWT(name, email string) (string, error) {
token := jwt.New(jwt.SigningMethodHS256)
claims := token.Claims.(jwt.MapClaims)
claims["name"] = name
claims["email"] = email
claims["issued"] = time.Now().Unix()
claims["exp"] = time.Now().Add(time.Hour * 72).Unix()
t, err := token.SignedString(SigningKey)
if err != nil {
return "", fmt.Errorf("Failed to generate JWT")
}
return t, nil
}
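// exampleGenerateJWT is a minimal, hypothetical sketch (not part of the real
// app) showing how GenerateJWT might be called. The name and email below are
// placeholders, and MYCAP_JWT_TOKEN must be set for the signed token to be
// verifiable elsewhere.
func exampleGenerateJWT() {
	token, err := GenerateJWT("Jane Doe", "[email protected]")
	if err != nil {
		fmt.Println("token generation failed:", err)
		return
	}
	fmt.Println("generated token of length", len(token))
}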
|
[
"\"MYCAP_JWT_TOKEN\""
] |
[] |
[
"MYCAP_JWT_TOKEN"
] |
[]
|
["MYCAP_JWT_TOKEN"]
|
go
| 1 | 0 | |
src/app/views/htmlSignin.py
|
#!/usr/bin/python3
import os
from dotenv import load_dotenv
from .cssStatic import css
load_dotenv()
# These variables are used as settings
PORT = int(os.getenv('PORT'))
IP_ADDRESS = os.getenv('IP_ADDRESS')
MSG = ""
# The login page
html_signin_old = """
<html>
<body>
<b>Login Form</b>
<form method="POST" action="do_signin">
Username: <input type="text" name="username"><br>
Password: <input type="password" name="password"><br>
<input type="submit" value="Submit">
</form>
</body>
</html>
"""
html_signin = """
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<title>Login</title>
<!-- Latest compiled and minified CSS -->
<link rel="stylesheet" href="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.6/css/bootstrap.min.css" integrity="sha384-1q8mTJOASx8j1Au+a5WDVnPi2lkFfwwEAa8hDDdjZlpLegxhjVME1fgjWPGmkzs7" crossorigin="anonymous">
<link rel="stylesheet" href="https://use.fontawesome.com/releases/v5.7.1/css/all.css">
</head>
<style>
{3}
</style>
<body>
<div class="login">
<h1>Login</h1>
<div class="links">
<a class='active'>Signin</a>
<a href="http://{0}:{1}/signup">Signup</a>
</div>
<form method="POST" action="do_signin">
<label for="username">
<i class="fas fa-user"></i>
</label>
<input type="text" name="username" placeholder="Username" id="username" required>
<label for="password">
<i class="fas fa-lock"></i>
</label>
<input type="password" name="password" placeholder="Password" id="password" required>
<div class="msg">
<h3>{2}</h3>
</div>
<input type="submit" value="Login">
</form>
</div>
</body>
</html>
""".format(IP_ADDRESS, PORT, MSG, css)
def getViewSigninEncode(msg = ""):
global MSG
MSG = msg
return html_signin.encode()
|
[] |
[] |
[
"PORT",
"IP_ADDRESS"
] |
[]
|
["PORT", "IP_ADDRESS"]
|
python
| 2 | 0 | |
cmd/server.go
|
package cmd
import (
"context"
"fmt"
"log"
"os"
"os/signal"
"time"
"github.com/codigician/remote-code-execution/internal/codexec"
"github.com/codigician/remote-code-execution/internal/handler"
"github.com/codigician/remote-code-execution/internal/rc"
"github.com/codigician/remote-code-execution/pkg/config"
"github.com/docker/docker/api/types/container"
"github.com/docker/docker/client"
"github.com/labstack/echo/v4"
"github.com/urfave/cli/v2"
)
const (
_shutdownTimeoutDuration = time.Second * 5
_balancerIntervalDuration = time.Second * 10
defaultPort = 8888
)
func CommandServe() *cli.Command {
return &cli.Command{
Name: "serve",
Usage: "Run RCEE HTTP application",
Flags: []cli.Flag{
&cli.IntFlag{
Name: "port",
Aliases: []string{"p"},
Usage: "Give the port that you want to run the application",
Value: defaultPort,
},
},
Action: startServer,
}
}
func startServer(c *cli.Context) error {
var (
containerConfig container.Config
containerHostConfig container.HostConfig
env = os.Getenv("APP_ENV")
)
if err := config.Read(fmt.Sprintf(".config/%s.yml", env), &containerConfig, &containerHostConfig); err != nil {
return err
}
dockerClient, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
if err != nil {
return err
}
e := echo.New()
containerClient := rc.NewClient(dockerClient, &containerConfig)
codexecService := codexec.New(containerClient, &containerHostConfig, codexec.WriteFile)
codexecHandler := handler.NewRemoteCodeExecutor(codexecService)
codexecHandler.RegisterRoutes(e)
pool := codexec.NewContainerPool()
ticker := time.NewTicker(_balancerIntervalDuration)
balancerService := codexec.NewContainerBalancer(containerClient, pool, &containerHostConfig, codexecService)
balancerService.FillPool(context.Background())
go balancerService.Balance(ticker)
balancerHandler := handler.NewBalancer(balancerService)
balancerHandler.RegisterRoutes(e)
go func() {
if err := e.Start(fmt.Sprintf(":%d", c.Int("port"))); err != nil {
log.Println("server err", err)
}
}()
quit := make(chan os.Signal, 1)
signal.Notify(quit, os.Interrupt)
<-quit
ctx, cancel := context.WithTimeout(context.Background(), _shutdownTimeoutDuration)
defer cancel()
balancerService.Shutdown(ctx)
ticker.Stop()
return e.Shutdown(ctx)
}
|
[
"\"APP_ENV\""
] |
[] |
[
"APP_ENV"
] |
[]
|
["APP_ENV"]
|
go
| 1 | 0 | |
fbpcs/pcf/tests/test_private_computation_framework.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import shutil
from fbpcs.pcf.games import ConversionLift
from fbpcs.pcf.mpc.tests.utils import MPCTestCase
from fbpcs.pcf.private_computation_framework import PrivateComputationFramework
from fbpcs.pcf.structs import Role, Status
from fbpcs.pcf.tests.async_utils import wait
from fbpcs.pcf.tests.utils import DummyGame, DummyMPCFramework, DummyPlayer
TEST_RUN_TIMEOUT = 5678
TEST_SLEEP_SECONDS = 0
class TestPrivateComputationFramework(MPCTestCase):
def setUp(self):
os.environ["RUN_TIMEOUT"] = str(TEST_RUN_TIMEOUT)
self.game = DummyGame
self.player = DummyPlayer.build(Role.PUBLISHER)
self.other_players = [DummyPlayer.build(Role.PARTNER)]
num_files = 2
_, self.input_files = zip(
*[
self._make_input_csv(
game=ConversionLift, role=Role.PUBLISHER, num_records=10
)
for i in range(num_files)
]
)
self.output_files = self.input_files
self.tempdirs = [f.parent for f in self.input_files]
self.pcf = PrivateComputationFramework(
game=self.game,
input_files=self.input_files,
output_files=self.output_files,
player=self.player,
other_players=self.other_players,
mpc_cls=DummyMPCFramework,
partner_sleep_seconds=TEST_SLEEP_SECONDS,
)
self.pcf_partner = PrivateComputationFramework(
game=self.game,
input_files=self.input_files,
output_files=self.output_files,
player=DummyPlayer.build(Role.PARTNER),
other_players=[DummyPlayer.build(Role.PUBLISHER)],
mpc_cls=DummyMPCFramework,
partner_sleep_seconds=TEST_SLEEP_SECONDS,
)
def tearDown(self):
for tempdir in self.tempdirs:
shutil.rmtree(tempdir)
def test_gen_frameworks(self):
for i, fw in enumerate(self.pcf.mpc_frameworks):
self.assertTrue(isinstance(fw, DummyMPCFramework))
self.assertEqual(self.game, fw.game)
self.assertEqual(self.input_files[i], fw.input_file)
self.assertEqual(self.player, fw.player)
self.assertEqual(self.other_players, fw.other_players)
self.assertEqual(TEST_RUN_TIMEOUT, fw.run_timeout)
def test_prepare_input(self):
for status in Status:
for fw in self.pcf.mpc_frameworks:
fw.build(prepare_input=status)
self.assertEqual(status, wait(self.pcf.prepare_input()))
def test_run_mpc(self):
expected_1 = {"key1": 1.0, "key2": 2.5, "key3": 99.9}
expected_2 = {"key1": 9.0, "key2": 10.5, "key3": 199.9}
self.assertEqual(2, len(self.pcf.mpc_frameworks))
self.pcf.mpc_frameworks[0].build(run_mpc=expected_1.copy())
self.pcf.mpc_frameworks[1].build(run_mpc=expected_2.copy())
self.assertEqual(expected_1, wait(self.pcf.run_mpc())[0])
self.assertEqual(expected_2, wait(self.pcf.run_mpc())[1])
# Test on partner player too because it has a different logic in run_mpc
self.assertEqual(2, len(self.pcf_partner.mpc_frameworks))
self.pcf_partner.mpc_frameworks[0].build(run_mpc=expected_1.copy())
self.pcf_partner.mpc_frameworks[1].build(run_mpc=expected_2.copy())
self.assertEqual(expected_1, wait(self.pcf_partner.run_mpc())[0])
self.assertEqual(expected_2, wait(self.pcf_partner.run_mpc())[1])
|
[] |
[] |
[
"RUN_TIMEOUT"
] |
[]
|
["RUN_TIMEOUT"]
|
python
| 1 | 0 | |
util/pkg/vfs/osscontext.go
|
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package vfs
import (
"fmt"
"os"
"strings"
"github.com/denverdino/aliyungo/metadata"
"github.com/denverdino/aliyungo/oss"
)
type aliyunOSSConfig struct {
region oss.Region
internal bool
accessKeyID string
accessKeySecret string
securityToken string
secure bool
}
func NewOSSPath(client *oss.Client, bucket string, key string) (*OSSPath, error) {
bucket = strings.TrimSuffix(bucket, "/")
key = strings.TrimPrefix(key, "/")
return &OSSPath{
client: client,
bucket: bucket,
key: key,
}, nil
}
func NewAliOSSClient() (*oss.Client, error) {
c := &aliyunOSSConfig{}
err := c.loadConfig()
if err != nil {
return nil, fmt.Errorf("error building aliyun oss client: %v", err)
}
if c.securityToken != "" {
return oss.NewOSSClientForAssumeRole(c.region, c.internal, c.accessKeyID, c.accessKeySecret, c.securityToken, c.secure), nil
}
return oss.NewOSSClient(c.region, c.internal, c.accessKeyID, c.accessKeySecret, c.secure), nil
}
func (c *aliyunOSSConfig) loadConfig() error {
meta := metadata.NewMetaData(nil)
c.region = oss.Region(os.Getenv("OSS_REGION"))
if c.region == "" {
region, err := meta.Region()
if err != nil {
return fmt.Errorf("can't get region-id from ECS metadata")
}
c.region = oss.Region(fmt.Sprintf("oss-%s", region))
}
c.accessKeyID = os.Getenv("ALIYUN_ACCESS_KEY_ID")
if c.accessKeyID != "" {
c.accessKeySecret = os.Getenv("ALIYUN_ACCESS_KEY_SECRET")
if c.accessKeySecret == "" {
return fmt.Errorf("ALIYUN_ACCESS_KEY_SECRET cannot be empty")
}
} else {
role, err := meta.RoleName()
if err != nil {
return fmt.Errorf("Can't find role from ECS metadata: %s", err)
}
roleAuth, err := meta.RamRoleToken(role)
if err != nil {
return fmt.Errorf("Can't get role token: %s", err)
}
c.accessKeyID = roleAuth.AccessKeyId
c.accessKeySecret = roleAuth.AccessKeySecret
c.securityToken = roleAuth.SecurityToken
}
	c.internal = os.Getenv("ALIYUN_OSS_INTERNAL") != ""
c.secure = true
return nil
}
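// newOSSPathSketch is a minimal, hypothetical illustration of how the helpers
// above compose. "my-bucket" and "cluster/state" are placeholder values; the
// client resolves OSS_REGION / ALIYUN_ACCESS_KEY_ID / ALIYUN_ACCESS_KEY_SECRET
// from the environment or, failing that, from ECS instance metadata as
// implemented in loadConfig.
func newOSSPathSketch() (*OSSPath, error) {
	client, err := NewAliOSSClient()
	if err != nil {
		return nil, err
	}
	return NewOSSPath(client, "my-bucket", "cluster/state")
}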
|
[
"\"OSS_REGION\"",
"\"ALIYUN_ACCESS_KEY_ID\"",
"\"ALIYUN_ACCESS_KEY_SECRET\"",
"\"ALIYUN_OSS_INTERNAL\""
] |
[] |
[
"ALIYUN_ACCESS_KEY_SECRET",
"OSS_REGION",
"ALIYUN_OSS_INTERNAL",
"ALIYUN_ACCESS_KEY_ID"
] |
[]
|
["ALIYUN_ACCESS_KEY_SECRET", "OSS_REGION", "ALIYUN_OSS_INTERNAL", "ALIYUN_ACCESS_KEY_ID"]
|
go
| 4 | 0 | |
misc/cgo/testplugin/plugin_test.go
|
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package plugin_test
import (
"bytes"
"context"
"flag"
"fmt"
"log"
"os"
"os/exec"
"path/filepath"
"strings"
"testing"
"time"
)
var gcflags string = os.Getenv("GO_GCFLAGS")
var goroot string
func TestMain(m *testing.M) {
flag.Parse()
if testing.Short() && os.Getenv("GO_BUILDER_NAME") == "" {
fmt.Printf("SKIP - short mode and $GO_BUILDER_NAME not set\n")
os.Exit(0)
}
log.SetFlags(log.Lshortfile)
os.Exit(testMain(m))
}
// tmpDir is used to cleanup logged commands -- s/tmpDir/$TMPDIR/
var tmpDir string
// prettyPrintf prints lines with tmpDir sanitized.
func prettyPrintf(format string, args ...interface{}) {
s := fmt.Sprintf(format, args...)
if tmpDir != "" {
s = strings.ReplaceAll(s, tmpDir, "$TMPDIR")
}
fmt.Print(s)
}
func testMain(m *testing.M) int {
cwd, err := os.Getwd()
if err != nil {
log.Fatal(err)
}
goroot = filepath.Join(cwd, "../../..")
// Copy testdata into GOPATH/src/testplugin, along with a go.mod file
// declaring the same path.
GOPATH, err := os.MkdirTemp("", "plugin_test")
if err != nil {
log.Panic(err)
}
defer os.RemoveAll(GOPATH)
tmpDir = GOPATH
modRoot := filepath.Join(GOPATH, "src", "testplugin")
altRoot := filepath.Join(GOPATH, "alt", "src", "testplugin")
for srcRoot, dstRoot := range map[string]string{
"testdata": modRoot,
filepath.Join("altpath", "testdata"): altRoot,
} {
if err := overlayDir(dstRoot, srcRoot); err != nil {
log.Panic(err)
}
prettyPrintf("mkdir -p %s\n", dstRoot)
prettyPrintf("rsync -a %s/ %s\n", srcRoot, dstRoot)
if err := os.WriteFile(filepath.Join(dstRoot, "go.mod"), []byte("module testplugin\n"), 0666); err != nil {
log.Panic(err)
}
prettyPrintf("echo 'module testplugin' > %s/go.mod\n", dstRoot)
}
os.Setenv("GOPATH", filepath.Join(GOPATH, "alt"))
if err := os.Chdir(altRoot); err != nil {
log.Panic(err)
} else {
prettyPrintf("cd %s\n", altRoot)
}
os.Setenv("PWD", altRoot)
goCmd(nil, "build", "-buildmode=plugin", "-o", filepath.Join(modRoot, "plugin-mismatch.so"), "./plugin-mismatch")
os.Setenv("GOPATH", GOPATH)
if err := os.Chdir(modRoot); err != nil {
log.Panic(err)
} else {
prettyPrintf("cd %s\n", modRoot)
}
os.Setenv("PWD", modRoot)
os.Setenv("LD_LIBRARY_PATH", modRoot)
goCmd(nil, "build", "-buildmode=plugin", "./plugin1")
goCmd(nil, "build", "-buildmode=plugin", "./plugin2")
so, err := os.ReadFile("plugin2.so")
if err != nil {
log.Panic(err)
}
if err := os.WriteFile("plugin2-dup.so", so, 0444); err != nil {
log.Panic(err)
}
prettyPrintf("cp plugin2.so plugin2-dup.so\n")
goCmd(nil, "build", "-buildmode=plugin", "-o=sub/plugin1.so", "./sub/plugin1")
goCmd(nil, "build", "-buildmode=plugin", "-o=unnamed1.so", "./unnamed1/main.go")
goCmd(nil, "build", "-buildmode=plugin", "-o=unnamed2.so", "./unnamed2/main.go")
goCmd(nil, "build", "-o", "host.exe", "./host")
return m.Run()
}
func goCmd(t *testing.T, op string, args ...string) {
if t != nil {
t.Helper()
}
run(t, filepath.Join(goroot, "bin", "go"), append([]string{op, "-gcflags", gcflags}, args...)...)
}
// escape converts a string to something suitable for a shell command line.
func escape(s string) string {
s = strings.Replace(s, "\\", "\\\\", -1)
s = strings.Replace(s, "'", "\\'", -1)
// Conservative guess at characters that will force quoting
if s == "" || strings.ContainsAny(s, "\\ ;#*&$~?!|[]()<>{}`") {
s = "'" + s + "'"
}
return s
}
// asCommandLine renders cmd as something that could be copy-and-pasted into a command line
func asCommandLine(cwd string, cmd *exec.Cmd) string {
s := "("
if cmd.Dir != "" && cmd.Dir != cwd {
s += "cd" + escape(cmd.Dir) + ";"
}
for _, e := range cmd.Env {
if !strings.HasPrefix(e, "PATH=") &&
!strings.HasPrefix(e, "HOME=") &&
!strings.HasPrefix(e, "USER=") &&
!strings.HasPrefix(e, "SHELL=") {
s += " "
s += escape(e)
}
}
// These EVs are relevant to this test.
for _, e := range os.Environ() {
if strings.HasPrefix(e, "PWD=") ||
strings.HasPrefix(e, "GOPATH=") ||
strings.HasPrefix(e, "LD_LIBRARY_PATH=") {
s += " "
s += escape(e)
}
}
for _, a := range cmd.Args {
s += " "
s += escape(a)
}
s += " )"
return s
}
func run(t *testing.T, bin string, args ...string) string {
cmd := exec.Command(bin, args...)
cmdLine := asCommandLine(".", cmd)
prettyPrintf("%s\n", cmdLine)
cmd.Stderr = new(strings.Builder)
out, err := cmd.Output()
if err != nil {
if t == nil {
log.Panicf("%s: %v\n%s", strings.Join(cmd.Args, " "), err, cmd.Stderr)
} else {
t.Helper()
t.Fatalf("%s: %v\n%s", strings.Join(cmd.Args, " "), err, cmd.Stderr)
}
}
return string(bytes.TrimSpace(out))
}
func TestDWARFSections(t *testing.T) {
// test that DWARF sections are emitted for plugins and programs importing "plugin"
goCmd(t, "run", "./checkdwarf/main.go", "plugin2.so", "plugin2.UnexportedNameReuse")
goCmd(t, "run", "./checkdwarf/main.go", "./host.exe", "main.main")
}
func TestRunHost(t *testing.T) {
run(t, "./host.exe")
}
func TestUniqueTypesAndItabs(t *testing.T) {
goCmd(t, "build", "-buildmode=plugin", "./iface_a")
goCmd(t, "build", "-buildmode=plugin", "./iface_b")
goCmd(t, "build", "-o", "iface.exe", "./iface")
run(t, "./iface.exe")
}
func TestIssue18676(t *testing.T) {
// make sure we don't add the same itab twice.
// The buggy code hangs forever, so use a timeout to check for that.
goCmd(t, "build", "-buildmode=plugin", "-o", "plugin.so", "./issue18676/plugin.go")
goCmd(t, "build", "-o", "issue18676.exe", "./issue18676/main.go")
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
cmd := exec.CommandContext(ctx, "./issue18676.exe")
out, err := cmd.CombinedOutput()
if err != nil {
t.Fatalf("%s: %v\n%s", strings.Join(cmd.Args, " "), err, out)
}
}
func TestIssue19534(t *testing.T) {
// Test that we can load a plugin built in a path with non-alpha characters.
goCmd(t, "build", "-buildmode=plugin", "-gcflags=-p=issue.19534", "-ldflags=-pluginpath=issue.19534", "-o", "plugin.so", "./issue19534/plugin.go")
goCmd(t, "build", "-o", "issue19534.exe", "./issue19534/main.go")
run(t, "./issue19534.exe")
}
func TestIssue18584(t *testing.T) {
goCmd(t, "build", "-buildmode=plugin", "-o", "plugin.so", "./issue18584/plugin.go")
goCmd(t, "build", "-o", "issue18584.exe", "./issue18584/main.go")
run(t, "./issue18584.exe")
}
func TestIssue19418(t *testing.T) {
goCmd(t, "build", "-buildmode=plugin", "-ldflags=-X main.Val=linkstr", "-o", "plugin.so", "./issue19418/plugin.go")
goCmd(t, "build", "-o", "issue19418.exe", "./issue19418/main.go")
run(t, "./issue19418.exe")
}
func TestIssue19529(t *testing.T) {
goCmd(t, "build", "-buildmode=plugin", "-o", "plugin.so", "./issue19529/plugin.go")
}
func TestIssue22175(t *testing.T) {
goCmd(t, "build", "-buildmode=plugin", "-o", "issue22175_plugin1.so", "./issue22175/plugin1.go")
goCmd(t, "build", "-buildmode=plugin", "-o", "issue22175_plugin2.so", "./issue22175/plugin2.go")
goCmd(t, "build", "-o", "issue22175.exe", "./issue22175/main.go")
run(t, "./issue22175.exe")
}
func TestIssue22295(t *testing.T) {
goCmd(t, "build", "-buildmode=plugin", "-o", "issue.22295.so", "./issue22295.pkg")
goCmd(t, "build", "-o", "issue22295.exe", "./issue22295.pkg/main.go")
run(t, "./issue22295.exe")
}
func TestIssue24351(t *testing.T) {
goCmd(t, "build", "-buildmode=plugin", "-o", "issue24351.so", "./issue24351/plugin.go")
goCmd(t, "build", "-o", "issue24351.exe", "./issue24351/main.go")
run(t, "./issue24351.exe")
}
func TestIssue25756(t *testing.T) {
goCmd(t, "build", "-buildmode=plugin", "-o", "life.so", "./issue25756/plugin")
goCmd(t, "build", "-o", "issue25756.exe", "./issue25756/main.go")
// Fails intermittently, but 20 runs should cause the failure
for n := 20; n > 0; n-- {
t.Run(fmt.Sprint(n), func(t *testing.T) {
t.Parallel()
run(t, "./issue25756.exe")
})
}
}
// Test with main using -buildmode=pie with plugin for issue #43228
func TestIssue25756pie(t *testing.T) {
goCmd(t, "build", "-buildmode=plugin", "-o", "life.so", "./issue25756/plugin")
goCmd(t, "build", "-buildmode=pie", "-o", "issue25756pie.exe", "./issue25756/main.go")
run(t, "./issue25756pie.exe")
}
func TestMethod(t *testing.T) {
// Exported symbol's method must be live.
goCmd(t, "build", "-buildmode=plugin", "-o", "plugin.so", "./method/plugin.go")
goCmd(t, "build", "-o", "method.exe", "./method/main.go")
run(t, "./method.exe")
}
func TestMethod2(t *testing.T) {
goCmd(t, "build", "-buildmode=plugin", "-o", "method2.so", "./method2/plugin.go")
goCmd(t, "build", "-o", "method2.exe", "./method2/main.go")
run(t, "./method2.exe")
}
func TestMethod3(t *testing.T) {
goCmd(t, "build", "-buildmode=plugin", "-o", "method3.so", "./method3/plugin.go")
goCmd(t, "build", "-o", "method3.exe", "./method3/main.go")
run(t, "./method3.exe")
}
func TestIssue44956(t *testing.T) {
goCmd(t, "build", "-buildmode=plugin", "-o", "issue44956p1.so", "./issue44956/plugin1.go")
goCmd(t, "build", "-buildmode=plugin", "-o", "issue44956p2.so", "./issue44956/plugin2.go")
goCmd(t, "build", "-o", "issue44956.exe", "./issue44956/main.go")
run(t, "./issue44956.exe")
}
func TestIssue52937(t *testing.T) {
goCmd(t, "build", "-buildmode=plugin", "-o", "issue52937.so", "./issue52937/main.go")
}
func TestForkExec(t *testing.T) {
// Issue 38824: importing the plugin package causes it hang in forkExec on darwin.
t.Parallel()
goCmd(t, "build", "-o", "forkexec.exe", "./forkexec/main.go")
var cmd *exec.Cmd
done := make(chan int, 1)
go func() {
for i := 0; i < 100; i++ {
cmd = exec.Command("./forkexec.exe", "1")
err := cmd.Run()
if err != nil {
t.Errorf("running command failed: %v", err)
break
}
}
done <- 1
}()
select {
case <-done:
case <-time.After(5 * time.Minute):
cmd.Process.Kill()
t.Fatalf("subprocess hang")
}
}
|
[
"\"GO_GCFLAGS\"",
"\"GO_BUILDER_NAME\""
] |
[] |
[
"GO_BUILDER_NAME",
"GO_GCFLAGS"
] |
[]
|
["GO_BUILDER_NAME", "GO_GCFLAGS"]
|
go
| 2 | 0 | |
templates/stm32f407-atk-explorer/osconfig.py
|
import os
# toolchains options
ARCH = 'arm'
CPU = 'cortex-m4'
CROSS_TOOL = 'gcc'
# bsp lib config
BSP_LIBRARY_TYPE = None
if os.getenv('OS_CC'):
CROSS_TOOL = os.getenv('OS_CC')
if os.getenv('OS_ROOT'):
OS_ROOT = os.getenv('OS_ROOT')
# cross_tool provides the cross compiler
# COMPILER_PATH is the compiler's executable path, for example, CodeSourcery, Keil MDK, or IAR
if CROSS_TOOL == 'gcc':
COMPILER = 'gcc'
COMPILER_PATH = ''
elif CROSS_TOOL == 'keil':
COMPILER = 'armcc'
    # Note: the armcc installation path must not contain Chinese characters
COMPILER_PATH = r'C:/Keil_v5'
elif CROSS_TOOL == 'iar':
COMPILER = 'iar'
COMPILER_PATH = r'C:/Program Files (x86)/IAR Systems/Embedded Workbench 8.0'
#BUILD = 'release'
BUILD = 'debug'
if COMPILER == 'gcc':
# toolchains
if COMPILER_PATH == '':
COMPILER_PATH = os.getenv('OS_EXEC_PATH')
PREFIX = 'arm-none-eabi-'
CC = PREFIX + 'gcc'
AS = PREFIX + 'gcc'
AR = PREFIX + 'ar'
CXX = PREFIX + 'g++'
LINK = PREFIX + 'gcc'
RESULT_SUFFIX = 'elf'
SIZE = PREFIX + 'size'
OBJDUMP = PREFIX + 'objdump'
OBJCPY = PREFIX + 'objcopy'
DEVICE = ' -mcpu=cortex-m4 -mthumb -mfpu=fpv4-sp-d16 -mfloat-abi=hard -ffunction-sections -fdata-sections'
CFLAGS = DEVICE + ' -Dgcc'
AFLAGS = ' -c' + DEVICE + ' -x assembler-with-cpp -Wa,-mimplicit-it=thumb '
LFLAGS = DEVICE + ' -Wl,--gc-sections,-Map=oneos.map,-cref,-u,Reset_Handler -T board/linker_scripts/link.lds'
CPATH = ''
LPATH = ''
if BUILD == 'debug':
CFLAGS += ' -O0 -gdwarf-2 -g'
AFLAGS += ' -gdwarf-2'
else:
CFLAGS += ' -O2'
CXXFLAGS = CFLAGS
POST_ACTION = OBJCPY + ' -R .reserved_ram -O binary $TARGET oneos.bin\n' + SIZE + ' $TARGET \n'
elif COMPILER == 'armcc':
# toolchains
CC = 'armcc'
CXX = 'armcc'
AS = 'armasm'
AR = 'armar'
LINK = 'armlink'
RESULT_SUFFIX = 'axf'
DEVICE = ' --cpu Cortex-M4.fp '
CFLAGS = '-c ' + DEVICE + ' --apcs=interwork --split_sections --c99'
AFLAGS = DEVICE + ' --apcs=interwork '
LFLAGS = DEVICE + ' --scatter "board\linker_scripts\link.sct" --info sizes --info totals --info unused --info veneers --list oneos.map --strict'
CFLAGS += ' -I "' + COMPILER_PATH + '/ARM/ARMCC/include"'
LFLAGS += ' --libpath="' + COMPILER_PATH + '/ARM/ARMCC/lib"'
#CFLAGS += ' -D__MICROLIB '
#AFLAGS += ' --pd "__MICROLIB SETA 1" '
#LFLAGS += ' --library_type=microlib '
COMPILER_PATH += '/ARM/ARMCC/bin/'
if BUILD == 'debug':
CFLAGS += ' -g -O0'
AFLAGS += ' -g'
else:
CFLAGS += ' -O2'
CXXFLAGS = CFLAGS
POST_ACTION = 'fromelf --bin $TARGET --output oneos.bin \nfromelf -z $TARGET'
elif COMPILER == 'iar':
# toolchains
CC = 'iccarm'
CXX = 'iccarm'
AS = 'iasmarm'
AR = 'iarchive'
LINK = 'ilinkarm'
RESULT_SUFFIX = 'out'
DEVICE = '-Dewarm'
CFLAGS = DEVICE
CFLAGS += ' --diag_suppress Pa050'
CFLAGS += ' --no_cse'
CFLAGS += ' --no_unroll'
CFLAGS += ' --no_inline'
CFLAGS += ' --no_code_motion'
CFLAGS += ' --no_tbaa'
CFLAGS += ' --no_clustering'
CFLAGS += ' --no_scheduling'
CFLAGS += ' --endian=little'
CFLAGS += ' --cpu=Cortex-M4'
CFLAGS += ' -e'
CFLAGS += ' --fpu=VFPv4_sp'
CFLAGS += ' --dlib_config "' + COMPILER_PATH + '/arm/INC/c/DLib_Config_Normal.h"'
CFLAGS += ' --silent'
AFLAGS = DEVICE
AFLAGS += ' -s+'
AFLAGS += ' -w+'
AFLAGS += ' -r'
AFLAGS += ' --cpu Cortex-M4'
AFLAGS += ' --fpu VFPv4_sp'
AFLAGS += ' -S'
if BUILD == 'debug':
CFLAGS += ' --debug'
CFLAGS += ' -On'
else:
CFLAGS += ' -Oh'
LFLAGS = ' --config "board/linker_scripts/link.icf"'
LFLAGS += ' --entry __iar_program_start'
CXXFLAGS = CFLAGS
COMPILER_PATH = COMPILER_PATH + '/arm/bin/'
POST_ACTION = 'ielftool --bin $TARGET oneos.bin'
|
[] |
[] |
[
"OS_ROOT",
"OS_EXEC_PATH",
"OS_CC"
] |
[]
|
["OS_ROOT", "OS_EXEC_PATH", "OS_CC"]
|
python
| 3 | 0 | |
src/testing/host-target-testing/artifacts/build.go
|
// Copyright 2019 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package artifacts
import (
"context"
"encoding/json"
"fmt"
"io/ioutil"
"os"
"path"
"path/filepath"
"golang.org/x/crypto/ssh"
"go.fuchsia.dev/fuchsia/src/testing/host-target-testing/avb"
"go.fuchsia.dev/fuchsia/src/testing/host-target-testing/packages"
"go.fuchsia.dev/fuchsia/src/testing/host-target-testing/paver"
"go.fuchsia.dev/fuchsia/src/testing/host-target-testing/util"
"go.fuchsia.dev/fuchsia/src/testing/host-target-testing/zbi"
"go.fuchsia.dev/fuchsia/tools/lib/logger"
)
type Build interface {
// GetBootserver returns the path to the bootserver used for paving.
GetBootserver(ctx context.Context) (string, error)
// GetPackageRepository returns a Repository for this build.
GetPackageRepository(ctx context.Context) (*packages.Repository, error)
// GetPaverDir downloads and returns the directory containing the images
// and image manifest.
GetPaverDir(ctx context.Context) (string, error)
// GetPaver downloads and returns a paver for the build.
GetPaver(ctx context.Context) (paver.Paver, error)
// GetSshPublicKey returns the SSH public key used by this build's paver.
GetSshPublicKey() ssh.PublicKey
// GetVbmetaPath downloads and returns a path to the zircon-a vbmeta image.
GetVbmetaPath(ctx context.Context) (string, error)
}
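// paveSketch is a minimal, hypothetical illustration of how a Build value is
// meant to be consumed; "fuchsia-device" is a placeholder node name and error
// handling is reduced to the bare minimum.
func paveSketch(ctx context.Context, b Build) error {
	p, err := b.GetPaver(ctx)
	if err != nil {
		return err
	}
	return p.Pave(ctx, "fuchsia-device")
}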
// ArchiveBuild represents build artifacts constructed from archives produced by
// the build.
// TODO(fxbug.dev/52021): Remove when no longer using archives. Since this is to
// be deprecated, it should only be used as the backupArchiveBuild of an
// ArtifactsBuild and does not completely implement the Build interface.
type ArchiveBuild struct {
id string
archive *Archive
dir string
packages *packages.Repository
buildArchiveDir string
}
// GetPackageRepository returns a Repository for this build constructed from the
// packages.tar.gz archive.
func (b *ArchiveBuild) GetPackageRepository(ctx context.Context) (*packages.Repository, error) {
if b.packages != nil {
return b.packages, nil
}
archive := "packages.tar.gz"
path := filepath.Join(b.dir, b.id, archive)
if err := b.archive.download(ctx, b.id, false, path, []string{archive}); err != nil {
return nil, fmt.Errorf("failed to download packages.tar.gz: %w", err)
}
packagesDir := filepath.Join(b.dir, b.id, "packages")
if err := os.MkdirAll(packagesDir, 0755); err != nil {
return nil, err
}
p, err := packages.NewRepositoryFromTar(ctx, packagesDir, path)
if err != nil {
return nil, err
}
b.packages = p
return b.packages, nil
}
// GetBuildArchive downloads and extracts the build-archive.tgz from the
// build id `buildId`. Returns a path to the directory of the extracted files,
// or an error if it fails to download or extract.
func (b *ArchiveBuild) GetBuildArchive(ctx context.Context) (string, error) {
if b.buildArchiveDir != "" {
return b.buildArchiveDir, nil
}
archive := "build-archive.tgz"
path := filepath.Join(b.dir, b.id, archive)
if err := b.archive.download(ctx, b.id, false, path, []string{archive}); err != nil {
return "", fmt.Errorf("failed to download build-archive.tar.gz: %w", err)
}
buildArchiveDir := filepath.Join(b.dir, b.id, "build-archive")
if err := os.MkdirAll(buildArchiveDir, 0755); err != nil {
return "", err
}
if err := util.Untar(ctx, buildArchiveDir, path); err != nil {
return "", fmt.Errorf("failed to extract packages: %w", err)
}
b.buildArchiveDir = buildArchiveDir
return b.buildArchiveDir, nil
}
// ArtifactsBuild represents the build artifacts for a specific build.
type ArtifactsBuild struct {
backupArchiveBuild *ArchiveBuild
id string
archive *Archive
dir string
packages *packages.Repository
buildImageDir string
sshPublicKey ssh.PublicKey
}
func (b *ArtifactsBuild) GetBootserver(ctx context.Context) (string, error) {
buildPaver, err := b.getPaver(ctx)
if err != nil {
return "", err
}
return buildPaver.BootserverPath, nil
}
type blob struct {
// Merkle is the merkle associated with a blob.
Merkle string `json:"merkle"`
}
// GetPackageRepository returns a Repository for this build.
func (b *ArtifactsBuild) GetPackageRepository(ctx context.Context) (*packages.Repository, error) {
if b.packages != nil {
return b.packages, nil
}
artifact := "packages"
packagesDir := filepath.Join(b.dir, b.id, artifact)
if err := b.archive.download(ctx, b.id, false, packagesDir, []string{artifact}); err != nil {
logger.Infof(ctx, "failed to fetch artifacts for build %s. Using archives.", b.id)
b.packages, err = b.backupArchiveBuild.GetPackageRepository(ctx)
return b.packages, err
}
blobsManifest := filepath.Join(packagesDir, "all_blobs.json")
blobsData, err := ioutil.ReadFile(blobsManifest)
if err != nil {
if os.IsNotExist(err) {
logger.Infof(ctx, "blobs manifest doesn't exist for build %s yet. Using archives.", b.id)
b.packages, err = b.backupArchiveBuild.GetPackageRepository(ctx)
return b.packages, err
}
return nil, fmt.Errorf("failed to read blobs manifest: %w", err)
}
var blobs []blob
err = json.Unmarshal(blobsData, &blobs)
if err != nil {
return nil, fmt.Errorf("failed to unmarshal blobs JSON: %w", err)
}
var blobsList []string
for _, b := range blobs {
blobsList = append(blobsList, filepath.Join("blobs", b.Merkle))
}
logger.Infof(ctx, "all_blobs contains %d blobs", len(blobsList))
p, err := packages.NewRepository(ctx, packagesDir)
if err != nil {
return nil, err
}
b.packages = p
repoDir := filepath.Join(packagesDir, "repository")
if err := b.archive.download(ctx, b.id, true, repoDir, blobsList); err != nil {
logger.Errorf(ctx, "failed to download blobs to %s: %v", repoDir, err)
return nil, fmt.Errorf("failed to download blobs to %s: %w", repoDir, err)
}
return b.packages, nil
}
// GetBuildImages downloads the build images for a specific build id.
// Returns a path to the directory of the downloaded images or an error if it
// fails to download.
func (b *ArtifactsBuild) GetBuildImages(ctx context.Context) (string, error) {
if b.buildImageDir != "" {
return b.buildImageDir, nil
}
artifact := "images"
imageDir := filepath.Join(b.dir, b.id, artifact)
if err := b.archive.download(ctx, b.id, false, imageDir, []string{artifact}); err != nil {
logger.Infof(ctx, "failed to fetch artifacts for build %s. Using archives.", b.id)
b.buildImageDir, err = b.backupArchiveBuild.GetBuildArchive(ctx)
return b.buildImageDir, err
}
b.buildImageDir = imageDir
return b.buildImageDir, nil
}
func (b *ArtifactsBuild) GetPaverDir(ctx context.Context) (string, error) {
return b.GetBuildImages(ctx)
}
// GetPaver downloads and returns a paver for the build.
func (b *ArtifactsBuild) GetPaver(ctx context.Context) (paver.Paver, error) {
return b.getPaver(ctx)
}
func (b *ArtifactsBuild) getPaver(ctx context.Context) (*paver.BuildPaver, error) {
buildImageDir, err := b.GetBuildImages(ctx)
if err != nil {
return nil, err
}
currentBuildId := os.Getenv("BUILDBUCKET_ID")
if currentBuildId == "" {
currentBuildId = b.id
}
// Use the latest bootserver if possible because the one uploaded with the artifacts may not include bug fixes.
bootserverPath := filepath.Join(buildImageDir, "bootserver")
if err := b.archive.download(ctx, currentBuildId, false, bootserverPath, []string{"tools/linux-x64/bootserver"}); err != nil {
return nil, fmt.Errorf("failed to download bootserver: %w", err)
}
// Make bootserver executable.
if err := os.Chmod(bootserverPath, os.ModePerm); err != nil {
return nil, fmt.Errorf("failed to make bootserver executable: %w", err)
}
return paver.NewBuildPaver(bootserverPath, buildImageDir, paver.SSHPublicKey(b.sshPublicKey))
}
func (b *ArtifactsBuild) GetSshPublicKey() ssh.PublicKey {
return b.sshPublicKey
}
func (b *ArtifactsBuild) GetVbmetaPath(ctx context.Context) (string, error) {
buildImageDir, err := b.GetBuildImages(ctx)
if err != nil {
return "", err
}
imagesJSON := filepath.Join(buildImageDir, paver.ImageManifest)
f, err := os.Open(imagesJSON)
if err != nil {
return "", fmt.Errorf("failed to open %q: %w", imagesJSON, err)
}
defer f.Close()
var items []struct {
Name string `json:"name"`
Path string `json:"path"`
Type string `json:"type"`
}
if err := json.NewDecoder(f).Decode(&items); err != nil {
return "", fmt.Errorf("failed to parse %q: %w", imagesJSON, err)
}
for _, item := range items {
if item.Name == "zircon-a" && item.Type == "vbmeta" {
return filepath.Join(buildImageDir, item.Path), nil
}
}
return "", fmt.Errorf("failed to file zircon-a vbmeta in %q", imagesJSON)
}
func (b *ArtifactsBuild) Pave(ctx context.Context, deviceName string) error {
paver, err := b.GetPaver(ctx)
if err != nil {
return err
}
return paver.Pave(ctx, deviceName)
}
func (b *ArtifactsBuild) String() string {
return b.id
}
type FuchsiaDirBuild struct {
dir string
sshPublicKey ssh.PublicKey
}
func NewFuchsiaDirBuild(dir string, publicKey ssh.PublicKey) *FuchsiaDirBuild {
return &FuchsiaDirBuild{dir: dir, sshPublicKey: publicKey}
}
func (b *FuchsiaDirBuild) String() string {
return b.dir
}
func (b *FuchsiaDirBuild) GetBootserver(ctx context.Context) (string, error) {
return filepath.Join(b.dir, "host_x64/bootserver_new"), nil
}
func (b *FuchsiaDirBuild) GetPackageRepository(ctx context.Context) (*packages.Repository, error) {
return packages.NewRepository(ctx, filepath.Join(b.dir, "amber-files"))
}
func (b *FuchsiaDirBuild) GetPaverDir(ctx context.Context) (string, error) {
return b.dir, nil
}
func (b *FuchsiaDirBuild) GetPaver(ctx context.Context) (paver.Paver, error) {
return paver.NewBuildPaver(
filepath.Join(b.dir, "host_x64/bootserver_new"),
b.dir,
paver.SSHPublicKey(b.sshPublicKey),
)
}
func (b *FuchsiaDirBuild) GetSshPublicKey() ssh.PublicKey {
return b.sshPublicKey
}
func (b *FuchsiaDirBuild) GetVbmetaPath(ctx context.Context) (string, error) {
imagesJSON := filepath.Join(b.dir, paver.ImageManifest)
f, err := os.Open(imagesJSON)
if err != nil {
return "", fmt.Errorf("failed to open %q: %w", imagesJSON, err)
}
defer f.Close()
var items []struct {
Name string `json:"name"`
Path string `json:"path"`
Type string `json:"type"`
}
if err := json.NewDecoder(f).Decode(&items); err != nil {
return "", fmt.Errorf("failed to parse %q: %w", imagesJSON, err)
}
for _, item := range items {
if item.Name == "zircon-a" && item.Type == "vbmeta" {
return filepath.Join(b.dir, item.Path), nil
}
}
return "", fmt.Errorf("failed to file zircon-a vbmeta in %q", imagesJSON)
}
type OmahaBuild struct {
build Build
omahaUrl string
avbtool *avb.AVBTool
zbitool *zbi.ZBITool
}
func NewOmahaBuild(build Build, omahaUrl string, avbtool *avb.AVBTool, zbitool *zbi.ZBITool) *OmahaBuild {
return &OmahaBuild{build: build, omahaUrl: omahaUrl, avbtool: avbtool, zbitool: zbitool}
}
func (b *OmahaBuild) GetBootserver(ctx context.Context) (string, error) {
return b.build.GetBootserver(ctx)
}
// GetPackageRepository returns a Repository for this build.
func (b *OmahaBuild) GetPackageRepository(ctx context.Context) (*packages.Repository, error) {
return b.build.GetPackageRepository(ctx)
}
func (b *OmahaBuild) GetPaverDir(ctx context.Context) (string, error) {
return b.build.GetPaverDir(ctx)
}
// GetPaver downloads and returns a paver for the build.
func (b *OmahaBuild) GetPaver(ctx context.Context) (paver.Paver, error) {
paverDir, err := b.GetPaverDir(ctx)
if err != nil {
return nil, err
}
bootserverPath, err := b.GetBootserver(ctx)
if err != nil {
return nil, err
}
	// Create a temporary directory to hold the generated images.
tempDir, err := ioutil.TempDir("", "")
if err != nil {
return nil, fmt.Errorf("failed to create temp directory: %w", err)
}
defer os.RemoveAll(tempDir)
// Create a ZBI with the omaha_url argument.
destZbiPath := path.Join(tempDir, "omaha_argument.zbi")
imageArguments := map[string]string{
"omaha_url": b.omahaUrl,
}
if err := b.zbitool.MakeImageArgsZbi(ctx, destZbiPath, imageArguments); err != nil {
return nil, fmt.Errorf("Failed to create ZBI: %w", err)
}
// Create a vbmeta that includes the ZBI we just created.
propFiles := map[string]string{
"zbi": destZbiPath,
}
destVbmetaPath := filepath.Join(paverDir, "zircon-a-omaha-test.vbmeta")
srcVbmetaPath, err := b.GetVbmetaPath(ctx)
if err != nil {
return nil, fmt.Errorf("failed to find zircon-a vbmeta: %w", err)
}
err = b.avbtool.MakeVBMetaImage(ctx, destVbmetaPath, srcVbmetaPath, propFiles)
if err != nil {
return nil, fmt.Errorf("failed to create vbmeta: %w", err)
}
return paver.NewBuildPaver(
bootserverPath,
paverDir,
paver.SSHPublicKey(b.GetSshPublicKey()),
paver.OverrideVBMetaA(destVbmetaPath),
)
}
func (b *OmahaBuild) GetSshPublicKey() ssh.PublicKey {
return b.build.GetSshPublicKey()
}
func (b *OmahaBuild) GetVbmetaPath(ctx context.Context) (string, error) {
return b.build.GetVbmetaPath(ctx)
}
|
[
"\"BUILDBUCKET_ID\""
] |
[] |
[
"BUILDBUCKET_ID"
] |
[]
|
["BUILDBUCKET_ID"]
|
go
| 1 | 0 | |
src/gluonnlp/data/utils.py
|
# coding: utf-8
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Utility classes and functions. They help organize and keep statistics of datasets."""
from __future__ import absolute_import
from __future__ import print_function
__all__ = [
'Counter', 'count_tokens', 'concat_sequence', 'slice_sequence', 'train_valid_split',
'line_splitter', 'whitespace_splitter', 'Splitter'
]
import os
import collections
import zipfile
import tarfile
import numpy as np
from mxnet.gluon.data import SimpleDataset
from mxnet.gluon.utils import _get_repo_url, download, check_sha1
from .. import _constants as C
class Counter(collections.Counter): # pylint: disable=abstract-method
"""Counter class for keeping token frequencies."""
def discard(self, min_freq, unknown_token):
"""Discards tokens with frequency below min_frequency and represents them
as `unknown_token`.
Parameters
----------
min_freq: int
Tokens whose frequency is under min_freq is counted as `unknown_token` in
the Counter returned.
unknown_token: str
The representation for any unknown token.
Returns
-------
The Counter instance.
Examples
--------
>>> a = gluonnlp.data.Counter({'a': 10, 'b': 1, 'c': 1})
>>> a.discard(3, '<unk>')
Counter({'a': 10, '<unk>': 2})
"""
freq = 0
ret = Counter({})
for token, count in self.items():
if count < min_freq:
freq += count
else:
ret[token] = count
ret[unknown_token] = ret.get(unknown_token, 0) + freq
return ret
class DefaultLookupDict(dict):
"""Dictionary class with fall-back look-up with default value set in the constructor."""
def __init__(self, default, d=None):
if d:
super(DefaultLookupDict, self).__init__(d)
else:
super(DefaultLookupDict, self).__init__()
self._default = default
def __getitem__(self, k):
return self.get(k, self._default)
def count_tokens(tokens, to_lower=False, counter=None):
r"""Counts tokens in the specified string.
For token_delim='(td)' and seq_delim='(sd)', a specified string of two sequences of tokens may
look like::
(td)token1(td)token2(td)token3(td)(sd)(td)token4(td)token5(td)(sd)
Parameters
----------
tokens : list of str
A source list of tokens.
to_lower : bool, default False
        Whether to convert the source tokens to lower case.
counter : Counter or None, default None
The Counter instance to be updated with the counts of `tokens`. If
None, return a new Counter instance counting tokens from `tokens`.
Returns
-------
    The `counter` Counter instance after being updated with the token
    counts of `tokens`. If `counter` is None, return a new Counter
    instance counting tokens from `tokens`.
Examples
--------
>>> import re
>>> source_str = ' Life is great ! \n life is good . \n'
>>> source_str_tokens = filter(None, re.split(' |\n', source_str))
>>> gluonnlp.data.count_tokens(source_str_tokens)
Counter({'is': 2, 'Life': 1, 'great': 1, '!': 1, 'life': 1, 'good': 1, '.': 1})
"""
if to_lower:
tokens = [t.lower() for t in tokens]
if counter is None:
return Counter(tokens)
else:
counter.update(tokens)
return counter
def concat_sequence(sequences):
"""Concatenate sequences of tokens into a single flattened list of tokens.
Parameters
----------
sequences : list of list of object
Sequences of tokens, each of which is an iterable of tokens.
Returns
-------
Flattened list of tokens.
"""
return [token for seq in sequences for token in seq if token]
def slice_sequence(sequence, length, pad_last=False, pad_val=C.PAD_TOKEN, overlap=0):
"""Slice a flat sequence of tokens into sequences tokens, with each
inner sequence's length equal to the specified `length`, taking into account the requested
sequence overlap.
Parameters
----------
sequence : list of object
A flat list of tokens.
length : int
The length of each of the samples.
pad_last : bool, default False
Whether to pad the last sequence when its length doesn't align. If the last sequence's
length doesn't align and ``pad_last`` is False, it will be dropped.
    pad_val : object, default C.PAD_TOKEN
The padding value to use when the padding of the last sequence is enabled. In general,
the type of ``pad_val`` should be the same as the tokens.
overlap : int, default 0
The extra number of items in current sample that should overlap with the
next sample.
Returns
-------
List of list of tokens, with the length of each inner list equal to `length`.
"""
if length <= overlap:
raise ValueError('length needs to be larger than overlap')
if pad_last:
pad_len = _slice_pad_length(len(sequence), length, overlap)
sequence = sequence + [pad_val] * pad_len
num_samples = (len(sequence)-length) // (length-overlap) + 1
return [sequence[i*(length-overlap):((i+1)*length-i*overlap)] for i in range(num_samples)]
def _slice_pad_length(num_items, length, overlap=0):
"""Calculate the padding length needed for sliced samples in order not to discard data.
Parameters
----------
num_items : int
Number of items in dataset before collating.
length : int
The length of each of the samples.
overlap : int, default 0
The extra number of items in current sample that should overlap with the
next sample.
Returns
-------
Length of paddings.
"""
if length <= overlap:
raise ValueError('length needs to be larger than overlap')
step = length-overlap
span = num_items-length
residual = span % step
if residual:
return step - residual
else:
return 0
_vocab_sha1 = {'wikitext-2': 'be36dc5238c2e7d69720881647ab72eb506d0131',
'gbw': 'ebb1a287ca14d8fa6f167c3a779e5e7ed63ac69f',
'WMT2014_src': '230ebb817b1d86950d71e2e765f192a4e4f34415',
'WMT2014_tgt': '230ebb817b1d86950d71e2e765f192a4e4f34415',
'book_corpus_wiki_en_cased': '2d62af22535ed51f35cc8e2abb607723c89c2636',
'book_corpus_wiki_en_uncased': 'a66073971aa0b1a262453fe51342e57166a8abcf',
'wiki_multilingual_cased': '71bb9e248dc75dce9227d3c8c16fde3993588b9e',
'wiki_cn': 'a1e06f8e39ae51ab8a92b8458e6a658b8b1f72bf',
'wiki_multilingual': '2b2514cc539047b9179e9d98a4e68c36db05c97a'}
_url_format = '{repo_url}gluon/dataset/vocab/{file_name}.zip'
def train_valid_split(dataset, valid_ratio=0.05):
"""Split the dataset into training and validation sets.
Parameters
----------
    dataset : list
        A list of samples to split into training and validation sets.
valid_ratio : float, default 0.05
Proportion of training samples to use for validation set
range: [0, 1]
Returns
-------
train : SimpleDataset
valid : SimpleDataset
"""
if not 0.0 <= valid_ratio <= 1.0:
raise ValueError('valid_ratio should be in [0, 1]')
num_train = len(dataset)
num_valid = np.ceil(num_train * valid_ratio).astype('int')
indices = np.arange(num_train)
np.random.shuffle(indices)
valid = SimpleDataset([dataset[indices[i]] for i in range(num_valid)])
train = SimpleDataset([dataset[indices[i + num_valid]] for i in range(num_train - num_valid)])
return train, valid
def short_hash(name):
if name not in _vocab_sha1:
raise ValueError('Vocabulary for {name} is not available.'.format(name=name))
return _vocab_sha1[name][:8]
def _load_pretrained_vocab(name, root=os.path.join('~', '.mxnet', 'models'), cls=None):
"""Load the accompanying vocabulary object for pre-trained model.
Parameters
----------
name : str
Name of the vocabulary, usually the name of the dataset.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
cls : nlp.Vocab or nlp.vocab.BERTVocab, default nlp.Vocab
Returns
-------
Vocab or nlp.bert.BERTVocab
Loaded vocabulary object for the pre-trained model.
"""
file_name = '{name}-{short_hash}'.format(name=name,
short_hash=short_hash(name))
root = os.path.expanduser(root)
file_path = os.path.join(root, file_name+'.vocab')
sha1_hash = _vocab_sha1[name]
if os.path.exists(file_path):
if check_sha1(file_path, sha1_hash):
return _load_vocab_file(file_path, cls)
else:
print('Detected mismatch in the content of model vocab file. Downloading again.')
else:
print('Vocab file is not found. Downloading.')
if not os.path.exists(root):
os.makedirs(root)
zip_file_path = os.path.join(root, file_name+'.zip')
repo_url = _get_repo_url()
if repo_url[-1] != '/':
repo_url = repo_url + '/'
download(_url_format.format(repo_url=repo_url, file_name=file_name),
path=zip_file_path,
overwrite=True)
with zipfile.ZipFile(zip_file_path) as zf:
zf.extractall(root)
os.remove(zip_file_path)
if check_sha1(file_path, sha1_hash):
return _load_vocab_file(file_path, cls)
else:
raise ValueError('Downloaded file has different hash. Please try again.')
def _load_vocab_file(file_path, cls):
with open(file_path, 'r') as f:
if cls is None:
from ..vocab import Vocab
cls = Vocab
return cls.from_json(f.read())
def _get_home_dir():
"""Get home directory for storing datasets/models/pre-trained word embeddings"""
_home_dir = os.environ.get('MXNET_HOME', os.path.join('~', '.mxnet'))
# expand ~ to actual path
_home_dir = os.path.expanduser(_home_dir)
return _home_dir
def _extract_archive(file, target_dir):
"""Extract archive file
Parameters
----------
file : str
Absolute path of the archive file.
target_dir : str
Target directory of the archive to be uncompressed
"""
if file.endswith('.gz') or file.endswith('.tar') or file.endswith('.tgz'):
archive = tarfile.open(file, 'r')
elif file.endswith('.zip'):
archive = zipfile.ZipFile(file, 'r')
else:
raise Exception('Unrecognized file type: ' + file)
archive.extractall(path=target_dir)
archive.close()
def line_splitter(s):
"""Split a string at newlines.
Parameters
----------
s : str
The string to be split
Returns
--------
List[str]
List of strings. Obtained by calling s.splitlines().
"""
return s.splitlines()
def whitespace_splitter(s):
"""Split a string at whitespace (space, tab, newline, return, formfeed).
Parameters
----------
s : str
The string to be split
Returns
--------
List[str]
List of strings. Obtained by calling s.split().
"""
return s.split()
class Splitter(object):
"""Split a string based on a separator.
Parameters
----------
separator : str
The separator based on which string is split.
"""
def __init__(self, separator=None):
self._separator = separator
def __call__(self, s):
"""Split a string based on the separator.
Parameters
----------
s : str
The string to be split
Returns
--------
List[str]
List of strings. Obtained by calling s.split(separator).
"""
return s.split(self._separator)
|
[] |
[] |
[
"MXNET_HOME"
] |
[]
|
["MXNET_HOME"]
|
python
| 1 | 0 | |
src/TextSummarization/wsgi.py
|
"""
WSGI config for TextSummarization project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/howto/deployment/wsgi/
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "TextSummarization.settings")
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
setup.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import re
import os
import sys
from setuptools import setup
name = 'drf-permissions-router'
package = 'drf_permissions_router'
description = 'Apply permissions to views through routers'
url = 'https://github.com/crowdcomms/drf-permissions-router'
author = 'Adam Jacquier-Parr'
author_email = '[email protected]'
license = 'Apache'
def get_version(package):
"""
    Return package version as listed in `__version__` in `__init__.py`.
"""
init_py = open(os.path.join(package, '__init__.py')).read()
return re.search("^__version__ = ['\"]([^'\"]+)['\"]",
init_py, re.MULTILINE).group(1)
def get_packages(package):
"""
Return root package and all sub-packages.
"""
return [dirpath
for dirpath, dirnames, filenames in os.walk(package)
if os.path.exists(os.path.join(dirpath, '__init__.py'))]
def get_package_data(package):
"""
Return all files under the root package, that are not in a
package themselves.
"""
walk = [(dirpath.replace(package + os.sep, '', 1), filenames)
for dirpath, dirnames, filenames in os.walk(package)
if not os.path.exists(os.path.join(dirpath, '__init__.py'))]
filepaths = []
for base, filenames in walk:
filepaths.extend([os.path.join(base, filename)
for filename in filenames])
return {package: filepaths}
version = get_version(package)
TWINE_USERNAME = os.environ.get("TWINE_USERNAME")
TWINE_PASSWORD = os.environ.get("TWINE_PASSWORD")
if sys.argv[-1] == 'publish':
if os.system("pip freeze | grep twine"):
print("twine not installed.\nUse `pip install twine`.\nExiting.")
sys.exit()
os.system("python setup.py sdist")
os.system("twine upload -u '{}' -p '{}' dist/{}-{}.tar.gz".format(TWINE_USERNAME, TWINE_PASSWORD, name, version))
print("You probably want to also tag the version now:")
print(" git tag -a {0} -m 'version {0}'".format(version))
print(" git push --tags")
sys.exit()
setup(
name=name,
version=version,
url=url,
license=license,
description=description,
long_description=open('README.md').read(),
author=author,
author_email=author_email,
packages=get_packages(package),
package_data=get_package_data(package),
install_requires=[
'djangorestframework>=2.4.3',
'Django>=1.8',
],
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Natural Language :: English',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Topic :: Internet :: WWW/HTTP',
]
)
|
[] |
[] |
[
"TWINE_PASSWORD",
"TWINE_USERNAME"
] |
[]
|
["TWINE_PASSWORD", "TWINE_USERNAME"]
|
python
| 2 | 0 |