filename | content | environment | variablearg | constarg | variableargjson | constargjson | lang | constargcount | variableargcount | sentence
---|---|---|---|---|---|---|---|---|---|---|
vendor/github.com/openshift/origin/cmd/oc/oc.go | package main
import (
"math/rand"
"os"
"path/filepath"
"runtime"
"time"
"k8s.io/apiserver/pkg/util/logs"
"k8s.io/kubernetes/pkg/kubectl/scheme"
"github.com/openshift/origin/pkg/cmd/util/serviceability"
"github.com/openshift/origin/pkg/oc/cli"
// install all APIs
apiinstall "github.com/openshift/origin/pkg/api/install"
apilegacy "github.com/openshift/origin/pkg/api/legacy"
_ "k8s.io/kubernetes/pkg/apis/autoscaling/install"
_ "k8s.io/kubernetes/pkg/apis/batch/install"
_ "k8s.io/kubernetes/pkg/apis/core/install"
_ "k8s.io/kubernetes/pkg/apis/extensions/install"
)
func main() {
logs.InitLogs()
defer logs.FlushLogs()
defer serviceability.BehaviorOnPanic(os.Getenv("OPENSHIFT_ON_PANIC"))()
defer serviceability.Profile(os.Getenv("OPENSHIFT_PROFILE")).Stop()
rand.Seed(time.Now().UTC().UnixNano())
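	// default GOMAXPROCS to the number of CPUs unless it is set in the environment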
if len(os.Getenv("GOMAXPROCS")) == 0 {
runtime.GOMAXPROCS(runtime.NumCPU())
}
apiinstall.InstallAll(scheme.Scheme, scheme.GroupFactoryRegistry, scheme.Registry)
apilegacy.LegacyInstallAll(scheme.Scheme, scheme.Registry)
basename := filepath.Base(os.Args[0])
command := cli.CommandFor(basename)
if err := command.Execute(); err != nil {
os.Exit(1)
}
}
| [
"\"OPENSHIFT_ON_PANIC\"",
"\"OPENSHIFT_PROFILE\"",
"\"GOMAXPROCS\""
]
| []
| [
"OPENSHIFT_PROFILE",
"OPENSHIFT_ON_PANIC",
"GOMAXPROCS"
]
| [] | ["OPENSHIFT_PROFILE", "OPENSHIFT_ON_PANIC", "GOMAXPROCS"] | go | 3 | 0 | |
legacy_test/services/policy_engine/db/test_distro_mapping.py | import unittest
import logging
import os
from test.services.policy_engine.utils import LocalTestDataEnvironment, init_db
from anchore_engine.db.entities.policy_engine import DistroNamespace, DistroMapping, DistroTuple, VersionPreservingDistroMapper
class TestDistroMappers(unittest.TestCase):
test_env = LocalTestDataEnvironment(os.environ['ANCHORE_ENGINE_TESTING_HOME'])
app = None
test_gates = [
{
'gate': 'DOCKERFILECHECK',
'trigger': 'EXPOSE',
'action': 'GO',
'params': {
'ALLOWEDPORTS': '80,443',
'DENIEDPORTS': '8080,22,21'
}
},
{
'gate': 'DOCKERFILECHECK',
'trigger': 'NOFROM',
'action': 'GO',
'params': {}
}
]
@classmethod
def setUpClass(cls):
init_db(cls.test_env.mk_db(), conn_args={})
logging.basicConfig(level=logging.DEBUG)
def test_simple_map(self):
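        """Exercise VersionPreservingDistroMapper with explicit, overridden, and empty mappings."""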
found = DistroMapping()
found.from_distro = 'centos'
found.to_distro = 'centos'
found.flavor = 'RHEL'
mapper = VersionPreservingDistroMapper('centos', '7', None, found)
        print('Mapped centos to: {}'.format(mapper.mapping))
        found.from_distro = 'fedora'
        found.to_distro = 'centos'
        found.flavor = 'RHEL'
        mapper = VersionPreservingDistroMapper('fedora', '27', 'centos', found)
        print('Mapped fedora to: {}'.format(mapper.mapping))
        mapper = VersionPreservingDistroMapper('fedora', '27', 'centos', None)
        print('Mapped fedora to: {} on empty input'.format(mapper.mapping))
def test_distro_from(self):
        print(DistroMapping.distros_for('centos', '7', 'rhel'))
        print(DistroMapping.distros_for('centos', '7.4.1', 'rhel'))
        print(DistroMapping.distros_for('debian', '9'))
        print(DistroMapping.distros_for('ubuntu', '16.04'))
        print(DistroMapping.distros_for('busybox', '3'))
        print(DistroMapping.distros_for('raspbian', '5'))
        print(DistroMapping.distros_for('magaiea', '3'))
        print(DistroMapping.distros_for('magaiea', '5', 'fedora,mandriva'))
| []
| []
| [
"ANCHORE_ENGINE_TESTING_HOME"
]
| [] | ["ANCHORE_ENGINE_TESTING_HOME"] | python | 1 | 0 | |
doc/make.py | #!/usr/bin/env python
"""
Python script for building documentation.
To build the docs you must have all optional dependencies for statsmodels
installed. See the installation instructions for a list of these.
Note: currently latex builds do not work because of table formats that are not
supported in the latex generation.
Usage
-----
python make.py clean
python make.py html
"""
import glob
import os
import shutil
import sys
import sphinx
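# exported so that the sphinx-build subprocesses launched below can import the in-tree package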
os.environ['PYTHONPATH'] = '..'
SPHINX_BUILD = 'sphinxbuild'
def sf():
'push a copy to the sf site'
os.system('cd build/html; rsync -avz . wesmckinn,[email protected]'
':/home/groups/p/pa/pandas/htdocs/ -essh --cvs-exclude')
def sfpdf():
'push a copy to the sf site'
os.system('cd build/latex; scp pandas.pdf wesmckinn,[email protected]'
':/home/groups/p/pa/pandas/htdocs/')
def clean():
if os.path.exists('build'):
shutil.rmtree('build')
if os.path.exists('source/generated'):
shutil.rmtree('source/generated')
def html():
check_build()
if os.system('sphinx-build -P -b html -d build/doctrees '
'source build/html'):
raise SystemExit("Building HTML failed.")
def latex():
check_build()
if sys.platform != 'win32':
# LaTeX format.
if os.system('sphinx-build -b latex -d build/doctrees '
'source build/latex'):
raise SystemExit("Building LaTeX failed.")
# Produce pdf.
os.chdir('build/latex')
# Call the makefile produced by sphinx...
if os.system('make'):
raise SystemExit("Rendering LaTeX failed.")
os.chdir('../..')
else:
print 'latex build has not been tested on windows'
def check_build():
build_dirs = [
'build', 'build/doctrees', 'build/html',
'build/latex', 'build/plots', 'build/_static',
'build/_templates']
for d in build_dirs:
try:
os.mkdir(d)
except OSError:
pass
def all():
# clean()
html()
funcd = {
'html' : html,
'latex' : latex,
'clean' : clean,
'sf' : sf,
'sfpdf' : sfpdf,
'all' : all,
}
small_docs = False
# current_dir = os.getcwd()
# os.chdir(os.path.dirname(os.path.join(current_dir, __file__)))
if len(sys.argv)>1:
for arg in sys.argv[1:]:
func = funcd.get(arg)
if func is None:
raise SystemExit('Do not know how to handle %s; valid args are %s'%(
arg, funcd.keys()))
func()
else:
small_docs = False
all()
#os.chdir(current_dir)
| []
| []
| [
"PYTHONPATH"
]
| [] | ["PYTHONPATH"] | python | 1 | 0 | |
manage.py | #!/usr/bin/env python
import os
import sys
if __name__ == '__main__':
os.environ.setdefault(
'DJANGO_SETTINGS_MODULE', 'bootstrap_components.tests.settings'
)
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| []
| []
| []
| [] | [] | python | 0 | 0 | |
setup.py | # encoding: utf-8
import os
import os.path
from pkg_resources import parse_version
# Avoid problem releasing to pypi from vagrant
if os.environ.get('USER', '') == 'vagrant':
del os.link
try:
from setuptools import (setup, find_packages,
__version__ as setuptools_version)
except ImportError:
from ez_setup import use_setuptools
use_setuptools()
from setuptools import (setup, find_packages,
__version__ as setuptools_version)
from ckan import (__version__, __description__, __long_description__,
__license__)
#
# Check setuptools version
#
HERE = os.path.dirname(__file__)
with open(os.path.join(HERE, 'requirement-setuptools.txt')) as f:
setuptools_requirement = f.read().strip()
min_setuptools_version = parse_version(setuptools_requirement.split('==')[1])
if parse_version(setuptools_version) < min_setuptools_version:
raise AssertionError(
'setuptools version error\n'
'You need a newer version of setuptools.\n'
'Install the recommended version:\n'
' pip install -r requirement-setuptools.txt\n'
'and then try again to install ckan into your python environment.'
)
entry_points = {
'paste.app_factory': [
'main = ckan.config.middleware:make_app',
],
'paste.app_install': [
'main = ckan.config.install:CKANInstaller',
],
'console_scripts': [
'ckan = ckan.cli.cli:ckan',
],
'ckan.click_command': [
'datastore = ckanext.datastore.cli:datastore',
'datapusher = ckanext.datapusher.cli:datapusher',
],
'paste.paster_create_template': [
'ckanext = ckan.pastertemplates:CkanextTemplate',
],
'ckan.forms': [
'standard = ckan.forms.package:get_standard_fieldset',
'package = ckan.forms.package:get_standard_fieldset',
'group = ckan.forms.group:get_group_fieldset',
'package_group = ckan.forms.group:get_package_group_fieldset',
],
'ckan.search': [
'sql = ckan.lib.search.sql:SqlSearchBackend',
'solr = ckan.lib.search.solr_backend:SolrSearchBackend',
],
'ckan.plugins': [
'synchronous_search = ckan.lib.search:SynchronousSearchPlugin',
'stats = ckanext.stats.plugin:StatsPlugin',
'publisher_form = ckanext.publisher_form.forms:PublisherForm',
'publisher_dataset_form = ckanext.publisher_form.forms:PublisherDatasetForm',
'multilingual_dataset = ckanext.multilingual.plugin:MultilingualDataset',
'multilingual_group = ckanext.multilingual.plugin:MultilingualGroup',
'multilingual_tag = ckanext.multilingual.plugin:MultilingualTag',
'multilingual_resource = ckanext.multilingual.plugin:MultilingualResource',
'organizations = ckanext.organizations.forms:OrganizationForm',
'organizations_dataset = ckanext.organizations.forms:OrganizationDatasetForm',
'expire_api_token = ckanext.expire_api_token.plugin:ExpireApiTokenPlugin',
'chained_functions = ckanext.chained_functions.plugin:ChainedFunctionsPlugin',
'datastore = ckanext.datastore.plugin:DatastorePlugin',
        'datapusher = ckanext.datapusher.plugin:DatapusherPlugin',
'test_tag_vocab_plugin = ckanext.test_tag_vocab_plugin:MockVocabTagsPlugin',
'resource_proxy = ckanext.resourceproxy.plugin:ResourceProxy',
'text_view = ckanext.textview.plugin:TextView',
'recline_view = ckanext.reclineview.plugin:ReclineView',
'recline_grid_view = ckanext.reclineview.plugin:ReclineGridView',
'recline_graph_view = ckanext.reclineview.plugin:ReclineGraphView',
'recline_map_view = ckanext.reclineview.plugin:ReclineMapView',
'datatables_view = ckanext.datatablesview.plugin:DataTablesView',
'image_view = ckanext.imageview.plugin:ImageView',
'audio_view = ckanext.audioview.plugin:AudioView',
'video_view = ckanext.videoview.plugin:VideoView',
'webpage_view = ckanext.webpageview.plugin:WebPageView',
# FIXME: Remove deprecated resource previews below. You should use the
# versions as *_view instead.
'text_preview = ckanext.textview.plugin:TextView',
'recline_preview = ckanext.reclineview.plugin:ReclineView',
'recline_grid = ckanext.reclineview.plugin:ReclineGridView',
'recline_graph = ckanext.reclineview.plugin:ReclineGraphView',
'recline_map = ckanext.reclineview.plugin:ReclineMapView',
# End of deprecated previews
'example_itemplatehelpers = ckanext.example_itemplatehelpers.plugin:ExampleITemplateHelpersPlugin',
'example_idatasetform = ckanext.example_idatasetform.plugin:ExampleIDatasetFormPlugin',
'example_idatasetform_v1 = ckanext.example_idatasetform.plugin_v1:ExampleIDatasetFormPlugin',
'example_idatasetform_v2 = ckanext.example_idatasetform.plugin_v2:ExampleIDatasetFormPlugin',
'example_idatasetform_v3 = ckanext.example_idatasetform.plugin_v3:ExampleIDatasetFormPlugin',
'example_idatasetform_v4 = ckanext.example_idatasetform.plugin_v4:ExampleIDatasetFormPlugin',
'example_idatasetform_v5 = ckanext.example_idatasetform.plugin_v5:ExampleIDatasetFormPlugin',
'example_idatasetform_v6 = ckanext.example_idatasetform.plugin_v6:ExampleIDatasetFormPlugin',
'example_idatasetform_v7 = ckanext.example_idatasetform.plugin_v7:ExampleIDatasetFormPlugin',
'example_igroupform = ckanext.example_igroupform.plugin:ExampleIGroupFormPlugin',
'example_igroupform_v2 = ckanext.example_igroupform.plugin_v2:ExampleIGroupFormPlugin',
'example_igroupform_default_group_type = ckanext.example_igroupform.plugin:ExampleIGroupFormPlugin_DefaultGroupType',
'example_igroupform_organization = ckanext.example_igroupform.plugin:ExampleIGroupFormOrganizationPlugin',
'example_iauthfunctions_v1 = ckanext.example_iauthfunctions.plugin_v1:ExampleIAuthFunctionsPlugin',
'example_iauthfunctions_v2 = ckanext.example_iauthfunctions.plugin_v2:ExampleIAuthFunctionsPlugin',
'example_iauthfunctions_v3 = ckanext.example_iauthfunctions.plugin_v3:ExampleIAuthFunctionsPlugin',
'example_iauthfunctions_v4 = ckanext.example_iauthfunctions.plugin_v4:ExampleIAuthFunctionsPlugin',
'example_iauthfunctions_v5_custom_config_setting = ckanext.example_iauthfunctions.plugin_v5_custom_config_setting:ExampleIAuthFunctionsPlugin',
'example_iauthfunctions_v6_parent_auth_functions = ckanext.example_iauthfunctions.plugin_v6_parent_auth_functions:ExampleIAuthFunctionsPlugin',
'example_theme_v01_empty_extension = ckanext.example_theme_docs.v01_empty_extension.plugin:ExampleThemePlugin',
'example_theme_v02_empty_template = ckanext.example_theme_docs.v02_empty_template.plugin:ExampleThemePlugin',
'example_theme_v03_jinja = ckanext.example_theme_docs.v03_jinja.plugin:ExampleThemePlugin',
'example_theme_v04_ckan_extends = ckanext.example_theme_docs.v04_ckan_extends.plugin:ExampleThemePlugin',
'example_theme_v05_block = ckanext.example_theme_docs.v05_block.plugin:ExampleThemePlugin',
'example_theme_v06_super = ckanext.example_theme_docs.v06_super.plugin:ExampleThemePlugin',
'example_theme_v07_helper_function = ckanext.example_theme_docs.v07_helper_function.plugin:ExampleThemePlugin',
'example_theme_v08_custom_helper_function = ckanext.example_theme_docs.v08_custom_helper_function.plugin:ExampleThemePlugin',
'example_theme_v09_snippet = ckanext.example_theme_docs.v09_snippet.plugin:ExampleThemePlugin',
'example_theme_v10_custom_snippet = ckanext.example_theme_docs.v10_custom_snippet.plugin:ExampleThemePlugin',
'example_theme_v11_HTML_and_CSS = ckanext.example_theme_docs.v11_HTML_and_CSS.plugin:ExampleThemePlugin',
'example_theme_v12_extra_public_dir = ckanext.example_theme_docs.v12_extra_public_dir.plugin:ExampleThemePlugin',
'example_theme_v13_custom_css = ckanext.example_theme_docs.v13_custom_css.plugin:ExampleThemePlugin',
'example_theme_v14_more_custom_css = ckanext.example_theme_docs.v14_more_custom_css.plugin:ExampleThemePlugin',
'example_theme_v15_fanstatic = ckanext.example_theme_docs.v15_fanstatic.plugin:ExampleThemePlugin',
'example_theme_v16_initialize_a_javascript_module = ckanext.example_theme_docs.v16_initialize_a_javascript_module.plugin:ExampleThemePlugin',
'example_theme_v17_popover = ckanext.example_theme_docs.v17_popover.plugin:ExampleThemePlugin',
'example_theme_v18_snippet_api = ckanext.example_theme_docs.v18_snippet_api.plugin:ExampleThemePlugin',
'example_theme_v19_01_error = ckanext.example_theme_docs.v19_01_error.plugin:ExampleThemePlugin',
'example_theme_v19_02_error_handling = ckanext.example_theme_docs.v19_02_error_handling.plugin:ExampleThemePlugin',
'example_theme_v20_pubsub = ckanext.example_theme_docs.v20_pubsub.plugin:ExampleThemePlugin',
'example_theme_v21_custom_jquery_plugin = ckanext.example_theme_docs.v21_custom_jquery_plugin.plugin:ExampleThemePlugin',
'example_theme_v22_fanstatic_and_webassets = ckanext.example_theme_docs.v22_fanstatic_and_webassets.plugin:ExampleThemePlugin',
'example_theme_custom_config_setting = ckanext.example_theme_docs.custom_config_setting.plugin:ExampleThemePlugin',
'example_theme_custom_emails = ckanext.example_theme_docs.custom_emails.plugin:ExampleCustomEmailsPlugin',
'example_iresourcecontroller = ckanext.example_iresourcecontroller.plugin:ExampleIResourceControllerPlugin',
'example_ivalidators = ckanext.example_ivalidators.plugin:ExampleIValidatorsPlugin',
'example_iconfigurer = ckanext.example_iconfigurer.plugin:ExampleIConfigurerPlugin',
'example_itranslation = ckanext.example_itranslation.plugin:ExampleITranslationPlugin',
'example_iconfigurer_v1 = ckanext.example_iconfigurer.plugin_v1:ExampleIConfigurerPlugin',
'example_iconfigurer_v2 = ckanext.example_iconfigurer.plugin_v2:ExampleIConfigurerPlugin',
'example_flask_iblueprint = ckanext.example_flask_iblueprint.plugin:ExampleFlaskIBlueprintPlugin',
'example_flask_streaming = ckanext.example_flask_streaming.plugin:ExampleFlaskStreamingPlugin',
'example_iuploader = ckanext.example_iuploader.plugin:ExampleIUploader',
'example_idatastorebackend = ckanext.example_idatastorebackend.plugin:ExampleIDatastoreBackendPlugin',
'example_ipermissionlabels = ckanext.example_ipermissionlabels.plugin:ExampleIPermissionLabelsPlugin',
'example_iapitoken = ckanext.example_iapitoken.plugin:ExampleIApiTokenPlugin',
'example_iclick = ckanext.example_iclick.plugin:ExampleIClickPlugin',
'example_iauthenticator = ckanext.example_iauthenticator.plugin:ExampleIAuthenticatorPlugin',
],
'ckan.system_plugins': [
'domain_object_mods = ckan.model.modification:DomainObjectModificationExtension',
],
'ckan.test_plugins': [
'routes_plugin = tests.legacy.ckantestplugins:RoutesPlugin',
'mapper_plugin = tests.legacy.ckantestplugins:MapperPlugin',
'session_plugin = tests.legacy.ckantestplugins:SessionPlugin',
'mapper_plugin2 = tests.legacy.ckantestplugins:MapperPlugin2',
'authorizer_plugin = tests.legacy.ckantestplugins:AuthorizerPlugin',
'test_observer_plugin = tests.legacy.ckantestplugins:PluginObserverPlugin',
'action_plugin = tests.legacy.ckantestplugins:ActionPlugin',
'auth_plugin = tests.legacy.ckantestplugins:AuthPlugin',
'test_group_plugin = tests.legacy.ckantestplugins:MockGroupControllerPlugin',
'test_package_controller_plugin = tests.legacy.ckantestplugins:MockPackageControllerPlugin',
'test_resource_preview = tests.legacy.ckantestplugins:MockResourcePreviewExtension',
'test_json_resource_preview = tests.legacy.ckantestplugins:JsonMockResourcePreviewExtension',
'sample_datastore_plugin = ckanext.datastore.tests.sample_datastore_plugin:SampleDataStorePlugin',
'example_datastore_deleted_with_count_plugin = ckanext.datastore.tests.test_chained_action:ExampleDataStoreDeletedWithCountPlugin',
'example_data_store_search_sql_plugin = ckanext.datastore.tests.test_chained_auth_functions:ExampleDataStoreSearchSQLPlugin',
'example_external_provider_plugin = ckanext.datastore.tests.test_chained_auth_functions:ExampleExternalProviderPlugin',
'test_datastore_view = ckan.tests.lib.test_datapreview:MockDatastoreBasedResourceView',
'test_datapusher_plugin = ckanext.datapusher.tests.test_interfaces:FakeDataPusherPlugin',
'test_routing_plugin = ckan.tests.config.test_middleware:MockRoutingPlugin',
'test_flash_plugin = ckan.tests.config.test_sessions:FlashMessagePlugin',
'test_helpers_plugin = ckan.tests.lib.test_helpers:TestHelpersPlugin',
'test_feed_plugin = ckan.tests.controllers.test_feed:MockFeedPlugin',
'test_js_translations_plugin = ckan.tests.lib.test_i18n:TestJSTranslationsPlugin',
'legacy_mock_search_plugin = ckan.tests.legacy.logic.test_action:MockPackageSearchPlugin',
],
'babel.extractors': [
'ckan = ckan.lib.extract:extract_ckan',
],
}
extras_require = {}
_extras_groups = [
('requirements', 'requirements.txt'), ('requirements-py2', 'requirements-py2.txt'),
('setuptools', 'requirement-setuptools.txt'), ('dev', 'dev-requirements.txt'),
]
for group, filepath in _extras_groups:
with open(os.path.join(HERE, filepath), 'r') as f:
extras_require[group] = f.readlines()
setup(
name='ckan',
version=__version__,
author='https://github.com/ckan/ckan/graphs/contributors',
author_email='[email protected]',
license=__license__,
url='http://ckan.org/',
description=__description__,
keywords='data packaging component tool server',
long_description=__long_description__,
zip_safe=False,
include_package_data=True,
packages=find_packages(exclude=['ez_setup']),
# namespace_packages=['ckanext', 'ckanext.stats'],
message_extractors={
'ckan': [
('templates/importer/**', 'ignore', None),
('templates/**.html', 'ckan', None),
('templates/**.txt', 'ckan', None),
('templates_legacy/**.html', 'ckan', None),
('public/base/test/**', 'ignore', None),
('**.py', 'python', None),
('**.js', 'javascript', None),
],
'ckanext': [
('**.py', 'python', None),
('**.js', 'javascript', None),
('**.html', 'ckan', None),
('multilingual/solr/*.txt', 'ignore', None),
]
},
entry_points=entry_points,
# setup.py test command needs a TestSuite so does not work with py.test
# tests_require=[ 'py >= 0.8.0-alpha2' ]
extras_require=extras_require,
classifiers=[
# https://pypi.python.org/pypi?%3Aaction=list_classifiers
'Development Status :: 5 - Production/Stable',
'License :: OSI Approved :: GNU Affero General Public License v3 or later (AGPLv3+)',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
],
)
| []
| []
| [
"USER"
]
| [] | ["USER"] | python | 1 | 0 | |
util/template/expression_template.go | package template
import (
"encoding/json"
"fmt"
"io"
"os"
"github.com/antonmedv/expr"
"github.com/antonmedv/expr/file"
"github.com/antonmedv/expr/parser/lexer"
)
func init() {
if os.Getenv("EXPRESSION_TEMPLATES") != "false" {
registerKind(kindExpression)
}
}
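// expressionReplace evaluates an expression template against env and writes the result
// to w. When allowUnresolved is true, expressions that cannot be resolved are written
// back verbatim in their original {{...}} form instead of returning an error.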
func expressionReplace(w io.Writer, expression string, env map[string]interface{}, allowUnresolved bool) (int, error) {
// The template is JSON-marshaled. This JSON-unmarshals the expression to undo any character escapes.
var unmarshalledExpression string
err := json.Unmarshal([]byte(fmt.Sprintf(`"%s"`, expression)), &unmarshalledExpression)
if err != nil && allowUnresolved {
return w.Write([]byte(fmt.Sprintf("{{%s%s}}", kindExpression, expression)))
}
if err != nil {
return 0, fmt.Errorf("failed to unmarshall JSON expression: %w", err)
}
if _, ok := env["retries"]; !ok && hasRetries(unmarshalledExpression) && allowUnresolved {
		// this is to make sure expressions like `sprig.int(retries)` don't get resolved to 0 when `retries` doesn't exist in the env
// See https://github.com/argoproj/argo-workflows/issues/5388
return w.Write([]byte(fmt.Sprintf("{{%s%s}}", kindExpression, expression)))
}
result, err := expr.Eval(unmarshalledExpression, env)
if (err != nil || result == nil) && allowUnresolved { // <nil> result is also un-resolved, and any error can be unresolved
return w.Write([]byte(fmt.Sprintf("{{%s%s}}", kindExpression, expression)))
}
if err != nil {
return 0, fmt.Errorf("failed to evaluate expression: %w", err)
}
if result == nil {
return 0, fmt.Errorf("failed to evaluate expression %q", expression)
}
resultMarshaled, err := json.Marshal(fmt.Sprintf("%v", result))
if (err != nil || resultMarshaled == nil) && allowUnresolved {
return w.Write([]byte(fmt.Sprintf("{{%s%s}}", kindExpression, expression)))
}
if err != nil {
return 0, fmt.Errorf("failed to marshal evaluated expression: %w", err)
}
if resultMarshaled == nil {
return 0, fmt.Errorf("failed to marshal evaluated marshaled expression %q", expression)
}
// Trim leading and trailing quotes. The value is being inserted into something that's already a string.
marshaledLength := len(resultMarshaled)
return w.Write(resultMarshaled[1 : marshaledLength-1])
}
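// envMap widens a map[string]string into the map[string]interface{} environment expected by expr.Eval.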
func envMap(replaceMap map[string]string) map[string]interface{} {
envMap := make(map[string]interface{})
for k, v := range replaceMap {
envMap[k] = v
}
return envMap
}
// hasRetries checks if the variable `retries` exists in the expression template
func hasRetries(expression string) bool {
tokens, err := lexer.Lex(file.NewSource(expression))
if err != nil {
return false
}
for _, token := range tokens {
if token.Kind == lexer.Identifier && token.Value == "retries" {
return true
}
}
return false
}
| [
"\"EXPRESSION_TEMPLATES\""
]
| []
| [
"EXPRESSION_TEMPLATES"
]
| [] | ["EXPRESSION_TEMPLATES"] | go | 1 | 0 | |
maintenance/docs.py | import sys
import os
import glob
import shutil
import datetime
assert 'pymel' not in sys.modules or 'PYMEL_INCLUDE_EXAMPLES' in os.environ, "to generate docs PYMEL_INCLUDE_EXAMPLES env var must be set before pymel is imported"
# remember, the processed command examples are not version specific. you must
# run cmdcache.fixCodeExamples() to bring processed examples in from the raw
# version-specific example caches
os.environ['PYMEL_INCLUDE_EXAMPLES'] = 'True'
pymel_root = os.path.dirname(os.path.dirname(sys.modules[__name__].__file__))
docsdir = os.path.join(pymel_root, 'docs')
stubdir = os.path.join(pymel_root, 'extras', 'completion', 'py')
useStubs = False
if useStubs:
sys.path.insert(0, stubdir)
import pymel
print pymel.__file__
else:
import pymel
# make sure dynamic modules are fully loaded
from pymel.core.uitypes import *
from pymel.core.nodetypes import *
version = pymel.__version__.rsplit('.',1)[0]
SOURCE = 'source'
BUILD_ROOT = 'build'
BUILD = os.path.join(BUILD_ROOT, version)
sourcedir = os.path.join(docsdir, SOURCE)
gendir = os.path.join(sourcedir, 'generated')
buildrootdir = os.path.join(docsdir, BUILD_ROOT)
builddir = os.path.join(docsdir, BUILD)
from pymel.internal.cmdcache import fixCodeExamples
def generate(clean=True):
"delete build and generated directories and generate a top-level documentation source file for each module."
print "generating %s - %s" % (docsdir, datetime.datetime.now())
from sphinx.ext.autosummary.generate import main as sphinx_autogen
if clean:
clean_build()
clean_generated()
os.chdir(sourcedir)
sphinx_autogen( [''] + '--templates ../templates modules.rst'.split() )
sphinx_autogen( [''] + '--templates ../templates'.split() + glob.glob('generated/pymel.*.rst') )
print "...done generating %s - %s" % (docsdir, datetime.datetime.now())
def clean_build():
"delete existing build directory"
if os.path.exists(buildrootdir):
print "removing %s - %s" % (buildrootdir, datetime.datetime.now())
shutil.rmtree(buildrootdir)
def clean_generated():
"delete existing generated directory"
if os.path.exists(gendir):
print "removing %s - %s" % (gendir, datetime.datetime.now())
shutil.rmtree(gendir)
def find_dot():
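    """Return the path to the graphviz dot executable found on PATH."""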
if os.name == 'posix':
dot_bin = 'dot'
else:
dot_bin = 'dot.exe'
for p in os.environ['PATH'].split(os.pathsep):
d = os.path.join(p, dot_bin)
if os.path.exists(d):
return d
    raise TypeError('cannot find graphviz dot executable in the path (%s)' % os.environ['PATH'])
def copy_changelog():
changelog = os.path.join(pymel_root, 'CHANGELOG.rst')
whatsnew = os.path.join(pymel_root, 'docs', 'source', 'whats_new.rst')
shutil.copy2(changelog, whatsnew)
def build(clean=True, **kwargs):
from sphinx import main as sphinx_build
print "building %s - %s" % (docsdir, datetime.datetime.now())
if not os.path.isdir(gendir):
generate()
os.chdir( docsdir )
if clean:
clean_build()
copy_changelog()
#mkdir -p build/html build/doctrees
#import pymel.internal.cmdcache as cmdcache
#cmdcache.fixCodeExamples()
opts = ['']
opts += '-b html -d build/doctrees'.split()
# set some defaults
if not kwargs.get('graphviz_dot', None):
kwargs['graphviz_dot'] = find_dot()
for key, value in kwargs.iteritems():
opts.append('-D')
opts.append( key.strip() + '=' + value.strip() )
opts.append('-P')
opts.append(SOURCE)
opts.append(BUILD)
sphinx_build(opts)
print "...done building %s - %s" % (docsdir, datetime.datetime.now())
| []
| []
| [
"PYMEL_INCLUDE_EXAMPLES",
"PATH"
]
| [] | ["PYMEL_INCLUDE_EXAMPLES", "PATH"] | python | 2 | 0 | |
test/e2e/run_userns_test.go | // +build !remoteclient
package integration
import (
"os"
. "github.com/containers/libpod/test/utils"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
var _ = Describe("Podman UserNS support", func() {
var (
tempdir string
err error
podmanTest *PodmanTestIntegration
)
BeforeEach(func() {
if os.Getenv("SKIP_USERNS") != "" {
Skip("Skip userns tests.")
}
if _, err := os.Stat("/proc/self/uid_map"); err != nil {
Skip("User namespaces not supported.")
}
tempdir, err = CreateTempDirInTempDir()
if err != nil {
os.Exit(1)
}
podmanTest = PodmanTestCreate(tempdir)
podmanTest.Setup()
podmanTest.RestoreAllArtifacts()
})
AfterEach(func() {
podmanTest.Cleanup()
f := CurrentGinkgoTestDescription()
processTestResult(f)
})
It("podman uidmapping and gidmapping", func() {
session := podmanTest.Podman([]string{"run", "--uidmap=0:100:5000", "--gidmap=0:200:5000", "alpine", "echo", "hello"})
session.WaitWithDefaultTimeout()
Expect(session.ExitCode()).To(Equal(0))
ok, _ := session.GrepString("hello")
Expect(ok).To(BeTrue())
})
// It essentially repeats the test above but with the `-it` short option
// that broke execution at:
// https://github.com/containers/libpod/pull/1066#issuecomment-403562116
// To avoid a potential future regression, use this as a test.
It("podman uidmapping and gidmapping with short-opts", func() {
session := podmanTest.Podman([]string{"run", "--uidmap=0:1:5000", "--gidmap=0:200:5000", "-it", "alpine", "echo", "hello"})
session.WaitWithDefaultTimeout()
Expect(session.ExitCode()).To(Equal(0))
ok, _ := session.GrepString("hello")
Expect(ok).To(BeTrue())
})
It("podman uidmapping and gidmapping with a volume", func() {
session := podmanTest.Podman([]string{"run", "--uidmap=0:1:500", "--gidmap=0:200:5000", "-v", "my-foo-volume:/foo:Z", "alpine", "echo", "hello"})
session.WaitWithDefaultTimeout()
Expect(session.ExitCode()).To(Equal(0))
ok, _ := session.GrepString("hello")
Expect(ok).To(BeTrue())
})
It("podman uidmapping and gidmapping --net=host", func() {
session := podmanTest.Podman([]string{"run", "--net=host", "--uidmap=0:1:5000", "--gidmap=0:200:5000", "alpine", "echo", "hello"})
session.WaitWithDefaultTimeout()
Expect(session.ExitCode()).To(Equal(0))
ok, _ := session.GrepString("hello")
Expect(ok).To(BeTrue())
})
})
| [
"\"SKIP_USERNS\""
]
| []
| [
"SKIP_USERNS"
]
| [] | ["SKIP_USERNS"] | go | 1 | 0 | |
projects/batfish/src/main/java/org/batfish/main/PreprocessJuniper.java | package org.batfish.main;
import static com.google.common.base.Preconditions.checkArgument;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.TreeMap;
import javax.annotation.Nonnull;
import org.batfish.common.BatfishException;
import org.batfish.common.BatfishLogger;
import org.batfish.common.BfConsts;
import org.batfish.common.Warnings;
import org.batfish.common.util.CommonUtil;
import org.batfish.config.Settings;
import org.batfish.job.BatfishJobExecutor;
import org.batfish.job.PreprocessJuniperJob;
/** Utility to dump output of Juniper configuration pre-processing. */
public final class PreprocessJuniper {
public static void main(String[] args) {
checkArgument(args.length == 2, "Expected arguments: <input_dir> <output_dir>");
Path inputPath = Paths.get(args[0]);
Path outputPath = Paths.get(args[1]);
// Bazel: resolve relative to current working directory. No-op if paths are already absolute.
String wd = System.getenv("BUILD_WORKING_DIRECTORY");
if (wd != null) {
inputPath = Paths.get(wd).resolve(inputPath);
outputPath = Paths.get(wd).resolve(outputPath);
}
Settings settings = new Settings(new String[] {"-storagebase", "/"});
BatfishLogger logger = new BatfishLogger(BatfishLogger.LEVELSTR_WARN, false, System.out);
settings.setLogger(logger);
new PreprocessJuniper(new Batfish(settings, null, null, null, null, null))
.preprocessJuniper(inputPath, outputPath);
}
private final @Nonnull BatfishLogger _logger;
private final @Nonnull Settings _settings;
private PreprocessJuniper(Batfish batfish) {
_logger = batfish.getSettings().getLogger();
_settings = batfish.getSettings();
}
/**
* Pre-process Juniper configs in snapshot stored at {@code inputPath}, and dump to {@code
* outputPath}. Non-Juniper configs are copied unprocessed.
*/
private void preprocessJuniper(@Nonnull Path inputPath, @Nonnull Path outputPath) {
_logger.info("\n*** READING INPUT FILES ***\n");
Map<Path, String> configurationData =
Batfish.readAllFiles(inputPath.resolve(BfConsts.RELPATH_CONFIGURATIONS_DIR), _logger);
Map<Path, String> outputConfigurationData = new TreeMap<>();
Path outputConfigDir = outputPath.resolve(BfConsts.RELPATH_CONFIGURATIONS_DIR);
try {
Files.createDirectories(outputConfigDir);
} catch (IOException e) {
throw new BatfishException(
String.format("Failed to create output directory: '%s'", outputPath));
}
_logger.info("\n*** COMPUTING OUTPUT FILES ***\n");
_logger.resetTimer();
List<PreprocessJuniperJob> jobs = new ArrayList<>();
for (Entry<Path, String> configFile : configurationData.entrySet()) {
Path inputFile = configFile.getKey();
String fileText = configFile.getValue();
Warnings warnings = Batfish.buildWarnings(_settings);
String name = inputFile.getFileName().toString();
Path outputFile = outputConfigDir.resolve(name);
PreprocessJuniperJob job =
new PreprocessJuniperJob(_settings, fileText, inputFile, outputFile, warnings);
jobs.add(job);
}
BatfishJobExecutor.runJobsInExecutor(
_settings,
_logger,
jobs,
outputConfigurationData,
null,
_settings.getFlatten() || _settings.getHaltOnParseError(),
"Preprocesss Juniper configurations");
_logger.printElapsedTime();
for (Entry<Path, String> e : outputConfigurationData.entrySet()) {
Path outputFile = e.getKey();
String preprocessedConfigText = e.getValue();
String outputFileAsString = outputFile.toString();
_logger.debugf("Writing config to \"%s\"...", outputFileAsString);
CommonUtil.writeFile(outputFile, preprocessedConfigText);
_logger.debug("OK\n");
}
}
}
| [
"\"BUILD_WORKING_DIRECTORY\""
]
| []
| [
"BUILD_WORKING_DIRECTORY"
]
| [] | ["BUILD_WORKING_DIRECTORY"] | java | 1 | 0 | |
test/e2e/upgradetest/framework/framework.go | // Copyright 2017 The etcd-operator Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package framework
import (
"os"
"time"
"github.com/coreos/etcd-operator/pkg/util/k8sutil"
"github.com/coreos/etcd-operator/test/e2e/e2eutil"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/s3"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/pkg/api/v1"
appsv1beta1 "k8s.io/client-go/pkg/apis/apps/v1beta1"
"k8s.io/client-go/tools/clientcmd"
)
type Config struct {
// program flags
KubeConfig string
KubeNS string
OldImage string
NewImage string
}
type Framework struct {
Config
// global var
KubeCli kubernetes.Interface
S3Cli *s3.S3
S3Bucket string
}
func New(fc Config) (*Framework, error) {
kc, err := clientcmd.BuildConfigFromFlags("", fc.KubeConfig)
if err != nil {
return nil, err
}
kubecli, err := kubernetes.NewForConfig(kc)
if err != nil {
return nil, err
}
f := &Framework{
Config: fc,
KubeCli: kubecli,
}
err = f.setupAWS()
return f, err
}
func (f *Framework) CreateOperator() error {
cmd := []string{"/usr/local/bin/etcd-operator", "--analytics=false",
"--backup-aws-secret=aws", "--backup-aws-config=aws", "--backup-s3-bucket=jenkins-etcd-operator"}
name := "etcd-operator"
image := f.OldImage
selector := map[string]string{"name": "etcd-operator"}
d := &appsv1beta1.Deployment{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Namespace: f.KubeNS,
},
Spec: appsv1beta1.DeploymentSpec{
Selector: &metav1.LabelSelector{MatchLabels: selector},
Template: v1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: selector,
},
Spec: v1.PodSpec{
Containers: []v1.Container{{
Name: name,
Image: image,
ImagePullPolicy: v1.PullAlways,
Command: cmd,
Env: []v1.EnvVar{
{
Name: "MY_POD_NAMESPACE",
ValueFrom: &v1.EnvVarSource{FieldRef: &v1.ObjectFieldSelector{FieldPath: "metadata.namespace"}},
},
{
Name: "MY_POD_NAME",
ValueFrom: &v1.EnvVarSource{FieldRef: &v1.ObjectFieldSelector{FieldPath: "metadata.name"}},
},
},
}},
},
},
},
}
_, err := f.KubeCli.AppsV1beta1().Deployments(f.KubeNS).Create(d)
return err
}
func (f *Framework) DeleteOperator() error {
foreground := metav1.DeletePropagationForeground
err := f.KubeCli.AppsV1beta1().Deployments(f.KubeNS).Delete("etcd-operator", &metav1.DeleteOptions{
GracePeriodSeconds: func(t int64) *int64 { return &t }(0),
PropagationPolicy: &foreground,
})
if err != nil {
return err
}
// Wait until the etcd-operator pod is actually gone and not just terminating.
// In upgrade tests, the next test shouldn't see any etcd operator pod.
lo := metav1.ListOptions{
LabelSelector: labels.SelectorFromSet(map[string]string{"name": "etcd-operator"}).String(),
}
_, err = e2eutil.WaitPodsDeleted(f.KubeCli, f.KubeNS, 30*time.Second, lo)
return err
}
func (f *Framework) UpgradeOperator() error {
uf := func(d *appsv1beta1.Deployment) {
d.Spec.Template.Spec.Containers[0].Image = f.NewImage
}
err := k8sutil.PatchDeployment(f.KubeCli, f.KubeNS, "etcd-operator", uf)
if err != nil {
return err
}
lo := metav1.ListOptions{
LabelSelector: labels.SelectorFromSet(map[string]string{"name": "etcd-operator"}).String(),
}
_, err = e2eutil.WaitPodsWithImageDeleted(f.KubeCli, f.KubeNS, f.OldImage, 30*time.Second, lo)
return err
}
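// setupAWS points the AWS SDK at the credential and config files named by the
// AWS_CREDENTIAL and AWS_CONFIG environment variables, then creates the shared
// S3 client used to inspect backups.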
func (f *Framework) setupAWS() error {
if err := os.Setenv("AWS_SHARED_CREDENTIALS_FILE", os.Getenv("AWS_CREDENTIAL")); err != nil {
return err
}
if err := os.Setenv("AWS_CONFIG_FILE", os.Getenv("AWS_CONFIG")); err != nil {
return err
}
sess, err := session.NewSessionWithOptions(session.Options{
SharedConfigState: session.SharedConfigEnable,
})
if err != nil {
return err
}
f.S3Cli = s3.New(sess)
f.S3Bucket = "jenkins-etcd-operator"
return nil
}
| [
"\"AWS_CREDENTIAL\"",
"\"AWS_CONFIG\""
]
| []
| [
"AWS_CONFIG",
"AWS_CREDENTIAL"
]
| [] | ["AWS_CONFIG", "AWS_CREDENTIAL"] | go | 2 | 0 | |
cfn_auto_update_broker.py | """Create auto update CloudWatch events."""
import boto3
import cfnresponse
import os
import logging
import json
client = boto3.client('cloudformation')
event = boto3.client('events')
aws_lambda = boto3.client('lambda')
function_name = os.environ['FUNCTION_NAME']
region = os.environ['REGION']
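# the caller's account id is needed to construct the CloudWatch event rule ARNs below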
account_id = boto3.client('sts').get_caller_identity().get('Account')
# https://stackoverflow.com/questions/37703609/using-python-logging-with-aws-lambda
# while len(logging.root.handlers) > 0:
# logging.root.removeHandler(logging.root.handlers[-1])
# logging.basicConfig(format='%(asctime)s %(message)s', level=logging.DEBUG)
log = logging.getLogger(__name__)
log.setLevel(logging.INFO)
class AWSLambda(object):
"""Define AWS lambda function and associated operations."""
def __init__(self, event_name):
"""Define AWS lambda function components."""
self.name = function_name
self.event_name = event_name
self.statement_id = "AWSEvents_{}_{}".format(self.event_name,
self.name)
self.rule_arn = "arn:aws:events:{}:{}:rule/{}".format(region,
account_id,
self.event_name)
self.get_function_input = {'FunctionName': self.name}
self.add_permission_input = {
'FunctionName': self.name,
'StatementId': self.statement_id,
'Action': 'lambda:InvokeFunction',
'Principal': 'events.amazonaws.com',
'SourceArn': self.rule_arn,
}
self.remove_permission_input = {
'FunctionName': self.name,
'StatementId': self.statement_id
}
class CloudwatchEvent(object):
"""Define Cloudwatch event and associated operations."""
def __init__(self, stack_name, interval, toggle_parameter,
toggle_values):
"""Define Cloudwatch event components."""
self.stack_name = stack_name
self.name = "auto-update-{}".format(self.stack_name)
self.interval = interval
self.toggle_parameter = toggle_parameter
self.toggle_values = toggle_values
self.description = "trigger for {} auto update".format(self.stack_name)
self.target_function_name = function_name
self.target_lambda_arn = get_lambda_arn(
FunctionName=self.target_function_name)
self.event_constant = {
'event_name': self.name,
'stack_name': self.stack_name,
'toggle_parameter': self.toggle_parameter,
'toggle_values': self.toggle_values
}
self.rule_text = {
'Name': self.name,
'ScheduleExpression': self.interval,
'State': 'ENABLED',
'Description': self.description
}
self.put_targets_input = {
'Rule': self.name,
'Targets': [
{
'Id': self.target_function_name,
'Arn': self.target_lambda_arn,
'Input': json.dumps(self.event_constant)
}
]
}
self.remove_targets_input = {
'Rule': self.name,
'Ids': [
self.target_function_name,
]
}
self.delete_rule_input = {
'Name': self.name
}
def get_lambda_arn(**kwargs):
"""Return lambda function arn."""
response = aws_lambda.get_function(**kwargs)
log.info("get_lambda_name: {}".format(response))
lambda_arn = response['Configuration']['FunctionArn']
return lambda_arn
def lambda_add_resource_policy(**kwargs):
"""Update lambda resource policy."""
try:
response = aws_lambda.add_permission(**kwargs)
log.info("lambda_add_resource_policy: {}".format(response))
except aws_lambda.exceptions.ResourceConflictException as e:
log.info('Resource policy already exists.')
response = None
return response
def lambda_remove_resource_policy(**kwargs):
"""Remove lambda resource policy."""
response = aws_lambda.remove_permission(**kwargs)
log.info("lambda_remove_resource_policy: {}".format(response))
return response
def create_event(**kwargs):
"""Create a cloudwatch event."""
response = event.put_rule(**kwargs)
log.info("create_event: {}".format(response))
return response
def put_targets(**kwargs):
"""Set Cloudwatch event target."""
response = event.put_targets(**kwargs)
log.info("put_targets: {}".format(response))
return response
def remove_event_targets(**kwargs):
"""Remove CloudWatch event target."""
"""
Cloudwatch events cannot be deleted if they ref a target
"""
try:
response = event.remove_targets(**kwargs)
log.info("remove_targets: {}".format(response))
except event.exceptions.ResourceNotFoundException as e:
log.info('Event previously removed.')
response = None
return response
def delete_event(**kwargs):
"""Delete target cloudwatch event."""
response = event.delete_rule(**kwargs)
log.info("delete_event: {}".format(response))
return response
def lambda_handler(event, context):
"""Parse event."""
log.info("labmda_handler recieved event: {}".format(event))
response_type = cfnresponse.FAILED
try:
response_value = event['ResourceProperties']
response_data = {}
response_data['Data'] = response_value
toggle_values = event['ResourceProperties']['ToggleValues']
toggle_parameter = event['ResourceProperties']['ToggleParameter']
interval = event['ResourceProperties']['UpdateSchedule']
stack_name = event['ResourceProperties']['StackName']
reason = None
def cfn_delete_request():
"""Delete event."""
            log.info('Received Delete event')
event_obj = CloudwatchEvent(stack_name, None, None, None)
aws_lambda_obj = AWSLambda(event_obj.name)
try:
lambda_remove_resource_policy(
**aws_lambda_obj.remove_permission_input)
except aws_lambda.exceptions.ResourceNotFoundException as e:
log.info('Resource policy previously removed.')
remove_event_targets(**event_obj.remove_targets_input)
try:
delete_event(**event_obj.delete_rule_input)
except event.exceptions.ResourceNotFoundException as e:
log.info('Event previously deleted.')
return cfnresponse.SUCCESS
def cfn_update_request():
"""Update event."""
            log.info('Received Update event')
            event_obj = CloudwatchEvent(stack_name, interval, toggle_parameter,
                                        toggle_values)
            stack = client.describe_stacks(StackName=stack_name)['Stacks'][0]
            if stack['StackStatus'] != 'CREATE_IN_PROGRESS':
                create_event(**event_obj.rule_text)
                log.info('Successfully updated auto-update rule: {}'.format(
                    event_obj.name))
return cfnresponse.SUCCESS
def cfn_create_request():
"""Create event."""
            log.info('Received Create event')
event_obj = CloudwatchEvent(stack_name, interval, toggle_parameter,
toggle_values)
create_event(**event_obj.rule_text)
put_targets(**event_obj.put_targets_input)
aws_lambda_obj = AWSLambda(event_obj.name)
try:
lambda_add_resource_policy(
**aws_lambda_obj.add_permission_input)
except aws_lambda.exceptions.ResourceConflictException as e:
log.info('Stale resource policy detected.')
lambda_remove_resource_policy(
**aws_lambda_obj.remove_permission_input)
log.info('removed stale resource policy.')
lambda_add_resource_policy(
**aws_lambda_obj.add_permission_input)
log.info('resource policy successfully added.')
return cfnresponse.SUCCESS
if event['RequestType'] == "Delete":
response_type = cfn_delete_request()
elif event['RequestType'] == "Create":
response_type = cfn_create_request()
elif event['RequestType'] == "Update":
response_type = cfn_update_request()
elif event['RequestType'] == "Create Traceback":
log.error("Create error occured and rollback intitiated.")
elif event['RequestType'] == "Delete Traceback":
log.exception('Error with delete action occured.' +
'View the logs for more deatils.')
raise Exception
elif event['RequestType'] is None:
log.exception('Event recieved was empty.')
else:
log.exception('Unknown request type')
raise Exception
except Exception as e:
log.exception('Error: failed on event: {}'.format(event))
print(str(e), e.args)
raise
finally:
cfnresponse.send(event, context, response_type, response_data, reason,
"CustomResourcePhyiscalID")
| []
| []
| [
"FUNCTION_NAME",
"REGION"
]
| [] | ["FUNCTION_NAME", "REGION"] | python | 2 | 0 | |
codegen/golang/util.go | package golang
import (
"os"
"path"
"path/filepath"
"strings"
)
// try to set the root import path based on the target dir
// - do nothing if the user specified an import path
// - if the target dir is under GOPATH, derive the import path from it
func setRootImportPath(importPath, targetDir string) string {
// use import path if not empty
if len(importPath) > 0 {
return importPath
}
// get GOPATH dir
gopath, err := filepath.Abs(os.Getenv("GOPATH"))
if err != nil {
return ""
}
gopath = filepath.Join(gopath, "src")
// get absolute target dir
absTargetDir, err := filepath.Abs(targetDir)
if err != nil {
panic("invalid targetDir:" + err.Error())
}
// panic if user doesn't specify import path and target dir
// not under GOPATH.
if !strings.HasPrefix(absTargetDir, gopath) {
panic("please specify '--import-path' or set '--dir' under your GOPATH")
}
// set import path
newImportPath, err := filepath.Rel(gopath, absTargetDir)
if err != nil {
panic("failed to set import path automatically:" + err.Error())
}
// re-join because otherwise windows will use `\`
return path.Join(strings.Split(newImportPath, string(filepath.Separator))...)
}
| [
"\"GOPATH\""
]
| []
| [
"GOPATH"
]
| [] | ["GOPATH"] | go | 1 | 0 | |
main.go | package main
import (
"os"
"runtime"
"github.com/appscode/kubed/pkg/cmds"
_ "k8s.io/client-go/kubernetes/fake"
"kmodules.xyz/client-go/logs"
)
func main() {
logs.InitLogs()
defer logs.FlushLogs()
if len(os.Getenv("GOMAXPROCS")) == 0 {
runtime.GOMAXPROCS(runtime.NumCPU())
}
if err := cmds.NewCmdKubed(Version).Execute(); err != nil {
os.Exit(1)
}
}
| [
"\"GOMAXPROCS\""
]
| []
| [
"GOMAXPROCS"
]
| [] | ["GOMAXPROCS"] | go | 1 | 0 | |
cmd/docker-firewall/main.go | package main
import (
"fmt"
"io/ioutil"
"log"
"os"
"os/signal"
"strconv"
"syscall"
"time"
"github.com/albertogviana/docker-firewall/config"
"github.com/albertogviana/docker-firewall/firewall"
"github.com/urfave/cli"
)
var pidFile = "/tmp/docker-firewall"
var configPath = "/etc/docker-firewall"
var (
version string
gitCommit string
)
func main() {
if os.Getenv("CONFIG_PATH") != "" {
configPath = os.Getenv("CONFIG_PATH")
}
if version == "" {
version = "not specified"
}
if gitCommit == "" {
gitCommit = "not specified"
}
cli.VersionPrinter = func(c *cli.Context) {
fmt.Printf("version: %s\ngit commit: %s\n", c.App.Version, gitCommit)
}
app := cli.NewApp()
app.Name = "docker-firewall"
app.Usage = "Easy way to apply firewall rules to block docker services."
app.Version = version
app.Commands = []cli.Command{
{
Name: "start",
Usage: "start the service",
Action: func(c *cli.Context) error {
start()
return nil
},
},
{
Name: "stop",
Usage: "stop the service",
Action: func(c *cli.Context) error {
stop()
return nil
},
},
}
err := app.Run(os.Args)
if err != nil {
log.Fatalf("there was an error and it was not possible to run the docker-firewall: %v", err)
}
}
func start() {
log.Println("Starting docker-firewall")
config, err := config.NewConfiguration(configPath)
if err != nil {
log.Fatalf("failed to read the configuration file: %v", err)
}
firewall, err := firewall.NewFirewall()
if err != nil {
log.Fatalf("failed to start firewall: %v", err)
}
log.Println("Applying rules")
err = firewall.Apply(config.Config.Rules)
if err != nil {
stop()
log.Fatalf("it was not possible to apply the rules: %v", err)
}
log.Println("Rules applied")
err = writePidFile()
if err != nil {
stop()
log.Fatalf("failed to create pid file with error: %v", err)
}
signalChan := make(chan os.Signal, 1)
signal.Notify(signalChan, syscall.SIGHUP, syscall.SIGTERM, syscall.SIGQUIT)
exitChan := make(chan int)
go func() {
for {
s := <-signalChan
log.Printf("Received signal: %s", s)
switch s {
// kill -SIGHUP XXXX
case syscall.SIGHUP:
log.Println("Reloading configuration")
stop()
start()
// kill -SIGTERM XXXX
case syscall.SIGTERM:
log.Println("stop and core dump")
stop()
exitChan <- 0
// kill -SIGQUIT XXXX
case syscall.SIGQUIT:
log.Println("Stopping the service")
stop()
exitChan <- 0
default:
log.Println("Unknown signal.")
stop()
exitChan <- 1
}
}
}()
	// Run the periodic rule verification in its own goroutine so that main can
	// block on exitChan below; otherwise the sends to exitChan (here and in the
	// signal handler above) would never be received.
	go func() {
		for {
			time.Sleep(10 * time.Second)
			verify, err := firewall.Verify(config.Config.Rules)
			if err != nil {
				log.Printf("Something went wrong: %s", err)
				stop()
				exitChan <- 1
				return
			}
			if !verify {
				log.Println("Applying rules again.")
				firewall.Apply(config.Config.Rules)
			}
		}
	}()
	code := <-exitChan
	os.Exit(code)
}
func stop() {
firewall, err := firewall.NewFirewall()
if err != nil {
log.Fatal(err)
}
firewall.ClearRule()
if _, err := os.Stat(pidFile); os.IsExist(err) {
piddata, err := ioutil.ReadFile(pidFile)
if err != nil {
log.Fatal(err)
}
// Convert the file contents to an integer.
pid, err := strconv.Atoi(string(piddata))
if err != nil {
log.Fatal(err)
}
err = syscall.Kill(pid, syscall.SIGTERM)
if err != nil {
log.Println(err)
}
os.Remove(pidFile)
}
}
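// writePidFile records the current process id in pidFile, failing if the file
// already names a live process.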
func writePidFile() error {
if _, err := os.Stat(pidFile); !os.IsNotExist(err) {
piddata, err := ioutil.ReadFile(pidFile)
if err != nil {
return err
}
pid, err := strconv.Atoi(string(piddata))
if err != nil {
return err
}
process, err := os.FindProcess(pid)
if err != nil {
return err
}
// Send the process a signal zero kill.
if err := process.Signal(syscall.Signal(0)); err == nil {
return fmt.Errorf("pid already running: %d", pid)
}
}
return ioutil.WriteFile(pidFile, []byte(fmt.Sprintf("%d", os.Getpid())), 0664)
}
| [
"\"CONFIG_PATH\"",
"\"CONFIG_PATH\""
]
| []
| [
"CONFIG_PATH"
]
| [] | ["CONFIG_PATH"] | go | 1 | 0 | |
broker/googlepubsub/googlepubsub.go | // Package googlepubsub provides a Google cloud pubsub broker
package googlepubsub
import (
"context"
"os"
"time"
"cloud.google.com/go/pubsub"
"github.com/google/uuid"
"github.com/go-iot-platform/go-micro/broker"
log "github.com/go-iot-platform/go-micro/logger"
"google.golang.org/api/option"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
)
type pubsubBroker struct {
client *pubsub.Client
options broker.Options
}
// A pubsub subscriber that manages handling of messages
type subscriber struct {
options broker.SubscribeOptions
topic string
exit chan bool
sub *pubsub.Subscription
}
// A single publication received by a handler
type publication struct {
pm *pubsub.Message
m *broker.Message
topic string
err error
}
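// run receives messages from the subscription until Unsubscribe closes the exit
// channel, wrapping each pubsub message in a broker.Message before invoking the handler.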
func (s *subscriber) run(hdlr broker.Handler) {
if s.options.Context != nil {
if max, ok := s.options.Context.Value(maxOutstandingMessagesKey{}).(int); ok {
s.sub.ReceiveSettings.MaxOutstandingMessages = max
}
if max, ok := s.options.Context.Value(maxExtensionKey{}).(time.Duration); ok {
s.sub.ReceiveSettings.MaxExtension = max
}
}
ctx, cancel := context.WithCancel(context.Background())
for {
select {
case <-s.exit:
cancel()
return
default:
s.sub.Receive(ctx, func(ctx context.Context, pm *pubsub.Message) {
// create broker message
m := &broker.Message{
Header: pm.Attributes,
Body: pm.Data,
}
				// hand the message off to the registered handler (its return value is ignored here)
				hdlr(m)
})
}
}
}
func (s *subscriber) Options() broker.SubscribeOptions {
return s.options
}
func (s *subscriber) Topic() string {
return s.topic
}
func (s *subscriber) Unsubscribe() error {
select {
case <-s.exit:
return nil
default:
close(s.exit)
if deleteSubscription, ok := s.options.Context.Value(deleteSubscription{}).(bool); !ok || deleteSubscription {
return s.sub.Delete(context.Background())
}
return nil
}
}
func (p *publication) Ack() error {
p.pm.Ack()
return nil
}
func (p *publication) Error() error {
return p.err
}
func (p *publication) Topic() string {
return p.topic
}
func (p *publication) Message() *broker.Message {
return p.m
}
func (b *pubsubBroker) Address() string {
return ""
}
func (b *pubsubBroker) Connect() error {
return nil
}
func (b *pubsubBroker) Disconnect() error {
return b.client.Close()
}
// Init not currently implemented
func (b *pubsubBroker) Init(opts ...broker.Option) error {
return nil
}
func (b *pubsubBroker) Options() broker.Options {
return b.options
}
// Publish checks if the topic exists and then publishes via google pubsub
func (b *pubsubBroker) Publish(topic string, msg *broker.Message, opts ...broker.PublishOption) (err error) {
t := b.client.Topic(topic)
ctx := context.Background()
m := &pubsub.Message{
ID: "m-" + uuid.New().String(),
Data: msg.Body,
Attributes: msg.Header,
}
pr := t.Publish(ctx, m)
if _, err = pr.Get(ctx); err != nil {
		// create the topic if it does not exist yet
		if status.Code(err) == codes.NotFound {
			log.Infof("Topic does not exist; creating topic: %s", topic)
if t, err = b.client.CreateTopic(ctx, topic); err == nil {
_, err = t.Publish(ctx, m).Get(ctx)
}
}
}
return
}
// Subscribe registers a subscription to the given topic against the google pubsub api
func (b *pubsubBroker) Subscribe(topic string, h broker.Handler, opts ...broker.SubscribeOption) (broker.Subscriber, error) {
options := broker.SubscribeOptions{
Queue: "q-" + uuid.New().String(),
Context: b.options.Context,
}
for _, o := range opts {
o(&options)
}
ctx := context.Background()
sub := b.client.Subscription(options.Queue)
if createSubscription, ok := b.options.Context.Value(createSubscription{}).(bool); !ok || createSubscription {
exists, err := sub.Exists(ctx)
if err != nil {
return nil, err
}
if !exists {
tt := b.client.Topic(topic)
subb, err := b.client.CreateSubscription(ctx, options.Queue, pubsub.SubscriptionConfig{
Topic: tt,
AckDeadline: time.Duration(0),
})
if err != nil {
return nil, err
}
sub = subb
}
}
subscriber := &subscriber{
options: options,
topic: topic,
exit: make(chan bool),
sub: sub,
}
go subscriber.run(h)
return subscriber, nil
}
func (b *pubsubBroker) String() string {
return "googlepubsub"
}
// NewBroker creates a new google pubsub broker
func NewBroker(opts ...broker.Option) broker.Broker {
options := broker.Options{
Context: context.Background(),
}
for _, o := range opts {
o(&options)
}
// retrieve project id
prjID, _ := options.Context.Value(projectIDKey{}).(string)
// if `GOOGLEPUBSUB_PROJECT_ID` is present, it will overwrite programmatically set projectID
if envPrjID := os.Getenv("GOOGLEPUBSUB_PROJECT_ID"); len(envPrjID) > 0 {
prjID = envPrjID
}
// retrieve client opts
cOpts, _ := options.Context.Value(clientOptionKey{}).([]option.ClientOption)
// create pubsub client
c, err := pubsub.NewClient(context.Background(), prjID, cOpts...)
if err != nil {
panic(err.Error())
}
return &pubsubBroker{
client: c,
options: options,
}
}
| [
"\"GOOGLEPUBSUB_PROJECT_ID\""
]
| []
| [
"GOOGLEPUBSUB_PROJECT_ID"
]
| [] | ["GOOGLEPUBSUB_PROJECT_ID"] | go | 1 | 0 | |
org.hl7.fhir.r5/src/test/java/org/hl7/fhir/r5/test/ProfileUtilitiesTests.java | package org.hl7.fhir.r5.test;
import java.io.FileNotFoundException;
import java.io.FileOutputStream;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import org.fhir.ucum.UcumException;
import org.hl7.fhir.exceptions.FHIRException;
import org.hl7.fhir.r5.conformance.ProfileUtilities;
import org.hl7.fhir.r5.formats.IParser.OutputStyle;
import org.hl7.fhir.r5.formats.XmlParser;
import org.hl7.fhir.r5.model.Base;
import org.hl7.fhir.r5.model.ElementDefinition;
import org.hl7.fhir.r5.model.StructureDefinition;
import org.hl7.fhir.r5.model.StructureDefinition.TypeDerivationRule;
import org.hl7.fhir.r5.test.utils.TestingUtilities;
import org.hl7.fhir.r5.utils.EOperationOutcome;
import org.hl7.fhir.utilities.CSFile;
import org.hl7.fhir.utilities.Utilities;
import org.hl7.fhir.utilities.validation.ValidationMessage;
import org.junit.jupiter.api.Test;
public class ProfileUtilitiesTests {
// /**
// * This is simple: we just create an empty differential, generate the snapshot, and then insist it must match the base
// *
// * @param context2
// * @
// * @throws EOperationOutcome
// */
@Test
public void testSimple() throws FHIRException, FileNotFoundException, IOException, UcumException {
StructureDefinition focus = new StructureDefinition();
StructureDefinition base = TestingUtilities.context().fetchResource(StructureDefinition.class, "http://hl7.org/fhir/StructureDefinition/Patient").copy();
focus.setUrl(Utilities.makeUuidUrn());
focus.setBaseDefinition(base.getUrl());
focus.setType("Patient");
focus.setDerivation(TypeDerivationRule.CONSTRAINT);
List<ValidationMessage> messages = new ArrayList<ValidationMessage>();
new ProfileUtilities(TestingUtilities.context(), messages, null).generateSnapshot(base, focus, focus.getUrl(), "http://test.org/test", "Simple Test");
boolean ok = base.getSnapshot().getElement().size() == focus.getSnapshot().getElement().size();
for (int i = 0; i < base.getSnapshot().getElement().size(); i++) {
ElementDefinition b = base.getSnapshot().getElement().get(i);
ElementDefinition f = focus.getSnapshot().getElement().get(i);
if (ok) {
if (!f.hasBase())
ok = false;
else if (!b.getPath().equals(f.getPath()))
ok = false;
else {
b.setBase(null);
f.setBase(null);
b.setRequirements(null);
f.setRequirements(null);
ok = Base.compareDeep(b, f, true);
}
}
}
if (!ok) {
compareXml(base, focus);
throw new FHIRException("Snap shot generation simple test failed");
} else
System.out.println("Snap shot generation simple test passed");
}
//
// /**
// * This is simple: we just create an empty differential, generate the snapshot, and then insist it must match the base. for a different resource with recursion
// *
// * @param context2
// * @
// * @throws EOperationOutcome
// */
@Test
public void testSimple2() throws EOperationOutcome, Exception {
StructureDefinition base = TestingUtilities.context().fetchResource(StructureDefinition.class, "http://hl7.org/fhir/StructureDefinition/ValueSet").copy();
StructureDefinition focus = base.copy();
focus.setUrl(Utilities.makeUuidUrn());
focus.setSnapshot(null);
focus.setDifferential(null);
List<ValidationMessage> messages = new ArrayList<ValidationMessage>();
new ProfileUtilities(TestingUtilities.context(), messages, null).generateSnapshot(base, focus, focus.getUrl(), "http://test.org", "Simple Test");
boolean ok = base.getSnapshot().getElement().size() == focus.getSnapshot().getElement().size();
for (int i = 0; i < base.getSnapshot().getElement().size(); i++) {
if (ok) {
ElementDefinition b = base.getSnapshot().getElement().get(i);
ElementDefinition f = focus.getSnapshot().getElement().get(i);
if (!f.hasBase() || !b.getPath().equals(f.getPath()))
ok = false;
else {
f.setBase(null);
b.setBase(null);
b.setRequirements(null);
f.setRequirements(null);
b.setComment(null);
f.setComment(null);
b.setDefinition(null);
f.setDefinition(null);
ok = Base.compareDeep(b, f, true);
}
}
}
if (!ok) {
compareXml(base, focus);
System.out.println("Snap shot generation simple test failed");
throw new FHIRException("Snap shot generation simple test failed");
} else
System.out.println("Snap shot generation simple test passed");
}
// /**
// * Change one cardinality.
// *
// * @param context2
// * @
// * @throws EOperationOutcome
// */
// private void testCardinalityChange() throws EOperationOutcome, Exception {
// StructureDefinition focus = new StructureDefinition();
// StructureDefinition base = context.fetchResource(StructureDefinition.class, "http://hl7.org/fhir/StructureDefinition/Patient").copy();
// focus.setUrl(Utilities.makeUuidUrn());
// focus.setBaseDefinition(base.getUrl());
// focus.setType(base.getType());
// focus.setDerivation(TypeDerivationRule.CONSTRAINT);
// ElementDefinition id = focus.getDifferential().addElement();
// id.setPath("Patient.identifier");
// id.setMin(1);
// List<ValidationMessage> messages = new ArrayList<ValidationMessage>();
// new ProfileUtilities(context, messages, null).generateSnapshot(base, focus, focus.getUrl(), "Simple Test" );
//
// boolean ok = base.getSnapshot().getElement().size() == focus.getSnapshot().getElement().size();
// for (int i = 0; i < base.getSnapshot().getElement().size(); i++) {
// if (ok) {
// ElementDefinition b = base.getSnapshot().getElement().get(i);
// ElementDefinition f = focus.getSnapshot().getElement().get(i);
// if (!f.hasBase() || !b.getPath().equals(f.getBase().getPath()))
// ok = false;
// else {
// f.setBase(null);
// if (f.getPath().equals("Patient.identifier")) {
// ok = f.getMin() == 1;
// if (ok)
// f.setMin(0);
// }
// ok = ok && Base.compareDeep(b, f, true);
// }
// }
// }
//
// if (!ok) {
// compareXml(base, focus);
//      throw new FHIRException("Snap shot generation change cardinality test failed");
//    } else
//      System.out.println("Snap shot generation change cardinality test passed");
// }
//
// /**
// * check that documentation appending is working
// *
// * @param context2
// * @
// * @throws EOperationOutcome
// */
// private void testDocumentationAppend() throws EOperationOutcome, Exception {
// StructureDefinition focus = new StructureDefinition();
// StructureDefinition base = context.fetchResource(StructureDefinition.class, "http://hl7.org/fhir/StructureDefinition/Patient").copy();
// focus.setUrl(Utilities.makeUuidUrn());
// focus.setBaseDefinition(base.getUrl());
// focus.setType(base.getType());
// focus.setDerivation(TypeDerivationRule.CONSTRAINT);
// ElementDefinition id = focus.getDifferential().addElement();
// id.setPath("Patient.identifier");
// id.setDefinition("... some more doco");
// List<ValidationMessage> messages = new ArrayList<ValidationMessage>();
// new ProfileUtilities(context, messages, null).generateSnapshot(base, focus, focus.getUrl(), "Simple Test" );
//
// boolean ok = base.getSnapshot().getElement().size() == focus.getSnapshot().getElement().size();
// for (int i = 0; i < base.getSnapshot().getElement().size(); i++) {
// if (ok) {
// ElementDefinition b = base.getSnapshot().getElement().get(i);
// ElementDefinition f = focus.getSnapshot().getElement().get(i);
// if (!f.hasBase() || !b.getPath().equals(f.getBase().getPath()))
// ok = false;
// else {
// f.setBase(null);
// if (f.getPath().equals("Patient.identifier")) {
// ok = f.getDefinition().length() > b.getDefinition().length();
// if (ok) {
// f.setDefinition(null);
// b.setDefinition(null);
// }
// }
// ok = ok && Base.compareDeep(b, f, true);
// }
// }
// }
//
// if (!ok) {
// compareXml(base, focus);
// throw new FHIRException("Snap shot generation documentation append failed");
// } else
// System.out.println("Snap shot generation documentation append test passed");
// }
//
//
// /**
// * check that narrowing types is working
// * this one doesn't rename the path
// *
// * @param context2
// * @
// * @throws EOperationOutcome
// */
// private void textTypeNarrowing1() throws EOperationOutcome, Exception {
// StructureDefinition focus = new StructureDefinition();
// StructureDefinition base = context.fetchResource(StructureDefinition.class, "http://hl7.org/fhir/StructureDefinition/Patient").copy();
// focus.setUrl(Utilities.makeUuidUrn());
// focus.setBaseDefinition(base.getUrl());
// focus.setType(base.getType());
// focus.setDerivation(TypeDerivationRule.CONSTRAINT);
// ElementDefinition id = focus.getDifferential().addElement();
// id.setPath("Patient.deceased[x]");
// id.addType().setCode("dateTime");
// List<ValidationMessage> messages = new ArrayList<ValidationMessage>();
// new ProfileUtilities(context, messages, null).generateSnapshot(base, focus, focus.getUrl(), "Simple Test" );
//
// boolean ok = base.getSnapshot().getElement().size() == focus.getSnapshot().getElement().size();
// for (int i = 0; i < base.getSnapshot().getElement().size(); i++) {
// if (ok) {
// ElementDefinition b = base.getSnapshot().getElement().get(i);
// ElementDefinition f = focus.getSnapshot().getElement().get(i);
// if (!f.hasBase() || !b.getPath().equals(f.getBase().getPath()))
// ok = false;
// else {
// f.setBase(null);
// if (f.getPath().equals("Patient.deceasedDateTime")) {
// ok = f.getType().size() == 1 && f.getType().get(0).getCode().equals("dateTime");
// if (ok) {
// f.getType().clear();
// b.getType().clear();
// f.setPath(b.getPath());
// }
// }
// ok = ok && Base.compareDeep(b, f, true);
// }
// }
// }
//
// if (!ok) {
// compareXml(base, focus);
// throw new FHIRException("Snap shot generation narrow type 1 failed");
// } else
// System.out.println("Snap shot generation narrow type 1 test passed");
// }
//
// /**
// * check that narrowing types is working
// * this one renames the path
// *
// * @param context2
// * @
// * @throws EOperationOutcome
// */
// private void textTypeNarrowing2() throws EOperationOutcome, Exception {
// StructureDefinition focus = new StructureDefinition();
// StructureDefinition base = context.fetchResource(StructureDefinition.class, "http://hl7.org/fhir/StructureDefinition/Patient").copy();
// focus.setUrl(Utilities.makeUuidUrn());
// focus.setBaseDefinition(base.getUrl());
// focus.setType(base.getType());
// focus.setDerivation(TypeDerivationRule.CONSTRAINT);
// ElementDefinition id = focus.getDifferential().addElement();
// id.setPath("Patient.deceasedDateTime");
// id.addType().setCode("dateTime");
// List<ValidationMessage> messages = new ArrayList<ValidationMessage>();
// new ProfileUtilities(context, messages, null).generateSnapshot(base, focus, focus.getUrl(), "Simple Test" );
//
// boolean ok = base.getSnapshot().getElement().size() == focus.getSnapshot().getElement().size();
// for (int i = 0; i < base.getSnapshot().getElement().size(); i++) {
// if (ok) {
// ElementDefinition b = base.getSnapshot().getElement().get(i);
// ElementDefinition f = focus.getSnapshot().getElement().get(i);
// if (!f.hasBase() || !b.getPath().equals(f.getBase().getPath()))
// ok = false;
// else {
// f.setBase(null);
// if (f.getPath().equals("Patient.deceasedDateTime")) {
// ok = f.getType().size() == 1 && f.getType().get(0).getCode().equals("dateTime");
// if (ok) {
// f.getType().clear();
// b.getType().clear();
// f.setPath(b.getPath());
// }
// }
// ok = ok && Base.compareDeep(b, f, true);
// }
// }
// }
//
// if (!ok) {
// compareXml(base, focus);
// throw new FHIRException("Snap shot generation narrow type 2 failed");
// } else
// System.out.println("Snap shot generation narrow type 2 test passed");
// }
//
// /**
// * check that mapping resolution is working
// *
// * @param context2
// * @
// * @throws EOperationOutcome
// */
// private void testMapping() throws EOperationOutcome, Exception {
// StructureDefinition focus = new StructureDefinition();
// StructureDefinition base = context.fetchResource(StructureDefinition.class, "http://hl7.org/fhir/StructureDefinition/Patient").copy();
// focus.setUrl(Utilities.makeUuidUrn());
// focus.setBaseDefinition(base.getUrl());
// focus.setType(base.getType());
// focus.setDerivation(TypeDerivationRule.CONSTRAINT);
// ElementDefinition id = focus.getDifferential().addElement();
// id.setPath("Patient.identifier");
// id.addMapping().setIdentity("rim").setMap("test");
// List<ValidationMessage> messages = new ArrayList<ValidationMessage>();
// new ProfileUtilities(context, messages, null).generateSnapshot(base, focus, focus.getUrl(), "Simple Test" );
//
// boolean ok = base.getSnapshot().getElement().size() == focus.getSnapshot().getElement().size();
// for (int i = 0; i < base.getSnapshot().getElement().size(); i++) {
// if (ok) {
// ElementDefinition b = base.getSnapshot().getElement().get(i);
// ElementDefinition f = focus.getSnapshot().getElement().get(i);
// if (!f.hasBase() || !b.getPath().equals(f.getBase().getPath()))
// ok = false;
// else {
// f.setBase(null);
// if (f.getPath().equals("Patient.identifier")) {
// ok = f.getMapping().size() > b.getMapping().size();
// if (ok) {
// f.getMapping().clear();
// b.getMapping().clear();
// }
// }
// ok = ok && Base.compareDeep(b, f, true);
// }
// }
// }
//
// if (!ok) {
// compareXml(base, focus);
// throw new FHIRException("Snap shot generation mapping changes failed");
// } else
// System.out.println("Snap shot generation mapping changes test passed");
// }
//
// /**
// * Walking into a type
// *
// * @param context2
// * @
// * @throws EOperationOutcome
// */
// private void testTypeWalk() throws EOperationOutcome, Exception {
// StructureDefinition focus = new StructureDefinition();
// StructureDefinition base = context.fetchResource(StructureDefinition.class, "http://hl7.org/fhir/StructureDefinition/Patient").copy();
// focus.setUrl(Utilities.makeUuidUrn());
// focus.setBaseDefinition(base.getUrl());
// focus.setType(base.getType());
// focus.setDerivation(TypeDerivationRule.CONSTRAINT);
// ElementDefinition id = focus.getDifferential().addElement();
// id.setPath("Patient.identifier");
// id.setMustSupport(true);
// id = focus.getDifferential().addElement();
// id.setPath("Patient.identifier.system");
// id.setMustSupport(true);
// List<ValidationMessage> messages = new ArrayList<ValidationMessage>();
// new ProfileUtilities(context, messages, null).generateSnapshot(base, focus, focus.getUrl(), "Simple Test" );
//
// // the derived should be 8 longer
// boolean ok = base.getSnapshot().getElement().size() == focus.getSnapshot().getElement().size() - 8;
// for (int i = 0; i < base.getSnapshot().getElement().size(); i++) {
// if (ok) {
// ElementDefinition b = base.getSnapshot().getElement().get(i);
// ElementDefinition f = focus.getSnapshot().getElement().get(i <= 9 ? i : i + 8);
// if (!f.hasBase() || !b.getPath().equals(f.getBase().getPath()))
// ok = false;
// else {
// f.setBase(null);
// if (f.getPath().equals("Patient.identifier")) {
// ok = f.getMustSupport() && !b.getMustSupport();
// if (ok) {
// f.setMustSupportElement(null);
// }
// }
// ok = Base.compareDeep(b, f, true);
// }
// }
// }
//
// if (!ok) {
// compareXml(base, focus);
// throw new FHIRException("Snap shot generation simple test failed");
// } else
// System.out.println("Snap shot generation simple test passed");
// }
//
// /**
// * Walking into a type, without explicitly doing so
// *
// * note: this currently fails.
// *
// * @param context2
// * @
// * @throws EOperationOutcome
// */
// private void testTypeWalk2() throws EOperationOutcome, Exception {
// StructureDefinition focus = new StructureDefinition();
// StructureDefinition base = context.fetchResource(StructureDefinition.class, "http://hl7.org/fhir/StructureDefinition/Patient").copy();
// focus.setUrl(Utilities.makeUuidUrn());
// focus.setBaseDefinition(base.getUrl());
// focus.setType(base.getType());
// focus.setDerivation(TypeDerivationRule.CONSTRAINT);
// ElementDefinition id = focus.getDifferential().addElement();
// id.setPath("Patient.identifier.system");
// id.setMustSupport(true);
// List<ValidationMessage> messages = new ArrayList<ValidationMessage>();
// new ProfileUtilities(context, messages, null).generateSnapshot(base, focus, focus.getUrl(), "Simple Test" );
//
// // the derived should be 8 longer
// boolean ok = base.getSnapshot().getElement().size() == focus.getSnapshot().getElement().size() - 8;
// for (int i = 0; i < base.getSnapshot().getElement().size(); i++) {
// if (ok) {
// ElementDefinition b = base.getSnapshot().getElement().get(i);
// ElementDefinition f = focus.getSnapshot().getElement().get(i <= 9 ? i : i + 8);
// if (!f.hasBase() || !b.getPath().equals(f.getBase().getPath()))
// ok = false;
// else {
// f.setBase(null);
// if (f.getPath().equals("Patient.identifier")) {
// ok = f.getMustSupport() && !b.getMustSupport();
// if (ok) {
// f.setMustSupportElement(null);
// }
// }
// ok = Base.compareDeep(b, f, true);
// }
// }
// }
//
// if (!ok) {
// compareXml(base, focus);
// throw new FHIRException("Snap shot generation simple test failed");
// } else
// System.out.println("Snap shot generation simple test passed");
// }
//
//
// /**
// * we're going to slice Patient.identifier
// */
// private void testSlicingSimple() throws EOperationOutcome, Exception {
//
// StructureDefinition focus = new StructureDefinition();
// StructureDefinition base = context.fetchResource(StructureDefinition.class, "http://hl7.org/fhir/StructureDefinition/Patient").copy();
// focus.setUrl(Utilities.makeUuidUrn());
// focus.setBaseDefinition(base.getUrl());
// focus.setType(base.getType());
// focus.setDerivation(TypeDerivationRule.CONSTRAINT);
//
// // set the slice up
// ElementDefinition id = focus.getDifferential().addElement();
// id.setPath("Patient.identifier");
// id.getSlicing().setOrdered(false).setRules(SlicingRules.OPEN).addDiscriminator().setPath("use").setType(DiscriminatorType.VALUE);
//
// // first slice:
// id = focus.getDifferential().addElement();
// id.setPath("Patient.identifier");
// id.setSliceName("name1");
// id = focus.getDifferential().addElement();
// id.setPath("Patient.identifier.use");
// id.setFixed(new CodeType("usual"));
//
// // second slice:
// id = focus.getDifferential().addElement();
// id.setPath("Patient.identifier");
// id.setSliceName("name2");
// id = focus.getDifferential().addElement();
// id.setPath("Patient.identifier.use");
// id.setFixed(new CodeType("official"));
//
//
// List<ValidationMessage> messages = new ArrayList<ValidationMessage>();
// new ProfileUtilities(context, messages, null).generateSnapshot(base, focus, focus.getUrl(), "Simple Test" );
//
// // 18 different: identifier + 8 inner children * 2
// boolean ok = base.getSnapshot().getElement().size() == focus.getSnapshot().getElement().size() - 18;
// for (int i = 0; i < base.getSnapshot().getElement().size(); i++) {
// if (ok) {
// ElementDefinition b = base.getSnapshot().getElement().get(i);
// ElementDefinition f = focus.getSnapshot().getElement().get(i <= 9 ? i : i + 18);
// if (!f.hasBase() || !b.getPath().equals(f.getBase().getPath()))
// ok = false;
// else {
// f.setBase(null);
// if (f.getPath().equals("Patient.identifier")) {
// ok = f.hasSlicing();
// if (ok)
// f.setSlicing(null);
// }
// ok = Base.compareDeep(b, f, true);
// }
// }
// }
// // now, check that the slices we skipped are correct:
// for (int i = 10; i <= 18; i++) {
// if (ok) {
// ElementDefinition d1 = focus.getSnapshot().getElement().get(i);
// ElementDefinition d2 = focus.getSnapshot().getElement().get(i+9);
// if (d1.getPath().equals("Patient.identifier.use")) {
// ok = d1.hasFixed() && d2.hasFixed() && !Base.compareDeep(d1.getFixed(), d2.getFixed(), true);
// if (ok) {
// d1.setFixed(null);
// d2.setFixed(null);
// }
// }
// if (d1.getPath().equals("Patient.identifier")) {
// ok = d1.hasSliceName() && d2.hasSliceName() && !Base.compareDeep(d1.getSliceNameElement(), d2.getSliceNameElement(), true);
// if (ok) {
// d1.setSliceName(null);
// d2.setSliceName(null);
// }
// }
// ok = Base.compareDeep(d1, d2, true);
// }
// }
// // for thoroughness, we could check against identifier too, but this is not done now.
//
// if (!ok) {
// compareXml(base, focus);
// throw new FHIRException("Snap shot generation slicing failed");
// } else
// System.out.println("Snap shot generation slicing passed");
//
// }
//
// /**
// * we're going to slice Patient.extension and refer to extension by profile
// *
// * implicit: whether to rely on implicit extension slicing
// */
// private void testSlicingExtension(boolean implicit) throws EOperationOutcome, Exception {
//
// StructureDefinition focus = new StructureDefinition();
// StructureDefinition base = context.fetchResource(StructureDefinition.class, "http://hl7.org/fhir/StructureDefinition/Patient").copy();
// focus.setUrl(Utilities.makeUuidUrn());
// focus.setBaseDefinition(base.getUrl());
// focus.setType(base.getType());
// focus.setDerivation(TypeDerivationRule.CONSTRAINT);
//
// // set the slice up
// ElementDefinition id;
// if (!implicit) {
// id = focus.getDifferential().addElement();
// id.setPath("Patient.extension");
// id.getSlicing().setOrdered(false).setRules(SlicingRules.OPEN).addDiscriminator().setPath("url").setType(DiscriminatorType.VALUE);
// id.setMax("3");
// }
// // first slice:
// id = focus.getDifferential().addElement();
// id.setPath("Patient.extension");
// id.setSliceName("name1");
// id.addType().setCode("Extension").setProfile("http://hl7.org/fhir/StructureDefinition/patient-birthTime");
// id.setMin(1);
//
// // second slice:
// id = focus.getDifferential().addElement();
// id.setPath("Patient.extension");
// id.setSliceName("name2");
// id.addType().setCode("Extension").setProfile("http://hl7.org/fhir/StructureDefinition/patient-mothersMaidenName");
//
// List<ValidationMessage> messages = new ArrayList<ValidationMessage>();
// ProfileUtilities pu = new ProfileUtilities(context, messages, null);
// pu.generateSnapshot(base, focus, focus.getUrl(), "Simple Test" );
//
// // 2 different: extension slices
// boolean ok = base.getSnapshot().getElement().size() == focus.getSnapshot().getElement().size() - 2;
// for (int i = 0; i < base.getSnapshot().getElement().size(); i++) {
// if (ok) {
// ElementDefinition b = base.getSnapshot().getElement().get(i);
// ElementDefinition f = focus.getSnapshot().getElement().get(i <= 7 ? i : i + 2);
// if (!f.hasBase() || !b.getPath().equals(f.getBase().getPath()))
// ok = false;
// else {
// f.setBase(null);
// if (f.getPath().equals("Patient.extension")) {
// ok = f.hasSlicing() && (implicit || f.getMax().equals("3"));
// if (ok) {
// f.setSlicing(null);
// f.setMaxElement(b.getMaxElement());
// }
// }
//          if (!f.getPath().equals("Patient.extension")) // skip comparing this one because the definitions get overwritten
// ok = Base.compareDeep(b, f, true);
// }
// }
// }
// // now, check that the slices we skipped are correct:
// if (ok) {
// ElementDefinition d1 = focus.getSnapshot().getElement().get(8);
// ElementDefinition d2 = focus.getSnapshot().getElement().get(9);
// ok = d1.hasType() && d1.getType().get(0).hasProfile() && d2.hasType() && d2.getType().get(0).hasProfile() && !Base.compareDeep(d1.getType(), d2.getType(), true) &&
// d1.getMin() == 1 && d2.getMin() == 0 && d1.getMax().equals("1") && d2.getMax().equals("1");
// if (ok) {
// d1.getType().clear();
// d2.getType().clear();
// d1.setSliceName("x");
// d2.setSliceName("x");
// d1.setMin(0);
// }
// ok = Base.compareDeep(d1, d2, true);
// // for thoroughness, we could check against extension too, but this is not done now.
// }
//
// if (!ok) {
// compareXml(base, focus);
// throw new FHIRException("Snap shot generation slicing extensions simple ("+(implicit ? "implicit" : "not implicit")+") failed");
// } else
// System.out.println("Snap shot generation slicing extensions simple ("+(implicit ? "implicit" : "not implicit")+") passed");
// }
//
// /**
// * we're going to slice Patient.extension and refer to extension by profile. one of the extensions is complex, and we're going to walk into
// * it and make it must support
// *
// * implicit: whether to rely on implicit extension slicing
// */
// private void testSlicingExtensionComplex(boolean implicit) throws EOperationOutcome, Exception {
//
// StructureDefinition focus = new StructureDefinition();
// StructureDefinition base = context.fetchResource(StructureDefinition.class, "http://hl7.org/fhir/StructureDefinition/Patient").copy();
// focus.setUrl(Utilities.makeUuidUrn());
// focus.setBaseDefinition(base.getUrl());
// focus.setType(base.getType());
// focus.setDerivation(TypeDerivationRule.CONSTRAINT);
//
// // set the slice up
// ElementDefinition id;
// if (!implicit) {
// id = focus.getDifferential().addElement();
// id.setPath("Patient.extension");
// id.getSlicing().setOrdered(false).setRules(SlicingRules.OPEN).addDiscriminator().setPath("url").setType(DiscriminatorType.VALUE);
// }
// // first slice - a simple one to get us going:
// id = focus.getDifferential().addElement();
// id.setPath("Patient.extension");
// id.setSliceName("simple");
// id.addType().setCode("Extension").setProfile("http://hl7.org/fhir/StructureDefinition/patient-birthTime");
//
// // second slice - the complex one
// // we walk into this and fix properties on the inner extensions
// id = focus.getDifferential().addElement();
// id.setPath("Patient.extension");
// id.setSliceName("complex");
// id.addType().setCode("Extension").setProfile("http://hl7.org/fhir/StructureDefinition/patient-nationality");
// if (!implicit) {
// id = focus.getDifferential().addElement();
// id.setPath("Patient.extension.extension");
// id.getSlicing().setOrdered(false).setRules(SlicingRules.OPEN).addDiscriminator().setPath("url").setType(DiscriminatorType.VALUE);
// }
// id = focus.getDifferential().addElement();
// id.setPath("Patient.extension.extension");
// id.setSliceName("code");
// id.setMustSupport(true);
// id.addType().setCode("Extension").setProfile("http://hl7.org/fhir/StructureDefinition/patient-nationality#code");
//
// id = focus.getDifferential().addElement();
// id.setPath("Patient.extension.extension");
// id.setSliceName("period");
// id.addType().setCode("Extension").setProfile("http://hl7.org/fhir/StructureDefinition/patient-nationality#period");
// id.setMax("0"); // prohibit this one....
//
// List<ValidationMessage> messages = new ArrayList<ValidationMessage>();
// new ProfileUtilities(context, messages, null).generateSnapshot(base, focus, focus.getUrl(), "Simple Test" );
//
// // ok, there's going to be 1 (simple) + complex: 1 + id + extension.slice + extension.code + (4 inside from that) + extension.period + (4 inside from that) + value + url = 16
// boolean ok = base.getSnapshot().getElement().size() == focus.getSnapshot().getElement().size() - 16;
//
// // custom checks
// ok = ok && rule(focus.getSnapshot().getElement().get(7).getPath().equals("Patient.extension"), "element 7 (base) path");
// ok = ok && rule(focus.getSnapshot().getElement().get(7).hasSlicing(), "element 7 slicing");
// ok = ok && rule(focus.getSnapshot().getElement().get(8).getPath().equals("Patient.extension"), "element 8 (1st slice) path");
// ok = ok && rule(focus.getSnapshot().getElement().get(8).getSliceName().equals("simple"), "element 8 (1st slice) name");
//    ok = ok && rule(focus.getSnapshot().getElement().get(8).getType().get(0).getProfile().equals("http://hl7.org/fhir/StructureDefinition/patient-birthTime"), "element 8 (1st slice) profile name");
//    ok = ok && rule(focus.getSnapshot().getElement().get(9).getPath().equals("Patient.extension"), "element 9 (2nd slice) path");
//    ok = ok && rule(focus.getSnapshot().getElement().get(9).getSliceName().equals("complex"), "element 9 (2nd slice) name");
// ok = ok && rule(focus.getSnapshot().getElement().get(9).getType().get(0).getProfile().equals("http://hl7.org/fhir/StructureDefinition/patient-nationality"), "element 9 (2nd slice) profile name");
// ok = ok && rule(focus.getSnapshot().getElement().get(10).getPath().equals("Patient.extension.id"), "element 10 (2nd slice).id path");
// ok = ok && rule(focus.getSnapshot().getElement().get(11).getPath().equals("Patient.extension.extension"), "element 11 (2nd slice).extension path");
// ok = ok && rule(focus.getSnapshot().getElement().get(12).getPath().equals("Patient.extension.extension"), "element 12 (2nd slice).extension path");
// ok = ok && rule(focus.getSnapshot().getElement().get(12).getMustSupport(), "element 12 (2nd slice).extension must support");
// ok = ok && rule(focus.getSnapshot().getElement().get(13).getPath().equals("Patient.extension.extension.id"), "element 13 (2nd slice).extension.id path");
// ok = ok && rule(focus.getSnapshot().getElement().get(14).getPath().equals("Patient.extension.extension.extension"), "element 14 (2nd slice).extension.extension path");
// ok = ok && rule(focus.getSnapshot().getElement().get(15).getPath().equals("Patient.extension.extension.url"), "element 15 (2nd slice).extension.url path");
// ok = ok && rule(focus.getSnapshot().getElement().get(16).getPath().equals("Patient.extension.extension.valueCodeableConcept"), "element 16 (2nd slice).extension.valueCodeableConcept path");
// ok = ok && rule(focus.getSnapshot().getElement().get(17).getPath().equals("Patient.extension.extension"), "element 17 (2nd slice).extension path");
// ok = ok && rule(focus.getSnapshot().getElement().get(17).getMax().equals("0"), "element 17 (2nd slice).extension cardinality");
// ok = ok && rule(focus.getSnapshot().getElement().get(18).getPath().equals("Patient.extension.extension.id"), "element 18 (2nd slice).extension.id path");
// ok = ok && rule(focus.getSnapshot().getElement().get(19).getPath().equals("Patient.extension.extension.extension"), "element 19 (2nd slice).extension.extension path");
// ok = ok && rule(focus.getSnapshot().getElement().get(20).getPath().equals("Patient.extension.extension.url"), "element 20 (2nd slice).extension.url path");
// ok = ok && rule(focus.getSnapshot().getElement().get(21).getPath().equals("Patient.extension.extension.valuePeriod"), "element 21 (2nd slice).extension.valuePeriod path");
// ok = ok && rule(focus.getSnapshot().getElement().get(22).getPath().equals("Patient.extension.url"), "element 22 (2nd slice).url path");
//    ok = ok && rule(focus.getSnapshot().getElement().get(23).getPath().equals("Patient.extension.value[x]"), "element 23 (2nd slice).value[x] path");
//
// for (int i = 0; i < base.getSnapshot().getElement().size(); i++) {
// if (ok) {
// ElementDefinition b = base.getSnapshot().getElement().get(i);
// ElementDefinition f = focus.getSnapshot().getElement().get(i <= 7 ? i : i + 16);
// if (!f.hasBase() || !b.getPath().equals(f.getBase().getPath()))
// ok = false;
// else {
// f.setBase(null);
// if (f.getPath().equals("Patient.extension")) {
// ok = f.hasSlicing();
// if (ok)
// f.setSlicing(null);
// }
//          if (!f.getPath().equals("Patient.extension")) // skip comparing this one because the definitions get overwritten
// ok = Base.compareDeep(b, f, true);
// }
// }
// }
//
// if (!ok) {
// compareXml(base, focus);
// throw new FHIRException("Snap shot generation slicing extensions complex ("+(implicit ? "implicit" : "not implicit")+") failed");
// } else
// System.out.println("Snap shot generation slicing extensions complex ("+(implicit ? "implicit" : "not implicit")+") passed");
// }
//
// private void testSlicingTask8742() throws EOperationOutcome, Exception {
// StructureDefinition focus = new StructureDefinition();
// StructureDefinition base = context.fetchResource(StructureDefinition.class, "http://hl7.org/fhir/StructureDefinition/Organization").copy();
// focus.setUrl(Utilities.makeUuidUrn());
// focus.setBaseDefinition(base.getUrl());
// focus.setType(base.getType());
// focus.setDerivation(TypeDerivationRule.CONSTRAINT);
//
// ElementDefinition id = focus.getDifferential().addElement();
// id.setPath("Organization.address");
// id.setMin(1);
// id.setMax("1");
// id.setMustSupport(true);
//
// id = focus.getDifferential().addElement();
// id.setPath("Organization.address.extension");
// id.setSliceName("USLabCountycodes");
// id.getSlicing().setOrdered(false).setRules(SlicingRules.OPEN).addDiscriminator().setPath("url").setType(DiscriminatorType.VALUE);
// id.setShort("County/Parish FIPS codes");
// id.setDefinition("County/Parish FIPS codes.");
// id.setRequirements("County/Parish Code SHALL use FIPS 6-4 ( INCITS 31:2009).");
// id.setMin(0);
// id.setMax("1");
// id.addType().setCode("Extension").setProfile("http://hl7.org/fhir/StructureDefinition/us-core-county");
// id.setMustSupport(true);
// id.getBinding().setStrength(BindingStrength.REQUIRED).setDescription("FIPS codes for US counties and county equivalent entities.").setValueSet(new Reference().setReference("http://hl7.org/fhir/ValueSet/fips-county"));
// List<ValidationMessage> messages = new ArrayList<ValidationMessage>();
//
// new ProfileUtilities(context, messages, null).generateSnapshot(base, focus, focus.getUrl(), "Simple Test" );
//
// // 14 for address with one sliced extension
// boolean ok = base.getSnapshot().getElement().size() == focus.getSnapshot().getElement().size() - 13;
//
// if (!ok) {
// compareXml(base, focus);
// throw new FHIRException("Snap shot generation test 8742 failed");
// } else
// System.out.println("Snap shot generation test 8742 passed");
// }
//
//
// private boolean rule(boolean ok, String message) {
// if (!ok)
// System.out.println("Test failed: " + message);
// return ok;
// }
//
private void compareXml(StructureDefinition base, StructureDefinition focus) throws FileNotFoundException, IOException {
base.setText(null);
focus.setText(null);
base.setDifferential(null);
// focus.setDifferential(null);
String f1 = Utilities.path("c:", "temp", "base.xml");
String f2 = Utilities.path("c:", "temp", "derived.xml");
    new XmlParser().setOutputStyle(OutputStyle.PRETTY).compose(new FileOutputStream(f1), base);
    new XmlParser().setOutputStyle(OutputStyle.PRETTY).compose(new FileOutputStream(f2), focus);
String diff = Utilities.path(System.getenv("ProgramFiles(X86)"), "WinMerge", "WinMergeU.exe");
List<String> command = new ArrayList<String>();
command.add("\"" + diff + "\" \"" + f1 + "\" \"" + f2 + "\"");
ProcessBuilder builder = new ProcessBuilder(command);
builder.directory(new CSFile("c:\\temp"));
builder.start();
}
} | [
"\"ProgramFiles(X86"
]
| []
| [
"ProgramFiles(X8"
]
| [] | ["ProgramFiles(X8"] | java | 1 | 0 | |
infrastructure/database/mysql.go | package database
import (
"database/sql"
"fmt"
"log"
"os"
_ "github.com/go-sql-driver/mysql"
)
// mySQL stores the database structure
type mySQL struct {
DB *sql.DB
}
// NewMySQL returns a mySQL value with an open database connection
func NewMySQL() (*mySQL, error) {
var ds = fmt.Sprintf("%s:%s@tcp(%s:%s)/%s",
os.Getenv("MYSQL_USER"),
os.Getenv("MYSQL_PASSWORD"),
os.Getenv("MYSQL_HOST"),
os.Getenv("MYSQL_PORT"),
os.Getenv("MYSQL_DATABASE"),
)
	db, err := sql.Open(os.Getenv("MYSQL_DRIVER"), ds)
	if err != nil {
		return nil, err
	}
if err = db.Ping(); err != nil {
panic(err)
}
log.Println("Successfully connected to the MySQL database")
return &mySQL{DB: db}, nil
}
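
// Illustrative usage sketch (not part of the original file); the MYSQL_*
// variables read above must be set in the environment, e.g.:
//
//	// export MYSQL_DRIVER=mysql MYSQL_USER=app MYSQL_PASSWORD=secret \
//	//        MYSQL_HOST=127.0.0.1 MYSQL_PORT=3306 MYSQL_DATABASE=appdb
//	store, err := NewMySQL()
//	if err != nil {
//		log.Fatal(err)
//	}
//	defer store.DB.Close()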
| [
"\"MYSQL_USER\"",
"\"MYSQL_PASSWORD\"",
"\"MYSQL_HOST\"",
"\"MYSQL_PORT\"",
"\"MYSQL_DATABASE\"",
"\"MYSQL_DRIVER\""
]
| []
| [
"MYSQL_PASSWORD",
"MYSQL_USER",
"MYSQL_PORT",
"MYSQL_DRIVER",
"MYSQL_DATABASE",
"MYSQL_HOST"
]
| [] | ["MYSQL_PASSWORD", "MYSQL_USER", "MYSQL_PORT", "MYSQL_DRIVER", "MYSQL_DATABASE", "MYSQL_HOST"] | go | 6 | 0 | |
pkg/util/kubeutil/kubeutil.go | //
// Copyright 2020 IBM Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
package kubeutil
import (
"context"
"encoding/json"
"fmt"
"os"
"path/filepath"
"strings"
"github.com/pkg/errors"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/client-go/discovery"
"k8s.io/client-go/dynamic"
corev1client "k8s.io/client-go/kubernetes/typed/core/v1"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
)
var cfg *rest.Config
type podGetterFunc func(obj *unstructured.Unstructured) ([]*corev1.Pod, error)
var podGetterMapper map[string]podGetterFunc = map[string]podGetterFunc{
"Deployment": getPodsFromDeployment,
// TODO: implement below
"ReplicaSet": getPodsNotImplemented,
"Pod": getPodsNotImplemented,
"DaemonSet": getPodsNotImplemented,
"StatefulSet": getPodsNotImplemented,
}
func GetInClusterConfig() (*rest.Config, error) {
config, err := rest.InClusterConfig()
if err != nil {
return nil, err
}
return config, nil
}
func IsInCluster() bool {
_, err := rest.InClusterConfig()
if err == nil {
return true
} else {
return false
}
}
func GetOutOfClusterConfig() (*rest.Config, error) {
kubeconfigPath := os.Getenv("KUBECONFIG")
if kubeconfigPath == "" {
home := os.Getenv("HOME")
if home == "" {
home = os.Getenv("USERPROFILE")
}
kubeconfigPath = filepath.Join(home, ".kube", "config")
}
config, err := clientcmd.BuildConfigFromFlags("", kubeconfigPath)
if err != nil {
return nil, err
}
return config, nil
}
func GetKubeConfig() (*rest.Config, error) {
if cfg != nil {
return cfg, nil
}
config, err := GetInClusterConfig()
if err != nil || config == nil {
config, err = GetOutOfClusterConfig()
}
if err != nil || config == nil {
return nil, err
}
return config, nil
}
func SetKubeConfig(conf *rest.Config) {
if conf != nil {
cfg = conf
}
}
func MatchLabels(obj metav1.Object, labelSelector *metav1.LabelSelector) (bool, error) {
selector, err := metav1.LabelSelectorAsSelector(labelSelector)
if err != nil {
return false, err
}
labelsMap := obj.GetLabels()
labelsSet := labels.Set(labelsMap)
matched := selector.Matches(labelsSet)
return matched, nil
}
func GetAPIResources() ([]metav1.APIResource, error) {
config, err := GetKubeConfig()
if err != nil {
return nil, fmt.Errorf("Error in getting k8s config; %s", err.Error())
}
discoveryClient, err := discovery.NewDiscoveryClientForConfig(config)
if err != nil {
return nil, fmt.Errorf("Error in creating discovery client; %s", err.Error())
}
apiResourceLists, err := discoveryClient.ServerPreferredResources()
if err != nil {
return nil, fmt.Errorf("Error in getting server preferred resources; %s", err.Error())
}
resources := []metav1.APIResource{}
for _, apiResourceList := range apiResourceLists {
if len(apiResourceList.APIResources) == 0 {
continue
}
gv, err := schema.ParseGroupVersion(apiResourceList.GroupVersion)
if err != nil {
continue
}
for _, resource := range apiResourceList.APIResources {
if len(resource.Verbs) == 0 {
continue
}
resource.Group = gv.Group
resource.Version = gv.Version
resources = append(resources, resource)
}
}
return resources, nil
}
func GetNamespaces() ([]*corev1.Namespace, error) {
config, err := GetKubeConfig()
if err != nil {
return nil, fmt.Errorf("error in getting k8s config; %s", err.Error())
}
client, err := corev1client.NewForConfig(config)
if err != nil {
return nil, fmt.Errorf("error in creating Core V1 Client; %s", err.Error())
}
nsList, err := client.Namespaces().List(context.Background(), metav1.ListOptions{})
if err != nil {
return nil, fmt.Errorf("error in listing namespaces; %s", err.Error())
}
namespaces := []*corev1.Namespace{}
for i := range nsList.Items {
ns := nsList.Items[i]
namespaces = append(namespaces, &ns)
}
return namespaces, nil
}
func GetResource(apiVersion, kind, namespace, name string) (*unstructured.Unstructured, error) {
var gv schema.GroupVersion
var err error
skipGV := false
if apiVersion == "" {
// if apiVersion is not specified, just use kind to identify resource kind
skipGV = true
} else {
gv, err = schema.ParseGroupVersion(apiVersion)
if err != nil {
return nil, fmt.Errorf("Error in parsing apiVersion; %s", err.Error())
}
}
apiResources, err := GetAPIResources()
if err != nil {
return nil, fmt.Errorf("Error in getting API Resources; %s", err.Error())
}
namespaced := true
gvr := schema.GroupVersionResource{}
for _, r := range apiResources {
gOk := (r.Group == gv.Group) || skipGV
vOk := (r.Version == gv.Version) || skipGV
kOk := (r.Kind == kind) || (r.Name == kind) || (r.SingularName == kind) || contains(r.ShortNames, kind)
if gOk && vOk && kOk {
gvr = schema.GroupVersionResource{
Group: r.Group,
Version: r.Version,
Resource: r.Name,
}
namespaced = r.Namespaced
}
}
if gvr.Resource == "" {
return nil, fmt.Errorf("Failed to find GroupVersionKind matches apiVerions: %s, kind: %s", apiVersion, kind)
}
config, err := GetKubeConfig()
if err != nil {
return nil, fmt.Errorf("Error in getting k8s config; %s", err.Error())
}
dyClient, err := dynamic.NewForConfig(config)
if err != nil {
return nil, fmt.Errorf("Error in creating DynamicClient; %s", err.Error())
}
var resource *unstructured.Unstructured
if namespaced {
resource, err = dyClient.Resource(gvr).Namespace(namespace).Get(context.Background(), name, metav1.GetOptions{})
} else {
resource, err = dyClient.Resource(gvr).Get(context.Background(), name, metav1.GetOptions{})
}
if err != nil {
return nil, fmt.Errorf("Error in getting resource; %s", err.Error())
}
return resource, nil
}
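
// Illustrative usage sketch (not part of the original file). The kind argument
// matches the Kind, the plural/singular resource name, or a short name, and
// apiVersion may be left empty to let the API-resource scan pick one; the
// resource names below are hypothetical:
//
//	deploy, err := GetResource("apps/v1", "Deployment", "default", "nginx")
//	cm, err := GetResource("", "cm", "default", "my-config")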
func ListResources(apiVersion, kind, namespace string) ([]*unstructured.Unstructured, error) {
var gv schema.GroupVersion
var err error
skipGV := false
if apiVersion == "" {
// if apiVersion is not specified, just use kind to identify resource kind
skipGV = true
} else {
gv, err = schema.ParseGroupVersion(apiVersion)
if err != nil {
return nil, fmt.Errorf("Error in parsing apiVersion; %s", err.Error())
}
}
apiResources, err := GetAPIResources()
if err != nil {
return nil, fmt.Errorf("Error in getting API Resources; %s", err.Error())
}
namespaced := true
gvr := schema.GroupVersionResource{}
for _, r := range apiResources {
gOk := (r.Group == gv.Group) || skipGV
vOk := (r.Version == gv.Version) || skipGV
kOk := (r.Kind == kind) || (r.Name == kind) || (r.SingularName == kind) || contains(r.ShortNames, kind)
if gOk && vOk && kOk {
gvr = schema.GroupVersionResource{
Group: r.Group,
Version: r.Version,
Resource: r.Name,
}
namespaced = r.Namespaced
}
}
if gvr.Resource == "" {
return nil, fmt.Errorf("Failed to find GroupVersionKind matches apiVerions: %s, kind: %s", apiVersion, kind)
}
config, err := GetKubeConfig()
if err != nil {
return nil, fmt.Errorf("Error in getting k8s config; %s", err.Error())
}
dyClient, err := dynamic.NewForConfig(config)
if err != nil {
return nil, fmt.Errorf("Error in creating DynamicClient; %s", err.Error())
}
var resourceList *unstructured.UnstructuredList
if namespaced {
resourceList, err = dyClient.Resource(gvr).Namespace(namespace).List(context.Background(), metav1.ListOptions{})
} else {
resourceList, err = dyClient.Resource(gvr).List(context.Background(), metav1.ListOptions{})
}
if err != nil {
return nil, fmt.Errorf("Error in getting resource; %s", err.Error())
}
resources := []*unstructured.Unstructured{}
for i := range resourceList.Items {
res := resourceList.Items[i]
resources = append(resources, &res)
}
return resources, nil
}
func contains(all []string, one string) bool {
for _, sub := range all {
if sub == one {
return true
}
}
return false
}
type ImageObject struct {
PodName string
ContainerName string
ImageID string
ImageRef string
Digest string
}
func GetAllImagesFromObject(obj *unstructured.Unstructured) ([]ImageObject, error) {
pods, err := GetAllPodsFromObject(obj)
if err != nil {
return nil, err
}
images := []ImageObject{}
for _, p := range pods {
podName := p.GetName()
for _, cstatus := range p.Status.InitContainerStatuses {
containerName := cstatus.Name
imageRef := cstatus.Image
imageID := cstatus.ImageID
if imageRef == "" || imageID == "" {
continue
}
parts := strings.Split(imageID, "@")
imageDigest := parts[len(parts)-1]
images = append(images, ImageObject{PodName: podName, ContainerName: containerName, ImageID: imageID, ImageRef: imageRef, Digest: imageDigest})
}
for _, cstatus := range p.Status.ContainerStatuses {
containerName := cstatus.Name
imageRef := cstatus.Image
imageID := cstatus.ImageID
if imageRef == "" || imageID == "" {
continue
}
parts := strings.Split(imageID, "@")
imageDigest := parts[len(parts)-1]
images = append(images, ImageObject{PodName: podName, ContainerName: containerName, ImageID: imageID, ImageRef: imageRef, Digest: imageDigest})
}
}
return images, nil
}
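
// Illustrative usage sketch (not part of the original file), chaining the
// helpers above; object names are hypothetical:
//
//	obj, err := GetResource("apps/v1", "Deployment", "default", "nginx")
//	if err == nil {
//		images, _ := GetAllImagesFromObject(obj)
//		for _, img := range images {
//			fmt.Printf("%s/%s -> %s (%s)\n", img.PodName, img.ContainerName, img.ImageRef, img.Digest)
//		}
//	}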
func GetAllPodsFromObject(obj *unstructured.Unstructured) ([]*corev1.Pod, error) {
kind := obj.GetKind()
name := obj.GetName()
podGetterFunc, ok := podGetterMapper[kind]
if !ok {
return nil, nil
}
pods, err := podGetterFunc(obj)
if err != nil {
return nil, errors.Wrapf(err, "failed to get pods for kind: %s, name: %s", kind, name)
}
return pods, nil
}
func getPodsNotImplemented(obj *unstructured.Unstructured) ([]*corev1.Pod, error) {
kind := obj.GetKind()
return nil, fmt.Errorf("pod getter is not implemented yet for kind: %s", kind)
}
func getPodsFromDeployment(obj *unstructured.Unstructured) ([]*corev1.Pod, error) {
var deploy appsv1.Deployment
objBytes, _ := json.Marshal(obj.Object)
err := json.Unmarshal(objBytes, &deploy)
if err != nil {
return nil, fmt.Errorf("error in converting object to Deployment; %s", err.Error())
}
namespace := deploy.GetNamespace()
selector, err := metav1.LabelSelectorAsSelector(deploy.Spec.Selector)
if err != nil {
return nil, fmt.Errorf("error in converting *metav1.LabelSelector to labels.Selector; %s", err.Error())
}
selectorStr := selector.String()
config, err := GetKubeConfig()
if err != nil {
return nil, fmt.Errorf("error in getting k8s config; %s", err.Error())
}
client, err := corev1client.NewForConfig(config)
if err != nil {
return nil, fmt.Errorf("error in creating Core V1 Client; %s", err.Error())
}
podList, err := client.Pods(namespace).List(context.Background(), metav1.ListOptions{LabelSelector: selectorStr})
if err != nil {
return nil, fmt.Errorf("error in listing pods; %s", err.Error())
}
pods := []*corev1.Pod{}
for i := range podList.Items {
p := podList.Items[i]
pods = append(pods, &p)
}
return pods, nil
}
| [
"\"KUBECONFIG\"",
"\"HOME\"",
"\"USERPROFILE\""
]
| []
| [
"HOME",
"USERPROFILE",
"KUBECONFIG"
]
| [] | ["HOME", "USERPROFILE", "KUBECONFIG"] | go | 3 | 0 | |
tools/downloader/src/open_model_zoo/model_tools/converter.py | # Copyright (c) 2019-2021 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import collections
import json
import os
import string
import sys
from pathlib import Path
from open_model_zoo.model_tools import (
_configuration, _common, _concurrency, _reporting,
)
ModelOptimizerProperties = collections.namedtuple('ModelOptimizerProperties',
['cmd_prefix', 'extra_args', 'base_dir'])
def run_pre_convert(reporter, model, output_dir, args):
script = _common.MODEL_ROOT / model.subdirectory / 'pre-convert.py'
if not script.exists():
return True
reporter.print_section_heading('{}Running pre-convert script for {}',
'(DRY RUN) ' if args.dry_run else '', model.name)
cmd = [str(args.python), '--', str(script), '--',
str(args.download_dir / model.subdirectory), str(output_dir / model.subdirectory)]
reporter.print('Pre-convert command: {}', _common.command_string(cmd))
reporter.print(flush=True)
success = True if args.dry_run else reporter.job_context.subprocess(cmd)
reporter.print()
return success
def convert_to_onnx(reporter, model, output_dir, args, template_variables):
reporter.print_section_heading('{}Converting {} to ONNX',
'(DRY RUN) ' if args.dry_run else '', model.name)
converter_path = Path(__file__).absolute().parent / \
'internal_scripts' / model.converter_to_onnx
conversion_to_onnx_args = [string.Template(arg).substitute(template_variables)
for arg in model.conversion_to_onnx_args]
cmd = [str(args.python), '--', str(converter_path), *conversion_to_onnx_args]
reporter.print('Conversion to ONNX command: {}', _common.command_string(cmd))
reporter.print(flush=True)
success = True if args.dry_run else reporter.job_context.subprocess(cmd)
reporter.print()
return success
def convert(reporter, model, output_dir, args, mo_props, requested_precisions):
telemetry = _common.Telemetry()
if model.mo_args is None:
reporter.print_section_heading('Skipping {} (no conversions defined)', model.name)
reporter.print()
return True
model_precisions = requested_precisions & model.precisions
if not model_precisions:
reporter.print_section_heading('Skipping {} (all conversions skipped)', model.name)
reporter.print()
return True
(output_dir / model.subdirectory).mkdir(parents=True, exist_ok=True)
if not run_pre_convert(reporter, model, output_dir, args):
telemetry.send_event('md', 'converter_failed_models', model.name)
telemetry.send_event('md', 'converter_error',
json.dumps({'error': 'pre-convert-script-failed', 'model': model.name, 'precision': None}))
return False
model_format = model.framework
template_variables = {
'config_dir': _common.MODEL_ROOT / model.subdirectory,
'conv_dir': output_dir / model.subdirectory,
'dl_dir': args.download_dir / model.subdirectory,
'mo_dir': mo_props.base_dir,
}
if model.conversion_to_onnx_args:
if not convert_to_onnx(reporter, model, output_dir, args, template_variables):
telemetry.send_event('md', 'converter_failed_models', model.name)
telemetry.send_event('md', 'converter_error',
json.dumps({'error': 'convert_to_onnx-failed', 'model': model.name, 'precision': None}))
return False
model_format = 'onnx'
expanded_mo_args = [
string.Template(arg).substitute(template_variables)
for arg in model.mo_args]
for model_precision in sorted(model_precisions):
data_type = model_precision.split('-')[0]
mo_cmd = [*mo_props.cmd_prefix,
'--framework={}'.format(model_format),
'--data_type={}'.format(data_type),
'--output_dir={}'.format(output_dir / model.subdirectory / model_precision),
'--model_name={}'.format(model.name),
*expanded_mo_args, *mo_props.extra_args]
reporter.print_section_heading('{}Converting {} to IR ({})',
'(DRY RUN) ' if args.dry_run else '', model.name, model_precision)
reporter.print('Conversion command: {}', _common.command_string(mo_cmd))
if not args.dry_run:
reporter.print(flush=True)
if not reporter.job_context.subprocess(mo_cmd):
telemetry.send_event('md', 'converter_failed_models', model.name)
telemetry.send_event('md', 'converter_error',
json.dumps({'error': 'mo-failed', 'model': model.name, 'precision': model_precision}))
return False
reporter.print()
return True
def num_jobs_arg(value_str):
if value_str == 'auto':
return os.cpu_count() or 1
try:
value = int(value_str)
if value > 0: return value
except ValueError:
pass
raise argparse.ArgumentTypeError('must be a positive integer or "auto" (got {!r})'.format(value_str))
def main():
parser = argparse.ArgumentParser()
parser.add_argument('-d', '--download_dir', type=Path, metavar='DIR',
default=Path.cwd(), help='root of the directory tree with downloaded model files')
parser.add_argument('-o', '--output_dir', type=Path, metavar='DIR',
help='root of the directory tree to place converted files into')
parser.add_argument('--name', metavar='PAT[,PAT...]',
help='convert only models whose names match at least one of the specified patterns')
parser.add_argument('--list', type=Path, metavar='FILE.LST',
help='convert only models whose names match at least one of the patterns in the specified file')
parser.add_argument('--all', action='store_true', help='convert all available models')
parser.add_argument('--print_all', action='store_true', help='print all available models')
parser.add_argument('--precisions', metavar='PREC[,PREC...]',
help='run only conversions that produce models with the specified precisions')
parser.add_argument('-p', '--python', type=Path, metavar='PYTHON', default=sys.executable,
help='Python executable to run Model Optimizer with')
parser.add_argument('--mo', type=Path, metavar='MO.PY',
help='Model Optimizer entry point script')
parser.add_argument('--add_mo_arg', dest='extra_mo_args', metavar='ARG', action='append',
help='Extra argument to pass to Model Optimizer')
parser.add_argument('--dry_run', action='store_true',
help='Print the conversion commands without running them')
parser.add_argument('-j', '--jobs', type=num_jobs_arg, default=1,
help='number of conversions to run concurrently')
# aliases for backwards compatibility
parser.add_argument('--add-mo-arg', dest='extra_mo_args', action='append', help=argparse.SUPPRESS)
parser.add_argument('--dry-run', action='store_true', help=argparse.SUPPRESS)
args = parser.parse_args()
with _common.telemetry_session('Model Converter', 'converter') as telemetry:
models = _configuration.load_models_from_args(parser, args)
for mode in ['all', 'list', 'name']:
if getattr(args, mode):
telemetry.send_event('md', 'converter_selection_mode', mode)
if args.precisions is None:
requested_precisions = _common.KNOWN_PRECISIONS
else:
requested_precisions = set(args.precisions.split(','))
for model in models:
precisions_to_send = requested_precisions if args.precisions else requested_precisions & model.precisions
model_information = {
'name': model.name,
'framework': model.framework,
'precisions': str(precisions_to_send).replace(',', ';'),
}
telemetry.send_event('md', 'converter_model', json.dumps(model_information))
unknown_precisions = requested_precisions - _common.KNOWN_PRECISIONS
if unknown_precisions:
sys.exit('Unknown precisions specified: {}.'.format(', '.join(sorted(unknown_precisions))))
mo_path = args.mo
if mo_path is None:
mo_package_path = _common.get_package_path(args.python, 'mo')
if mo_package_path:
# run MO as a module
mo_cmd_prefix = [str(args.python), '-m', 'mo']
mo_dir = mo_package_path.parent
else:
try:
mo_path = Path(os.environ['INTEL_OPENVINO_DIR']) / 'deployment_tools/model_optimizer/mo.py'
except KeyError:
sys.exit('Unable to locate Model Optimizer. '
+ 'Use --mo or run setupvars.sh/setupvars.bat from the OpenVINO toolkit.')
if mo_path is not None:
# run MO as a script
mo_cmd_prefix = [str(args.python), '--', str(mo_path)]
mo_dir = mo_path.parent
output_dir = args.download_dir if args.output_dir is None else args.output_dir
reporter = _reporting.Reporter(_reporting.DirectOutputContext())
mo_props = ModelOptimizerProperties(
cmd_prefix=mo_cmd_prefix,
extra_args=args.extra_mo_args or [],
base_dir=mo_dir,
)
shared_convert_args = (output_dir, args, mo_props, requested_precisions)
if args.jobs == 1 or args.dry_run:
results = [convert(reporter, model, *shared_convert_args) for model in models]
else:
results = _concurrency.run_in_parallel(args.jobs,
lambda context, model:
convert(_reporting.Reporter(context), model, *shared_convert_args),
models)
failed_models = [model.name for model, successful in zip(models, results) if not successful]
if failed_models:
reporter.print('FAILED:')
for failed_model_name in failed_models:
reporter.print(failed_model_name)
sys.exit(1)
if __name__ == '__main__':
main()
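
# Illustrative invocations (not part of the original script); the model name,
# precision, and Model Optimizer path below are examples only:
#
#   python converter.py --all --precisions FP16 -j auto
#   python converter.py --name some-model --mo /opt/intel/openvino/deployment_tools/model_optimizer/mo.py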
| []
| []
| [
"INTEL_OPENVINO_DIR"
]
| [] | ["INTEL_OPENVINO_DIR"] | python | 1 | 0 | |
cmd/init.go | // Copyright © 2018 Dhananjay Balan <[email protected]>
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// 3. Neither the name of the copyright holder nor the names of its contributors
// may be used to endorse or promote products derived from this software
// without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
package cmd
import (
"fmt"
"io/ioutil"
"os"
"strings"
"github.com/spf13/cobra"
"gopkg.in/yaml.v2"
)
// initCmd represents the init command
var initCmd = &cobra.Command{
Use: "init",
Short: "Configure pipet",
Long: `Creates the config files for pipet if not present, usually you only need at the first use`,
Run: func(cmd *cobra.Command, args []string) {
configFile := expandHome("~/.pipet.yaml")
_, err := os.Stat(configFile)
if err == nil {
errorGuard(fmt.Errorf("e"), "files exists: delete ~/.pipet.yaml if you want to reconfig")
}
fmt.Printf("Where do you want to store snippets? (directory|default: ~/snippets): ")
snipDir := readLine()
if !isValidDirectory(snipDir) {
errorGuard(fmt.Errorf("invalid path"), "error")
}
// try getting $EDITOR!
defEdit := "(no defaults)"
editor := os.Getenv("EDITOR")
if editor != "" {
defEdit = fmt.Sprintf("(defaults to %s)", editor)
}
fmt.Printf("path to editor to use with pippet %s:", defEdit)
eBin := readLine()
config := &struct {
DocDir string `yaml:"document_dir,omitempty"`
EBin string `yaml:"editor_binary,omitempty"`
}{}
if eBin == "" && defEdit == "" {
errorGuard(fmt.Errorf("empy input"), "no editor specified")
}
if eBin == "" {
eBin = editor
}
if eBin != "" {
// eBin might be editor + args
// for e.g emacsclient -t
actualBinPath := strings.Split(eBin, " ")[0]
path, err := which(actualBinPath)
errorGuard(err, "No such editor found!")
// expanded path
if path != eBin {
fmt.Printf("Using %s as the absoulte path to editor\n", Green(path))
}
if actualBinPath != eBin {
config.EBin = path + " " + strings.Join(strings.Split(eBin, " ")[1:], " ")
} else {
config.EBin = path
}
} else {
errorGuard(fmt.Errorf("empty input"), "no editor sepcified")
}
if snipDir != "" {
config.DocDir = snipDir
} else {
config.DocDir = expandHome("~/snippets")
}
buf, err := yaml.Marshal(config)
if err != nil {
errorGuard(err, "failed marshalling data")
}
errorGuard(ioutil.WriteFile(configFile, buf, 0755), "writing file failed")
fmt.Printf(`pipet is now ready to use
snippets are stored in: %s
config is stored in %s
`, Green(snipDir), Green(configFile))
},
}
func init() {
rootCmd.AddCommand(initCmd)
}
| [
"\"EDITOR\""
]
| []
| [
"EDITOR"
]
| [] | ["EDITOR"] | go | 1 | 0 | |
share/qt/extract_strings_qt.py | #!/usr/bin/python
'''
Extract _("...") strings for translation and convert to Qt stringdefs so that
they can be picked up by Qt linguist.
'''
from __future__ import division,print_function,unicode_literals
from subprocess import Popen, PIPE
import glob
import operator
import os
import sys
OUT_CPP="qt/onlaxstrings.cpp"
EMPTY=['""']
def parse_po(text):
"""
Parse 'po' format produced by xgettext.
Return a list of (msgid,msgstr) tuples.
"""
messages = []
msgid = []
msgstr = []
in_msgid = False
in_msgstr = False
for line in text.split('\n'):
line = line.rstrip('\r')
if line.startswith('msgid '):
if in_msgstr:
messages.append((msgid, msgstr))
in_msgstr = False
# message start
in_msgid = True
msgid = [line[6:]]
elif line.startswith('msgstr '):
in_msgid = False
in_msgstr = True
msgstr = [line[7:]]
elif line.startswith('"'):
if in_msgid:
msgid.append(line)
if in_msgstr:
msgstr.append(line)
if in_msgstr:
messages.append((msgid, msgstr))
return messages
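# Example: the fragment
#   msgid "Hello"
#   msgstr ""
# parses to [(['"Hello"'], ['""'])].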
files = sys.argv[1:]
# xgettext -n --keyword=_ $FILES
XGETTEXT=os.getenv('XGETTEXT', 'xgettext')
if not XGETTEXT:
print('Cannot extract strings: xgettext utility is not installed or not configured.',file=sys.stderr)
print('Please install package "gettext" and re-run \'./configure\'.',file=sys.stderr)
exit(1)
child = Popen([XGETTEXT,'--output=-','-n','--keyword=_'] + files, stdout=PIPE)
(out, err) = child.communicate()
messages = parse_po(out.decode('utf-8'))
f = open(OUT_CPP, 'w')
f.write("""
#include <QtGlobal>
// Automatically generated by extract_strings.py
#ifdef __GNUC__
#define UNUSED __attribute__((unused))
#else
#define UNUSED
#endif
""")
f.write('static const char UNUSED *onlax_strings[] = {\n')
messages.sort(key=operator.itemgetter(0))
for (msgid, msgstr) in messages:
if msgid != EMPTY:
f.write('QT_TRANSLATE_NOOP("onlax-core", %s),\n' % ('\n'.join(msgid)))
f.write('};\n')
f.close()
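# The generated file contains entries like (illustrative):
#   QT_TRANSLATE_NOOP("onlax-core", "Error: unable to start"),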
| []
| []
| [
"XGETTEXT"
]
| [] | ["XGETTEXT"] | python | 1 | 0 | |
services/inventory/server.go | package main
import (
"log"
"net/http"
"os"
"github.com/99designs/gqlgen/graphql/handler"
"github.com/99designs/gqlgen/graphql/playground"
"github.com/ContextLogic/inventory/graph"
"github.com/ContextLogic/inventory/graph/generated"
)
const defaultPort = "4002"
func main() {
port := os.Getenv("PORT")
if port == "" {
port = defaultPort
}
srv := handler.NewDefaultServer(generated.NewExecutableSchema(generated.Config{Resolvers: &graph.Resolver{}}))
http.Handle("/", playground.Handler("GraphQL playground", "/query"))
http.Handle("/query", srv)
log.Printf("connect to http://localhost:%s/ for GraphQL playground", port)
log.Fatal(http.ListenAndServe(":"+port, nil))
}
| [
"\"PORT\""
]
| []
| [
"PORT"
]
| [] | ["PORT"] | go | 1 | 0 | |
test_ptrack/__init__.py | import os
from django.test.runner import DiscoverRunner
os.environ['DJANGO_SETTINGS_MODULE'] = 'test_ptrack.settings'
test_runner = DiscoverRunner()
import django
django.setup()
# def setup():
# global test_runner
#
# from django.test.runner import DiscoverRunner
#
# test_runner = DiscoverRunner()
# test_runner.setup_test_environment()
# not setting up test db - ptrack tests do not require a DB
# def teardown():
# test_runner.teardown_test_environment()
| []
| []
| [
"DJANGO_SETTINGS_MODULE"
]
| [] | ["DJANGO_SETTINGS_MODULE"] | python | 1 | 0 | |
SecretsManagerRDSSQLServerRotationMultiUser/lambda_function.py | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: MIT-0
import boto3
import json
import logging
import os
import pymssql
logger = logging.getLogger()
logger.setLevel(logging.INFO)
def lambda_handler(event, context):
"""Secrets Manager RDS SQL Server Handler
This handler uses the master-user rotation scheme to rotate an RDS SQL Server user credential. During the first rotation, this
scheme logs into the database as the master user, creates a new user (appending _clone to the username), and grants the
new user all of the permissions from the user being rotated. Once the secret is in this state, every subsequent rotation
simply creates a new secret with the AWSPREVIOUS user credentials, adds any missing permissions that are in the current
secret, changes that user's password, and then marks the latest secret as AWSCURRENT.
The Secret SecretString is expected to be a JSON string with the following format:
{
'engine': <required: must be set to 'sqlserver'>,
'host': <required: instance host name>,
'username': <required: username>,
'password': <required: password>,
'dbname': <optional: database name, default to 'master'>,
'port': <optional: if not specified, default port 1433 will be used>,
'masterarn': <required: the arn of the master secret which will be used to create users/change passwords>
}
Args:
event (dict): Lambda dictionary of event parameters. These keys must include the following:
- SecretId: The secret ARN or identifier
- ClientRequestToken: The ClientRequestToken of the secret version
- Step: The rotation step (one of createSecret, setSecret, testSecret, or finishSecret)
context (LambdaContext): The Lambda runtime information
Raises:
ResourceNotFoundException: If the secret with the specified arn and stage does not exist
ValueError: If the secret is not properly configured for rotation
KeyError: If the secret json does not contain the expected keys
"""
arn = event['SecretId']
token = event['ClientRequestToken']
step = event['Step']
# Setup the client
service_client = boto3.client('secretsmanager', endpoint_url=os.environ['SECRETS_MANAGER_ENDPOINT'])
# Make sure the version is staged correctly
metadata = service_client.describe_secret(SecretId=arn)
if "RotationEnabled" in metadata and not metadata['RotationEnabled']:
logger.error("Secret %s is not enabled for rotation" % arn)
raise ValueError("Secret %s is not enabled for rotation" % arn)
versions = metadata['VersionIdsToStages']
if token not in versions:
logger.error("Secret version %s has no stage for rotation of secret %s." % (token, arn))
raise ValueError("Secret version %s has no stage for rotation of secret %s." % (token, arn))
if "AWSCURRENT" in versions[token]:
logger.info("Secret version %s already set as AWSCURRENT for secret %s." % (token, arn))
return
elif "AWSPENDING" not in versions[token]:
logger.error("Secret version %s not set as AWSPENDING for rotation of secret %s." % (token, arn))
raise ValueError("Secret version %s not set as AWSPENDING for rotation of secret %s." % (token, arn))
# Call the appropriate step
if step == "createSecret":
create_secret(service_client, arn, token)
elif step == "setSecret":
set_secret(service_client, arn, token)
elif step == "testSecret":
test_secret(service_client, arn, token)
elif step == "finishSecret":
finish_secret(service_client, arn, token)
else:
logger.error("lambda_handler: Invalid step parameter %s for secret %s" % (step, arn))
raise ValueError("Invalid step parameter %s for secret %s" % (step, arn))
def create_secret(service_client, arn, token):
"""Generate a new secret
This method first checks for the existence of a secret for the passed in token. If one does not exist, it will generate a
new secret and put it with the passed in token.
Args:
service_client (client): The secrets manager service client
arn (string): The secret ARN or other identifier
token (string): The ClientRequestToken associated with the secret version
Raises:
ValueError: If the current secret is not valid JSON
KeyError: If the secret json does not contain the expected keys
"""
# Make sure the current secret exists
current_dict = get_secret_dict(service_client, arn, "AWSCURRENT")
# Now try to get the secret version, if that fails, put a new secret
try:
get_secret_dict(service_client, arn, "AWSPENDING", token)
logger.info("createSecret: Successfully retrieved secret for %s." % arn)
except service_client.exceptions.ResourceNotFoundException:
# Get the alternate username swapping between the original user and the user with _clone appended to it
current_dict['username'] = get_alt_username(current_dict['username'])
# Get exclude characters from environment variable
exclude_characters = os.environ['EXCLUDE_CHARACTERS'] if 'EXCLUDE_CHARACTERS' in os.environ else '/@"\'\\'
# Generate a random password
passwd = service_client.get_random_password(ExcludeCharacters=exclude_characters, PasswordLength=30)
current_dict['password'] = passwd['RandomPassword']
# Put the secret
service_client.put_secret_value(SecretId=arn, ClientRequestToken=token, SecretString=json.dumps(current_dict), VersionStages=['AWSPENDING'])
logger.info("createSecret: Successfully put secret for ARN %s and version %s." % (arn, token))
def set_secret(service_client, arn, token):
"""Set the pending secret in the database
This method tries to login to the database with the AWSPENDING secret and returns on success. If that fails, it
tries to login with the master credentials from the masterarn in the current secret. If this succeeds, it adds all
grants for AWSCURRENT user to the AWSPENDING user, creating the user and/or setting the password in the process.
Else, it throws a ValueError.
Args:
service_client (client): The secrets manager service client
arn (string): The secret ARN or other identifier
token (string): The ClientRequestToken associated with the secret version
Raises:
ResourceNotFoundException: If the secret with the specified arn and stage does not exist
ValueError: If the secret is not valid JSON or master credentials could not be used to login to DB
KeyError: If the secret json does not contain the expected keys
"""
# First try to login with the pending secret, if it succeeds, return
pending_dict = get_secret_dict(service_client, arn, "AWSPENDING", token)
conn = get_connection(pending_dict)
if conn:
conn.close()
logger.info("setSecret: AWSPENDING secret is already set as password in SQL Server DB for secret arn %s." % arn)
return
# Before we do anything with the secret, make sure the AWSCURRENT secret is valid by logging in to the db
# This ensures that the credential we are rotating is valid to protect against a confused deputy attack
current_dict = get_secret_dict(service_client, arn, "AWSCURRENT")
conn = get_connection(current_dict)
if not conn:
logger.error("setSecret: Unable to log into database using current credentials for secret %s" % arn)
raise ValueError("Unable to log into database using current credentials for secret %s" % arn)
conn.close()
# Now get the master arn from the current secret
master_arn = current_dict['masterarn']
master_dict = get_secret_dict(service_client, master_arn, "AWSCURRENT")
if current_dict['host'] != master_dict['host']:
logger.warn("setSecret: Master database host %s is not the same host as current %s" % (master_dict['host'], current_dict['host']))
# Now log into the database with the master credentials
conn = get_connection(master_dict)
if not conn:
logger.error("setSecret: Unable to log into database using credentials in master secret %s" % master_arn)
raise ValueError("Unable to log into database using credentials in master secret %s" % master_arn)
# Now set the password to the pending password
try:
with conn.cursor(as_dict=True) as cursor:
# Get the current version and db
cursor.execute("SELECT @@VERSION AS version")
version = cursor.fetchall()[0]['version']
cursor.execute("SELECT DB_NAME() AS name")
current_db = cursor.fetchall()[0]['name']
# Determine if we are in a contained DB
containment = 0
if not version.startswith("Microsoft SQL Server 2008"): # SQL Server 2008 does not support contained databases
cursor.execute("SELECT containment FROM sys.databases WHERE name = %s", current_db)
containment = cursor.fetchall()[0]['containment']
# Set the user or login password (depending on database containment)
if containment == 0:
set_password_for_login(cursor, current_db, current_dict['username'], pending_dict)
else:
set_password_for_user(cursor, current_dict['username'], pending_dict)
conn.commit()
logger.info("setSecret: Successfully created user %s in SQL Server DB for secret arn %s." % (pending_dict['username'], arn))
finally:
conn.close()
def test_secret(service_client, arn, token):
"""Test the pending secret against the database
This method tries to log into the database with the secrets staged with AWSPENDING and runs
a permissions check to ensure the user has the correct permissions.
Args:
service_client (client): The secrets manager service client
arn (string): The secret ARN or other identifier
token (string): The ClientRequestToken associated with the secret version
Raises:
ResourceNotFoundException: If the secret with the specified arn and stage does not exist
ValueError: If the secret is not valid JSON or pending credentials could not be used to login to the database
KeyError: If the secret json does not contain the expected keys
"""
# Try to login with the pending secret, if it succeeds, return
conn = get_connection(get_secret_dict(service_client, arn, "AWSPENDING", token))
if conn:
        # This is where the lambda validates the user's permissions. Modify the query below to
        # tailor these validations to your needs
try:
with conn.cursor() as cur:
cur.execute("SELECT @@VERSION AS version")
finally:
conn.close()
logger.info("testSecret: Successfully signed into SQL Server DB with AWSPENDING secret in %s." % arn)
return
else:
logger.error("testSecret: Unable to log into database with pending secret of secret ARN %s" % arn)
raise ValueError("Unable to log into database with pending secret of secret ARN %s" % arn)
def finish_secret(service_client, arn, token):
"""Finish the rotation by marking the pending secret as current
This method moves the secret from the AWSPENDING stage to the AWSCURRENT stage.
Args:
service_client (client): The secrets manager service client
arn (string): The secret ARN or other identifier
token (string): The ClientRequestToken associated with the secret version
Raises:
ResourceNotFoundException: If the secret with the specified arn does not exist
"""
# First describe the secret to get the current version
metadata = service_client.describe_secret(SecretId=arn)
current_version = None
for version in metadata["VersionIdsToStages"]:
if "AWSCURRENT" in metadata["VersionIdsToStages"][version]:
if version == token:
# The correct version is already marked as current, return
logger.info("finishSecret: Version %s already marked as AWSCURRENT for %s" % (version, arn))
return
current_version = version
break
# Finalize by staging the secret version current
service_client.update_secret_version_stage(SecretId=arn, VersionStage="AWSCURRENT", MoveToVersionId=token, RemoveFromVersionId=current_version)
logger.info("finishSecret: Successfully set AWSCURRENT stage to version %s for secret %s." % (token, arn))
def get_connection(secret_dict):
"""Gets a connection to SQL Server DB from a secret dictionary
This helper function tries to connect to the database grabbing connection info
from the secret dictionary. If successful, it returns the connection, else None
Args:
secret_dict (dict): The Secret Dictionary
Returns:
Connection: The pymssql.Connection object if successful. None otherwise
Raises:
KeyError: If the secret json does not contain the expected keys
"""
# Parse and validate the secret JSON string
port = str(secret_dict['port']) if 'port' in secret_dict else '1433'
dbname = secret_dict['dbname'] if 'dbname' in secret_dict else 'master'
# Try to obtain a connection to the db
try:
conn = pymssql.connect(server=secret_dict['host'],
user=secret_dict['username'],
password=secret_dict['password'],
database=dbname,
port=port,
login_timeout=5,
as_dict=True)
return conn
except pymssql.OperationalError:
return None
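# Example (illustrative values):
#   conn = get_connection({'host': 'db.example.com', 'username': 'admin',
#                          'password': 's3cret', 'port': 1433})
#   returns a pymssql connection on success, or None if login fails.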
def get_secret_dict(service_client, arn, stage, token=None):
"""Gets the secret dictionary corresponding for the secret arn, stage, and token
This helper function gets credentials for the arn and stage passed in and returns the dictionary by parsing the JSON string
Args:
service_client (client): The secrets manager service client
arn (string): The secret ARN or other identifier
token (string): The ClientRequestToken associated with the secret version, or None if no validation is desired
stage (string): The stage identifying the secret version
Returns:
SecretDictionary: Secret dictionary
Raises:
ResourceNotFoundException: If the secret with the specified arn and stage does not exist
ValueError: If the secret is not valid JSON
KeyError: If the secret json does not contain the expected keys
"""
required_fields = ['host', 'username', 'password']
# Only do VersionId validation against the stage if a token is passed in
if token:
secret = service_client.get_secret_value(SecretId=arn, VersionId=token, VersionStage=stage)
else:
secret = service_client.get_secret_value(SecretId=arn, VersionStage=stage)
plaintext = secret['SecretString']
secret_dict = json.loads(plaintext)
# Run validations against the secret
if 'engine' not in secret_dict or secret_dict['engine'] != 'sqlserver':
raise KeyError("Database engine must be set to 'sqlserver' in order to use this rotation lambda")
for field in required_fields:
if field not in secret_dict:
raise KeyError("%s key is missing from secret JSON" % field)
# Parse and return the secret JSON string
return secret_dict
def get_alt_username(current_username):
"""Gets the alternate username for the current_username passed in
This helper function gets the username for the alternate user based on the passed in current username.
Args:
current_username (client): The current username
Returns:
AlternateUsername: Alternate username
Raises:
ValueError: If the new username length would exceed the maximum allowed
"""
clone_suffix = "_clone"
if current_username.endswith(clone_suffix):
return current_username[:(len(clone_suffix) * -1)]
else:
new_username = current_username + clone_suffix
if len(new_username) > 128:
raise ValueError("Unable to clone user, username length with _clone appended would exceed 128 characters")
return new_username
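# Examples (illustrative): get_alt_username('myuser') -> 'myuser_clone',
# get_alt_username('myuser_clone') -> 'myuser'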
def set_password_for_login(cursor, current_db, current_login, pending_dict):
"""Runs various SQL statements in order to set the login password to that of the pending secret dictionary
This helper function runs SQL statements in order to set the login password to that of the pending secret dictionary
Args:
cursor (pymssql.Cursor): The pymssql Cursor object
current_db (string): The current database that we are connected to
current_login (string): The current user login
pending_dict (dict): The Secret Dictionary for the pending secret
Raises:
pymssql.OperationalError: If there are any errors running the SQL statements
"""
# Check if the login exists, if not create it and grant it all permissions from the current user
# If the user exists, just update the password
cursor.execute("SELECT name FROM sys.server_principals WHERE name = %s", pending_dict['username'])
if len(cursor.fetchall()) == 0:
# Create the new login
create_login = "CREATE LOGIN %s" % pending_dict['username']
cursor.execute(create_login + " WITH PASSWORD = %s", pending_dict['password'])
        # Only handle server level permissions if we are connected to the master DB
if current_db == 'master':
# Loop through the types of server permissions and grant them to the new login
query = "SELECT state_desc, permission_name FROM sys.server_permissions perm "\
"JOIN sys.server_principals prin ON perm.grantee_principal_id = prin.principal_id "\
"WHERE prin.name = '%s'" % current_login
cursor.execute(query)
for row in cursor.fetchall():
if row['state_desc'] == 'GRANT_WITH_GRANT_OPTION':
cursor.execute("GRANT %s TO %s WITH GRANT OPTION" % (row['permission_name'], pending_dict['username']))
else:
cursor.execute("%s %s TO %s" % (row['state_desc'], row['permission_name'], pending_dict['username']))
# We do not create user objects in the master database
else:
# Get the user for the current login and generate the alt user
cursor.execute("SELECT dbprin.name FROM sys.database_principals dbprin JOIN sys.server_principals sprin ON dbprin.sid = sprin.sid WHERE sprin.name = %s", current_login)
cur_user = cursor.fetchall()[0]['name']
alt_user = get_alt_username(cur_user)
# Check if the user exists. If not, create it
cursor.execute("SELECT name FROM sys.database_principals WHERE name = %s", alt_user)
if len(cursor.fetchall()) == 0:
cursor.execute("CREATE USER %s FOR LOGIN %s" % (alt_user, pending_dict['username']))
apply_database_permissions(cursor, cur_user, pending_dict['username'])
else:
alter_stmt = "ALTER LOGIN %s" % pending_dict['username']
cursor.execute(alter_stmt + " WITH PASSWORD = %s", pending_dict['password'])
def set_password_for_user(cursor, current_user, pending_dict):
"""Runs various SQL statements in order to set the user password to that of the pending secret dictionary
This helper function runs SQL statements in order to set the user password to that of the pending secret dictionary
Args:
cursor (pymssql.Cursor): The pymssql Cursor object
current_user (string): The current username
pending_dict (dict): The Secret Dictionary for the pending secret
Raises:
pymssql.OperationalError: If there are any errors running the SQL statements
"""
# Check if the user exists, if not create it and grant it all permissions from the current user
# If the user exists, just update the password
cursor.execute("SELECT name FROM sys.database_principals WHERE name = %s", pending_dict['username'])
if len(cursor.fetchall()) == 0:
# Create the new user
create_login = "CREATE USER %s" % pending_dict['username']
cursor.execute(create_login + " WITH PASSWORD = %s", pending_dict['password'])
apply_database_permissions(cursor, current_user, pending_dict['username'])
else:
alter_stmt = "ALTER USER %s" % pending_dict['username']
cursor.execute(alter_stmt + " WITH PASSWORD = %s", pending_dict['password'])
def apply_database_permissions(cursor, current_user, pending_user):
"""Runs various SQL statements to apply the database permissions from current_user to pending_user
This helper function runs SQL statements to apply the database permissions from current_user to pending_user
Args:
cursor (pymssql.Cursor): The pymssql Cursor object
current_user (string): The current username
pending_user (string): The pending username
Raises:
pymssql.OperationalError: If there are any errors running the SQL statements
ValueError: If any database values were unexpected/invalid
"""
# Get the roles assigned to the current user and assign it to the pending user
query = "SELECT roleprin.name FROM sys.database_role_members rolemems "\
"JOIN sys.database_principals roleprin ON roleprin.principal_id = rolemems.role_principal_id "\
"JOIN sys.database_principals userprin ON userprin.principal_id = rolemems.member_principal_id "\
"WHERE userprin.name = '%s'" % current_user
cursor.execute(query)
    for row in cursor.fetchall():
        sql_stmt = "ALTER ROLE %s ADD MEMBER %s" % (row['name'], pending_user)
        cursor.execute(sql_stmt)
# Loop through the database permissions and grant them to the user
query = "SELECT "\
"class = perm.class, "\
"state_desc = perm.state_desc, "\
"perm_name = perm.permission_name, "\
"schema_name = permschem.name, "\
"obj_name = obj.name, "\
"obj_schema_name = objschem.name, "\
"col_name = col.name, "\
"imp_name = imp.name, "\
"imp_type = imp.type, "\
"assembly_name = assembly.name, "\
"type_name = types.name, "\
"type_schema = typeschem.name, "\
"schema_coll_name = schema_coll.name, "\
"xml_schema = xmlschem.name, "\
"msg_type_name = msg_type.name, "\
"contract_name = contract.name, "\
"svc_name = svc.name, "\
"binding_name = binding.name, "\
"route_name = route.name, "\
"catalog_name = catalog.name, "\
"symkey_name = symkey.name, "\
"cert_name = cert.name, "\
"asymkey_name = asymkey.name "\
"FROM sys.database_permissions perm "\
"JOIN sys.database_principals prin ON perm.grantee_principal_id = prin.principal_id "\
"LEFT JOIN sys.schemas permschem ON permschem.schema_id = perm.major_id "\
"LEFT JOIN sys.objects obj ON obj.object_id = perm.major_id "\
"LEFT JOIN sys.schemas objschem ON objschem.schema_id = obj.schema_id "\
"LEFT JOIN sys.columns col ON col.object_id = perm.major_id AND col.column_id = perm.minor_id "\
"LEFT JOIN sys.database_principals imp ON imp.principal_id = perm.major_id "\
"LEFT JOIN sys.assemblies assembly ON assembly.assembly_id = perm.major_id "\
"LEFT JOIN sys.types types ON types.user_type_id = perm.major_id "\
"LEFT JOIN sys.schemas typeschem ON typeschem.schema_id = types.schema_id "\
"LEFT JOIN sys.xml_schema_collections schema_coll ON schema_coll.xml_collection_id = perm.major_id "\
"LEFT JOIN sys.schemas xmlschem ON xmlschem.schema_id = schema_coll.schema_id "\
"LEFT JOIN sys.service_message_types msg_type ON msg_type.message_type_id = perm.major_id "\
"LEFT JOIN sys.service_contracts contract ON contract.service_contract_id = perm.major_id "\
"LEFT JOIN sys.services svc ON svc.service_id = perm.major_id "\
"LEFT JOIN sys.remote_service_bindings binding ON binding.remote_service_binding_id = perm.major_id "\
"LEFT JOIN sys.routes route ON route.route_id = perm.major_id "\
"LEFT JOIN sys.fulltext_catalogs catalog ON catalog.fulltext_catalog_id = perm.major_id "\
"LEFT JOIN sys.symmetric_keys symkey ON symkey.symmetric_key_id = perm.major_id "\
"LEFT JOIN sys.certificates cert ON cert.certificate_id = perm.major_id "\
"LEFT JOIN sys.asymmetric_keys asymkey ON asymkey.asymmetric_key_id = perm.major_id "\
"WHERE prin.name = '%s'" % current_user
cursor.execute(query)
for row in cursor.fetchall():
# Determine which type of permission this is and create the sql statement accordingly
if row['class'] == 0: # Database permission
permission = row['perm_name']
elif row['class'] == 1: # Object or Column
permission = "%s ON OBJECT::%s.%s" % (row['perm_name'], row['obj_schema_name'], row['obj_name'])
if row['col_name']:
permission = "%s (%s) " % (permission, row['col_name'])
elif row['class'] == 3: # Schema
permission = "%s ON SCHEMA::%s" % (row['perm_name'], row['schema_name'])
elif row['class'] == 4: # Impersonation (Database Principal)
if row['imp_type'] == 'S': # SQL User
permission = "%s ON USER::%s" % (row['perm_name'], row['imp_name'])
elif row['imp_type'] == 'R': # Role
permission = "%s ON ROLE::%s" % (row['perm_name'], row['imp_name'])
elif row['imp_type'] == 'A': # Application Role
permission = "%s ON APPLICATION ROLE::%s" % (row['perm_name'], row['imp_name'])
else:
raise ValueError("Invalid database principal permission type %s" % row['imp_type'])
elif row['class'] == 5: # Assembly
permission = "%s ON ASSEMBLY::%s" % (row['perm_name'], row['assembly_name'])
elif row['class'] == 6: # Type
permission = "%s ON TYPE::%s.%s" % (row['perm_name'], row['type_schema'], row['type_name'])
elif row['class'] == 10: # XML Schema Collection
permission = "%s ON XML SCHEMA COLLECTION::%s.%s" % (row['perm_name'], row['xml_schema'], row['schema_coll_name'])
elif row['class'] == 15: # Message Type
permission = "%s ON MESSAGE TYPE::%s" % (row['perm_name'], row['msg_type_name'])
elif row['class'] == 16: # Service Contract
permission = "%s ON CONTRACT::%s" % (row['perm_name'], row['contract_name'])
elif row['class'] == 17: # Service
permission = "%s ON SERVICE::%s" % (row['perm_name'], row['svc_name'])
elif row['class'] == 18: # Remote Service Binding
permission = "%s ON REMOTE SERVICE BINDING::%s" % (row['perm_name'], row['binding_name'])
elif row['class'] == 19: # Route
permission = "%s ON ROUTE::%s" % (row['perm_name'], row['route_name'])
elif row['class'] == 23: # Full-Text Catalog
permission = "%s ON FULLTEXT CATALOG::%s" % (row['perm_name'], row['catalog_name'])
elif row['class'] == 24: # Symmetric Key
permission = "%s ON SYMMETRIC KEY::%s" % (row['perm_name'], row['symkey_name'])
elif row['class'] == 25: # Certificate
permission = "%s ON CERTIFICATE::%s" % (row['perm_name'], row['cert_name'])
elif row['class'] == 26: # Asymmetric Key
permission = "%s ON ASYMMETRIC KEY::%s" % (row['perm_name'], row['asymkey_name'])
else:
raise ValueError("Invalid database permission class %s" % row['class'])
# Add the state to the statement
if row['state_desc'] == 'GRANT_WITH_GRANT_OPTION':
sql_stmt = "GRANT %s TO %s WITH GRANT OPTION" % (permission, pending_user)
else:
sql_stmt = "%s %s TO %s" % (row['state_desc'], permission, pending_user)
# Execute the sql
cursor.execute(sql_stmt)
| []
| []
| [
"EXCLUDE_CHARACTERS",
"SECRETS_MANAGER_ENDPOINT"
]
| [] | ["EXCLUDE_CHARACTERS", "SECRETS_MANAGER_ENDPOINT"] | python | 2 | 0 | |
bcbio/pipeline/main.py | """Main entry point for distributed next-gen sequencing pipelines.
Handles running the full pipeline based on instructions
"""
from __future__ import print_function
from collections import defaultdict
import copy
import os
import sys
import resource
import tempfile
import toolz as tz
from bcbio import log, heterogeneity, hla, structural, utils
from bcbio.cwl.inspect import initialize_watcher
from bcbio.distributed import prun
from bcbio.distributed.transaction import tx_tmpdir
from bcbio.log import logger, DEFAULT_LOG_DIR
from bcbio.ngsalign import alignprep
from bcbio.pipeline import datadict as dd
from bcbio.pipeline import (archive, config_utils, disambiguate, region,
run_info, qcsummary, rnaseq)
from bcbio.provenance import profile, system
from bcbio.variation import (ensemble, genotype, population, validate, joint,
peddy)
from bcbio.chipseq import peaks, atac
def run_main(workdir, config_file=None, fc_dir=None, run_info_yaml=None,
parallel=None, workflow=None):
"""Run variant analysis, handling command line options.
"""
# Set environment to standard to use periods for decimals and avoid localization
locale_to_use = utils.get_locale()
os.environ["LC_ALL"] = locale_to_use
os.environ["LC"] = locale_to_use
os.environ["LANG"] = locale_to_use
workdir = utils.safe_makedir(os.path.abspath(workdir))
os.chdir(workdir)
config, config_file = config_utils.load_system_config(config_file, workdir)
parallel = log.create_base_logger(config, parallel)
log.setup_local_logging(config, parallel)
logger.info(f"System YAML configuration: {os.path.abspath(config_file)}.")
logger.info(f"Locale set to {locale_to_use}.")
if config.get("log_dir", None) is None:
config["log_dir"] = os.path.join(workdir, DEFAULT_LOG_DIR)
if parallel["type"] in ["local", "clusterk"]:
_setup_resources()
_run_toplevel(config, config_file, workdir, parallel,
fc_dir, run_info_yaml)
elif parallel["type"] == "ipython":
assert parallel["scheduler"] is not None, "IPython parallel requires a specified scheduler (-s)"
if parallel["scheduler"] != "sge":
assert parallel["queue"] is not None, "IPython parallel requires a specified queue (-q)"
elif not parallel["queue"]:
parallel["queue"] = ""
_run_toplevel(config, config_file, workdir, parallel,
fc_dir, run_info_yaml)
else:
raise ValueError("Unexpected type of parallel run: %s" % parallel["type"])
def _setup_resources():
"""Attempt to increase resource limits up to hard limits.
This allows us to avoid out of file handle limits where we can
move beyond the soft limit up to the hard limit.
"""
target_procs = 10240
cur_proc, max_proc = resource.getrlimit(resource.RLIMIT_NPROC)
target_proc = min(max_proc, target_procs) if max_proc > 0 else target_procs
resource.setrlimit(resource.RLIMIT_NPROC, (max(cur_proc, target_proc), max_proc))
cur_hdls, max_hdls = resource.getrlimit(resource.RLIMIT_NOFILE)
target_hdls = min(max_hdls, target_procs) if max_hdls > 0 else target_procs
resource.setrlimit(resource.RLIMIT_NOFILE, (max(cur_hdls, target_hdls), max_hdls))
def _run_toplevel(config, config_file, work_dir, parallel,
fc_dir=None, run_info_yaml=None):
"""
Run toplevel analysis, processing a set of input files.
config_file -- Main YAML configuration file with system parameters
fc_dir -- Directory of fastq files to process
run_info_yaml -- YAML configuration file specifying inputs to process
"""
dirs = run_info.setup_directories(work_dir, fc_dir, config, config_file)
config_file = os.path.join(dirs["config"], os.path.basename(config_file))
pipelines, config = _pair_samples_with_pipelines(run_info_yaml, config)
system.write_info(dirs, parallel, config)
with tx_tmpdir(config if parallel.get("type") == "local" else None) as tmpdir:
tempfile.tempdir = tmpdir
for pipeline, samples in pipelines.items():
for xs in pipeline(config, run_info_yaml, parallel, dirs, samples):
pass
# ## Generic pipeline framework
def _wres(parallel, progs, fresources=None, ensure_mem=None):
"""Add resource information to the parallel environment on required programs and files.
Enables spinning up required machines and operating in non-shared filesystem
environments.
progs -- Third party tools used in processing
fresources -- Required file-based resources needed. These will be transferred on non-shared
filesystems.
ensure_mem -- Dictionary of required minimum memory for programs used. Ensures
enough memory gets allocated on low-core machines.
"""
parallel = copy.deepcopy(parallel)
parallel["progs"] = progs
if fresources:
parallel["fresources"] = fresources
if ensure_mem:
parallel["ensure_mem"] = ensure_mem
return parallel
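# Example (illustrative): _wres(parallel, ["gatk"], ensure_mem={"gatk": 4}) returns a copy
# of `parallel` with progs set to ["gatk"] and a minimum-memory hint recorded for gatk.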
def variant2pipeline(config, run_info_yaml, parallel, dirs, samples):
## Alignment and preparation requiring the entire input file (multicore cluster)
# Assign GATK supplied memory if required for post-process recalibration
align_programs = ["aligner", "samtools", "sambamba"]
if any(tz.get_in(["algorithm", "recalibrate"], utils.to_single_data(d)) in [True, "gatk"] for d in samples):
align_programs.append("gatk")
with prun.start(_wres(parallel, align_programs,
(["reference", "fasta"], ["reference", "aligner"], ["files"])),
samples, config, dirs, "multicore",
multiplier=alignprep.parallel_multiplier(samples)) as run_parallel:
with profile.report("organize samples", dirs):
samples = run_parallel("organize_samples", [[dirs, config, run_info_yaml,
[x[0]["description"] for x in samples]]])
with profile.report("alignment preparation", dirs):
samples = run_parallel("prep_align_inputs", samples)
samples = run_parallel("disambiguate_split", [samples])
with profile.report("alignment", dirs):
samples = run_parallel("process_alignment", samples)
samples = disambiguate.resolve(samples, run_parallel)
samples = alignprep.merge_split_alignments(samples, run_parallel)
with profile.report("callable regions", dirs):
samples = run_parallel("prep_samples", [samples])
samples = run_parallel("postprocess_alignment", samples)
samples = run_parallel("combine_sample_regions", [samples])
samples = run_parallel("calculate_sv_bins", [samples])
samples = run_parallel("calculate_sv_coverage", samples)
samples = run_parallel("normalize_sv_coverage", [samples])
samples = region.clean_sample_data(samples)
with profile.report("hla typing", dirs):
samples = hla.run(samples, run_parallel)
## Variant calling on sub-regions of the input file (full cluster)
with prun.start(_wres(parallel, ["gatk", "picard", "variantcaller"]),
samples, config, dirs, "full",
multiplier=region.get_max_counts(samples), max_multicore=1) as run_parallel:
with profile.report("alignment post-processing", dirs):
samples = region.parallel_prep_region(samples, run_parallel)
with profile.report("variant calling", dirs):
samples = genotype.parallel_variantcall_region(samples, run_parallel)
with profile.report("joint squaring off/backfilling", dirs):
samples = joint.square_off(samples, run_parallel)
## Finalize variants, BAMs and population databases (per-sample multicore cluster)
with prun.start(_wres(parallel, ["gatk", "gatk-vqsr", "snpeff", "bcbio_variation",
"gemini", "samtools", "fastqc", "sambamba",
"bcbio-variation-recall", "qsignature",
"svcaller", "kraken", "preseq"]),
samples, config, dirs, "multicore2",
multiplier=structural.parallel_multiplier(samples)) as run_parallel:
with profile.report("variant post-processing", dirs):
samples = run_parallel("postprocess_variants", samples)
samples = run_parallel("split_variants_by_sample", samples)
with profile.report("prepped BAM merging", dirs):
samples = region.delayed_bamprep_merge(samples, run_parallel)
with profile.report("validation", dirs):
samples = run_parallel("compare_to_rm", samples)
samples = genotype.combine_multiple_callers(samples)
with profile.report("ensemble calling", dirs):
samples = ensemble.combine_calls_parallel(samples, run_parallel)
with profile.report("validation summary", dirs):
samples = validate.summarize_grading(samples)
with profile.report("structural variation", dirs):
samples = structural.run(samples, run_parallel, "initial")
with profile.report("structural variation", dirs):
samples = structural.run(samples, run_parallel, "standard")
with profile.report("structural variation ensemble", dirs):
samples = structural.run(samples, run_parallel, "ensemble")
with profile.report("structural variation validation", dirs):
samples = run_parallel("validate_sv", samples)
with profile.report("heterogeneity", dirs):
samples = heterogeneity.run(samples, run_parallel)
with profile.report("population database", dirs):
samples = population.prep_db_parallel(samples, run_parallel)
with profile.report("peddy check", dirs):
samples = peddy.run_peddy_parallel(samples, run_parallel)
with profile.report("quality control", dirs):
samples = qcsummary.generate_parallel(samples, run_parallel)
with profile.report("archive", dirs):
samples = archive.compress(samples, run_parallel)
with profile.report("upload", dirs):
samples = run_parallel("upload_samples", samples)
for sample in samples:
run_parallel("upload_samples_project", [sample])
logger.info("Timing: finished")
return samples
def _debug_samples(i, samples):
print("---", i, len(samples))
for sample in (utils.to_single_data(x) for x in samples):
print(" ", sample["description"], sample.get("region"), \
utils.get_in(sample, ("config", "algorithm", "variantcaller")), \
utils.get_in(sample, ("config", "algorithm", "jointcaller")), \
utils.get_in(sample, ("metadata", "batch")), \
[x.get("variantcaller") for x in sample.get("variants", [])], \
sample.get("work_bam"), \
sample.get("vrn_file"))
def standardpipeline(config, run_info_yaml, parallel, dirs, samples):
## Alignment and preparation requiring the entire input file (multicore cluster)
with prun.start(_wres(parallel, ["aligner", "samtools", "sambamba"]),
samples, config, dirs, "multicore") as run_parallel:
with profile.report("organize samples", dirs):
samples = run_parallel("organize_samples", [[dirs, config, run_info_yaml,
[x[0]["description"] for x in samples]]])
with profile.report("alignment", dirs):
samples = run_parallel("process_alignment", samples)
with profile.report("callable regions", dirs):
samples = run_parallel("prep_samples", [samples])
samples = run_parallel("postprocess_alignment", samples)
samples = run_parallel("combine_sample_regions", [samples])
samples = region.clean_sample_data(samples)
## Quality control
with prun.start(_wres(parallel, ["fastqc", "qsignature", "kraken", "gatk", "samtools", "preseq"]),
samples, config, dirs, "multicore2") as run_parallel:
with profile.report("quality control", dirs):
samples = qcsummary.generate_parallel(samples, run_parallel)
with profile.report("upload", dirs):
samples = run_parallel("upload_samples", samples)
for sample in samples:
run_parallel("upload_samples_project", [sample])
logger.info("Timing: finished")
return samples
def rnaseqpipeline(config, run_info_yaml, parallel, dirs, samples):
samples = rnaseq_prep_samples(config, run_info_yaml, parallel, dirs, samples)
with prun.start(_wres(parallel, ["aligner", "picard", "samtools"],
ensure_mem={"tophat": 10, "tophat2": 10, "star": 2, "hisat2": 8}),
samples, config, dirs, "alignment",
multiplier=alignprep.parallel_multiplier(samples)) as run_parallel:
with profile.report("alignment", dirs):
samples = run_parallel("disambiguate_split", [samples])
samples = run_parallel("process_alignment", samples)
with prun.start(_wres(parallel, ["samtools", "cufflinks"]),
samples, config, dirs, "rnaseqcount") as run_parallel:
with profile.report("disambiguation", dirs):
samples = disambiguate.resolve(samples, run_parallel)
with profile.report("transcript assembly", dirs):
samples = rnaseq.assemble_transcripts(run_parallel, samples)
with profile.report("estimate expression (threaded)", dirs):
samples = rnaseq.quantitate_expression_parallel(samples, run_parallel)
with prun.start(_wres(parallel, ["dexseq", "express"]), samples, config,
dirs, "rnaseqcount-singlethread", max_multicore=1) as run_parallel:
with profile.report("estimate expression (single threaded)", dirs):
samples = rnaseq.quantitate_expression_noparallel(samples, run_parallel)
samples = rnaseq.combine_files(samples)
with prun.start(_wres(parallel, ["gatk", "vardict"]), samples, config,
dirs, "rnaseq-variation") as run_parallel:
with profile.report("RNA-seq variant calling", dirs):
samples = rnaseq.rnaseq_variant_calling(samples, run_parallel)
with prun.start(_wres(parallel, ["samtools", "fastqc", "qualimap",
"kraken", "gatk", "preseq"], ensure_mem={"qualimap": 4}),
samples, config, dirs, "qc") as run_parallel:
with profile.report("quality control", dirs):
samples = qcsummary.generate_parallel(samples, run_parallel)
with profile.report("upload", dirs):
samples = run_parallel("upload_samples", samples)
for sample in samples:
run_parallel("upload_samples_project", [sample])
with profile.report("bcbioRNAseq loading", dirs):
tools_on = dd.get_in_samples(samples, dd.get_tools_on)
bcbiornaseq_on = tools_on and "bcbiornaseq" in tools_on
if bcbiornaseq_on and len(samples) < 3:
logger.warn("bcbioRNASeq needs at least three samples total, skipping.")
else:
run_parallel("run_bcbiornaseqload", [sample])
logger.info("Timing: finished")
return samples
def fastrnaseqpipeline(config, run_info_yaml, parallel, dirs, samples):
samples = rnaseq_prep_samples(config, run_info_yaml, parallel, dirs, samples)
ww = initialize_watcher(samples)
with prun.start(_wres(parallel, ["samtools"]), samples, config,
dirs, "fastrnaseq") as run_parallel:
with profile.report("fastrnaseq", dirs):
samples = rnaseq.fast_rnaseq(samples, run_parallel)
ww.report("fastrnaseq", samples)
samples = rnaseq.combine_files(samples)
with profile.report("quality control", dirs):
samples = qcsummary.generate_parallel(samples, run_parallel)
ww.report("qcsummary", samples)
with profile.report("upload", dirs):
samples = run_parallel("upload_samples", samples)
            for sample in samples:
                run_parallel("upload_samples_project", [sample])
logger.info("Timing: finished")
return samples
def singlecellrnaseqpipeline(config, run_info_yaml, parallel, dirs, samples):
samples = rnaseq_prep_samples(config, run_info_yaml, parallel, dirs, samples)
with prun.start(_wres(parallel, ["samtools", "rapmap"]), samples, config,
dirs, "singlecell-rnaseq") as run_parallel:
with profile.report("singlecell-rnaseq", dirs):
samples = rnaseq.singlecell_rnaseq(samples, run_parallel)
with profile.report("quality control", dirs):
samples = qcsummary.generate_parallel(samples, run_parallel)
with profile.report("upload", dirs):
samples = run_parallel("upload_samples", samples)
            for sample in samples:
                run_parallel("upload_samples_project", [sample])
logger.info("Timing: finished")
return samples
def smallrnaseqpipeline(config, run_info_yaml, parallel, dirs, samples):
# causes a circular import at the top level
from bcbio.srna.group import report as srna_report
samples = rnaseq_prep_samples(config, run_info_yaml, parallel, dirs, samples)
with prun.start(_wres(parallel, ["aligner", "picard", "samtools"],
ensure_mem={"bowtie": 8, "bowtie2": 8, "star": 2}),
[samples[0]], config, dirs, "alignment") as run_parallel:
with profile.report("prepare", dirs):
samples = run_parallel("seqcluster_prepare", [samples])
with profile.report("seqcluster alignment", dirs):
samples = run_parallel("srna_alignment", [samples])
with prun.start(_wres(parallel, ["aligner", "picard", "samtools"],
ensure_mem={"tophat": 10, "tophat2": 10, "star": 2, "hisat2": 8}),
samples, config, dirs, "alignment_samples",
multiplier=alignprep.parallel_multiplier(samples)) as run_parallel:
with profile.report("alignment", dirs):
samples = run_parallel("process_alignment", samples)
with prun.start(_wres(parallel, ["picard", "miraligner"]),
samples, config, dirs, "annotation") as run_parallel:
with profile.report("small RNA annotation", dirs):
samples = run_parallel("srna_annotation", samples)
with prun.start(_wres(parallel, ["seqcluster", "mirge"],
ensure_mem={"seqcluster": 8}),
[samples[0]], config, dirs, "cluster") as run_parallel:
with profile.report("cluster", dirs):
samples = run_parallel("seqcluster_cluster", [samples])
with prun.start(_wres(parallel, ["picard", "fastqc"]),
samples, config, dirs, "qc") as run_parallel:
with profile.report("quality control", dirs):
samples = qcsummary.generate_parallel(samples, run_parallel)
with profile.report("report", dirs):
srna_report(samples)
with profile.report("upload", dirs):
samples = run_parallel("upload_samples", samples)
for sample in samples:
run_parallel("upload_samples_project", [sample])
return samples
def chipseqpipeline(config, run_info_yaml, parallel, dirs, samples):
with prun.start(_wres(parallel, ["aligner", "picard"]),
samples, config, dirs, "multicore",
multiplier=alignprep.parallel_multiplier(samples)) as run_parallel:
with profile.report("organize samples", dirs):
samples = run_parallel("organize_samples", [[dirs, config, run_info_yaml,
[x[0]["description"] for x in samples]]])
with profile.report("alignment", dirs):
samples = run_parallel("prepare_sample", samples)
samples = run_parallel("trim_sample", samples)
samples = run_parallel("disambiguate_split", [samples])
samples = run_parallel("process_alignment", samples)
with profile.report("disambiguation", dirs):
samples = disambiguate.resolve(samples, run_parallel)
samples = run_parallel("clean_chipseq_alignment", samples)
with prun.start(_wres(parallel, ["peakcaller"]),
samples, config, dirs, "peakcalling",
multiplier = peaks._get_multiplier(samples)) as run_parallel:
with profile.report("peakcalling", dirs):
samples = peaks.peakcall_prepare(samples, run_parallel)
samples = peaks.call_consensus(samples)
samples = run_parallel("run_chipseq_count", samples)
samples = peaks.create_peaktable(samples)
with prun.start(_wres(parallel, ["picard", "fastqc"]),
samples, config, dirs, "qc") as run_parallel:
with profile.report("quality control", dirs):
samples = qcsummary.generate_parallel(samples, run_parallel)
samples = atac.create_ataqv_report(samples)
with profile.report("upload", dirs):
samples = run_parallel("upload_samples", samples)
for sample in samples:
run_parallel("upload_samples_project", [sample])
logger.info("Timing: finished")
return samples
def wgbsseqpipeline(config, run_info_yaml, parallel, dirs, samples):
with prun.start(_wres(parallel, ["fastqc", "picard"], ensure_mem={"fastqc" : 4}),
samples, config, dirs, "trimming") as run_parallel:
with profile.report("organize samples", dirs):
samples = run_parallel("organize_samples", [[dirs, config, run_info_yaml,
[x[0]["description"] for x in samples]]])
samples = run_parallel("prepare_sample", samples)
samples = run_parallel("trim_bs_sample", samples)
with prun.start(_wres(parallel, ["aligner", "bismark", "picard", "samtools"]),
samples, config, dirs, "multicore",
multiplier=alignprep.parallel_multiplier(samples)) as run_parallel:
with profile.report("alignment", dirs):
samples = run_parallel("process_alignment", samples)
with prun.start(_wres(parallel, ['samtools']), samples, config, dirs,
'deduplication') as run_parallel:
with profile.report('deduplicate', dirs):
samples = run_parallel('deduplicate_bismark', samples)
with prun.start(_wres(parallel, ["caller"], ensure_mem={"caller": 5}),
samples, config, dirs, "multicore2",
multiplier=24) as run_parallel:
with profile.report("cpg calling", dirs):
samples = run_parallel("cpg_calling", samples)
# with prun.start(_wres(parallel, ["picard", "fastqc", "samtools"]),
# samples, config, dirs, "qc") as run_parallel:
# with profile.report("quality control", dirs):
# samples = qcsummary.generate_parallel(samples, run_parallel)
return samples
def rnaseq_prep_samples(config, run_info_yaml, parallel, dirs, samples):
"""
organizes RNA-seq and small-RNAseq samples, converting from BAM if
necessary and trimming if necessary
"""
pipeline = dd.get_in_samples(samples, dd.get_analysis)
trim_reads_set = any([tz.get_in(["algorithm", "trim_reads"], d) for d in dd.sample_data_iterator(samples)])
resources = ["picard"]
needs_trimming = (_is_smallrnaseq(pipeline) or trim_reads_set)
if needs_trimming:
resources.append("atropos")
with prun.start(_wres(parallel, resources),
samples, config, dirs, "trimming",
max_multicore=1 if not needs_trimming else None) as run_parallel:
with profile.report("organize samples", dirs):
samples = run_parallel("organize_samples", [[dirs, config, run_info_yaml,
[x[0]["description"] for x in samples]]])
samples = run_parallel("prepare_sample", samples)
if needs_trimming:
with profile.report("adapter trimming", dirs):
if _is_smallrnaseq(pipeline):
samples = run_parallel("trim_srna_sample", samples)
else:
samples = run_parallel("trim_sample", samples)
return samples
def _get_pipeline(item):
from bcbio.log import logger
analysis_type = item.get("analysis", "").lower()
if analysis_type not in SUPPORTED_PIPELINES:
logger.error("Cannot determine which type of analysis to run, "
"set in the run_info under details.")
sys.exit(1)
else:
return SUPPORTED_PIPELINES[analysis_type]
def _pair_samples_with_pipelines(run_info_yaml, config):
"""Map samples defined in input file to pipelines to run.
"""
samples = config_utils.load_config(run_info_yaml)
if isinstance(samples, dict):
resources = samples.pop("resources")
samples = samples["details"]
else:
resources = {}
ready_samples = []
for sample in samples:
if "files" in sample:
del sample["files"]
# add any resources to this item to recalculate global configuration
usample = copy.deepcopy(sample)
usample.pop("algorithm", None)
if "resources" not in usample:
usample["resources"] = {}
for prog, pkvs in resources.items():
if prog not in usample["resources"]:
usample["resources"][prog] = {}
if pkvs is not None:
for key, val in pkvs.items():
usample["resources"][prog][key] = val
config = config_utils.update_w_custom(config, usample)
sample["resources"] = {}
ready_samples.append(sample)
paired = [(x, _get_pipeline(x)) for x in ready_samples]
d = defaultdict(list)
for x in paired:
d[x[1]].append([x[0]])
return d, config
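# Example (illustrative): two samples with analysis "variant2" and one with "rna-seq"
# yield ({variant2pipeline: [[s1], [s2]], rnaseqpipeline: [[s3]]}, updated_config).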
SUPPORTED_PIPELINES = {"variant2": variant2pipeline,
"snp calling": variant2pipeline,
"variant": variant2pipeline,
"standard": standardpipeline,
"minimal": standardpipeline,
"rna-seq": rnaseqpipeline,
"smallrna-seq": smallrnaseqpipeline,
"chip-seq": chipseqpipeline,
"wgbs-seq": wgbsseqpipeline,
"fastrna-seq": fastrnaseqpipeline,
"scrna-seq": singlecellrnaseqpipeline}
def _is_smallrnaseq(pipeline):
return pipeline.lower() == "smallrna-seq"
| []
| []
| [
"LC_ALL",
"LANG",
"LC"
]
| [] | ["LC_ALL", "LANG", "LC"] | python | 3 | 0 | |
build/scripts/go_tool.py | import argparse
import copy
import json
import os
import shutil
import subprocess
import sys
import tempfile
import threading
arc_project_prefix = 'a.yandex-team.ru/'
std_lib_prefix = 'contrib/go/_std/src/'
vendor_prefix = 'vendor/'
vet_info_ext = '.vet.out'
vet_report_ext = '.vet.txt'
def compare_versions(version1, version2):
v1 = tuple(str(int(x)).zfill(8) for x in version1.split('.'))
v2 = tuple(str(int(x)).zfill(8) for x in version2.split('.'))
if v1 == v2:
return 0
return 1 if v1 < v2 else -1
def get_symlink_or_copyfile():
os_symlink = getattr(os, 'symlink', None)
if os_symlink is None:
os_symlink = shutil.copyfile
return os_symlink
def copy_args(args):
return copy.copy(args)
def get_vendor_index(import_path):
index = import_path.rfind('/' + vendor_prefix)
if index < 0:
index = 0 if import_path.startswith(vendor_prefix) else index
else:
index = index + 1
return index
def get_import_path(module_path):
assert len(module_path) > 0
import_path = module_path.replace('\\', '/')
is_std_module = import_path.startswith(std_lib_prefix)
if is_std_module:
import_path = import_path[len(std_lib_prefix):]
elif import_path.startswith(vendor_prefix):
import_path = import_path[len(vendor_prefix):]
else:
import_path = arc_project_prefix + import_path
assert len(import_path) > 0
return import_path, is_std_module
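# Examples (illustrative):
#   get_import_path('contrib/go/_std/src/fmt') -> ('fmt', True)
#   get_import_path('vendor/golang.org/x/net') -> ('golang.org/x/net', False)
#   get_import_path('library/go/core/log')     -> ('a.yandex-team.ru/library/go/core/log', False)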
def call(cmd, cwd, env=None):
# print >>sys.stderr, ' '.join(cmd)
return subprocess.check_output(cmd, stdin=None, stderr=subprocess.STDOUT, cwd=cwd, env=env)
def classify_srcs(srcs, args):
args.go_srcs = list(filter(lambda x: x.endswith('.go'), srcs))
args.asm_srcs = list(filter(lambda x: x.endswith('.s'), srcs))
args.objects = list(filter(lambda x: x.endswith('.o') or x.endswith('.obj'), srcs))
args.symabis = list(filter(lambda x: x.endswith('.symabis'), srcs))
args.sysos = list(filter(lambda x: x.endswith('.syso'), srcs))
def get_import_config_info(peers, gen_importmap, import_map={}, module_map={}):
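    # NOTE (assumption): this helper appears to rely on a module-level `args`
    # object for args.build_root below, since `args` is not a parameter here.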
info = {'importmap': [], 'packagefile': [], 'standard': {}}
if gen_importmap:
for key, value in import_map.items():
info['importmap'].append((key, value))
for peer in peers:
peer_import_path, is_std = get_import_path(os.path.dirname(peer))
if gen_importmap:
index = get_vendor_index(peer_import_path)
if index >= 0:
index += len(vendor_prefix)
info['importmap'].append((peer_import_path[index:], peer_import_path))
info['packagefile'].append((peer_import_path, os.path.join(args.build_root, peer)))
if is_std:
info['standard'][peer_import_path] = True
for key, value in module_map.items():
info['packagefile'].append((key, value))
return info
def create_import_config(peers, gen_importmap, import_map={}, module_map={}):
lines = []
info = get_import_config_info(peers, gen_importmap, import_map, module_map)
for key in ('importmap', 'packagefile'):
for item in info[key]:
lines.append('{} {}={}'.format(key, *item))
if len(lines) > 0:
lines.append('')
content = '\n'.join(lines)
# print >>sys.stderr, content
with tempfile.NamedTemporaryFile(delete=False) as f:
f.write(content)
return f.name
return None
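# The generated importcfg contains lines like (illustrative):
#   importmap golang.org/x/net=vendor/golang.org/x/net
#   packagefile fmt=/build/root/contrib/go/_std/src/fmt/fmt.a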
def vet_info_output_name(path, ext=None):
return '{}{}'.format(path, ext or vet_info_ext)
def vet_report_output_name(path, ext=None):
return '{}{}'.format(path, ext or vet_report_ext)
def get_source_path(args):
return args.test_import_path or args.module_path
def gen_vet_info(args):
import_path = args.real_import_path if hasattr(args, 'real_import_path') else args.import_path
info = get_import_config_info(args.peers, True, args.import_map, args.module_map)
import_map = dict(info['importmap'])
    # FIXME(snermolaev): it seems that adding an import map entry for the 'fake' package
    # doesn't do any harm (it needs to be revised later)
import_map['unsafe'] = 'unsafe'
for (key, _) in info['packagefile']:
if key not in import_map:
import_map[key] = key
data = {
'ID': import_path,
'Compiler': 'gc',
'Dir': os.path.join(args.arc_source_root, get_source_path(args)),
'ImportPath': import_path,
'GoFiles': list(filter(lambda x: x.endswith('.go'), args.go_srcs)),
'NonGoFiles': list(filter(lambda x: not x.endswith('.go'), args.go_srcs)),
'ImportMap': import_map,
'PackageFile': dict(info['packagefile']),
'Standard': dict(info['standard']),
'PackageVetx': dict((key, vet_info_output_name(value)) for key, value in info['packagefile']),
'VetxOnly': False,
'VetxOutput': vet_info_output_name(args.output),
'SucceedOnTypecheckFailure': False
}
# print >>sys.stderr, json.dumps(data, indent=4)
return data
def create_vet_config(args, info):
with tempfile.NamedTemporaryFile(delete=False, suffix='.cfg') as f:
f.write(json.dumps(info))
return f.name
def decode_vet_report(json_report):
report = ''
if json_report:
try:
full_diags = json.JSONDecoder(encoding='UTF-8').decode(json_report)
except ValueError:
report = json_report
else:
messages = []
for _, module_diags in full_diags.iteritems():
for _, type_diags in module_diags.iteritems():
for diag in type_diags:
messages.append(u'{}: {}'.format(diag['posn'], diag['message']))
report = '\n'.join(sorted(messages)).encode('UTF-8')
return report
def dump_vet_report(args, report):
if report:
report = report.replace(args.build_root[:-1], '$B')
report = report.replace(args.arc_source_root, '$S')
with open(args.vet_report_output, 'w') as f:
f.write(report)
def read_vet_report(args):
assert args
report = ''
if os.path.exists(args.vet_report_output):
with open(args.vet_report_output, 'r') as f:
report += f.read()
return report
def dump_vet_report_for_tests(args, *test_args_list):
dump_vet_report(args, reduce(lambda x, y: x + read_vet_report(y), filter(None, test_args_list), ''))
def do_vet(args):
assert args.vet
info = gen_vet_info(args)
vet_config = create_vet_config(args, info)
cmd = [args.go_vet, '-json']
if args.vet_flags:
cmd.extend(args.vet_flags)
cmd.append(vet_config)
# print >>sys.stderr, '>>>> [{}]'.format(' '.join(cmd))
p_vet = subprocess.Popen(cmd, stdin=None, stderr=subprocess.PIPE, stdout=subprocess.PIPE, cwd=args.build_root)
vet_out, vet_err = p_vet.communicate()
report = decode_vet_report(vet_out) if vet_out else ''
dump_vet_report(args, report)
if p_vet.returncode:
raise subprocess.CalledProcessError(returncode=p_vet.returncode, cmd=cmd, output=vet_err)
def _do_compile_go(args):
import_path, is_std_module = args.import_path, args.is_std
cmd = [args.go_compile, '-o', args.output, '-trimpath', args.arc_source_root, '-p', import_path, '-D', '""']
cmd += ['-goversion', 'go' + args.goversion]
if is_std_module:
cmd.append('-std')
if import_path == 'runtime' or import_path.startswith('runtime/internal/'):
cmd.append('-+')
import_config_name = create_import_config(args.peers, True, args.import_map, args.module_map)
if import_config_name:
cmd += ['-importcfg', import_config_name]
else:
if import_path == 'unsafe' or len(args.objects) > 0 or args.asmhdr:
pass
else:
cmd.append('-complete')
if args.asmhdr:
cmd += ['-asmhdr', args.asmhdr]
if compare_versions('1.12', args.goversion) >= 0:
if args.symabis:
cmd += ['-symabis'] + args.symabis
if compare_versions('1.13', args.goversion) >= 0:
pass
elif import_path in ('runtime', 'runtime/internal/atomic'):
cmd.append('-allabis')
compile_workers = '4'
if args.compile_flags:
cmd += args.compile_flags
if '-race' in args.compile_flags:
compile_workers = '1'
cmd += ['-pack', '-c={}'.format(compile_workers)]
cmd += args.go_srcs
call(cmd, args.build_root)
class VetThread(threading.Thread):
def __init__(self, target, args):
super(VetThread, self).__init__(target=target, args=args)
self.exc_info = None
def run(self):
try:
super(VetThread, self).run()
except:
self.exc_info = sys.exc_info()
def join_with_exception(self, reraise_exception):
self.join()
if reraise_exception and self.exc_info:
raise self.exc_info[0], self.exc_info[1], self.exc_info[2]
def do_compile_go(args):
raise_exception_from_vet = False
if args.vet:
run_vet = VetThread(target=do_vet, args=(args,))
run_vet.start()
try:
_do_compile_go(args)
raise_exception_from_vet = True
finally:
if args.vet:
run_vet.join_with_exception(raise_exception_from_vet)
def do_compile_asm(args):
assert(len(args.srcs) == 1 and len(args.asm_srcs) == 1)
cmd = [args.go_asm, '-trimpath', args.arc_source_root]
cmd += ['-I', args.output_root, '-I', os.path.join(args.pkg_root, 'include')]
cmd += ['-D', 'GOOS_' + args.targ_os, '-D', 'GOARCH_' + args.targ_arch, '-o', args.output]
if args.asm_flags:
cmd += args.asm_flags
cmd += args.asm_srcs
call(cmd, args.build_root)
def do_link_lib(args):
if len(args.asm_srcs) > 0:
asmargs = copy_args(args)
asmargs.asmhdr = os.path.join(asmargs.output_root, 'go_asm.h')
do_compile_go(asmargs)
for src in asmargs.asm_srcs:
asmargs.srcs = [src]
asmargs.asm_srcs = [src]
asmargs.output = os.path.join(asmargs.output_root, os.path.basename(src) + '.o')
do_compile_asm(asmargs)
args.objects.append(asmargs.output)
else:
do_compile_go(args)
if args.objects:
cmd = [args.go_pack, 'r', args.output] + args.objects + args.sysos
call(cmd, args.build_root)
def do_link_exe(args):
assert args.extld is not None
assert args.non_local_peers is not None
compile_args = copy_args(args)
compile_args.output = os.path.join(args.output_root, 'main.a')
compile_args.real_import_path = compile_args.import_path
compile_args.import_path = 'main'
if args.vcs and os.path.isfile(compile_args.vcs):
build_info = os.path.join('library', 'go', 'core', 'buildinfo')
if any(map(lambda x: x.startswith(build_info), compile_args.peers)):
compile_args.go_srcs.append(compile_args.vcs)
do_link_lib(compile_args)
cmd = [args.go_link, '-o', args.output]
import_config_name = create_import_config(args.peers + args.non_local_peers, False, args.import_map, args.module_map)
if import_config_name:
cmd += ['-importcfg', import_config_name]
if args.link_flags:
cmd += args.link_flags
if args.mode in ('exe', 'test'):
cmd.append('-buildmode=exe')
elif args.mode == 'dll':
cmd.append('-buildmode=c-shared')
else:
assert False, 'Unexpected mode: {}'.format(args.mode)
cmd.append('-extld={}'.format(args.extld))
extldflags = []
if args.extldflags is not None:
filter_musl = None
if args.musl:
cmd.append('-linkmode=external')
extldflags.append('-static')
            filter_musl = lambda x: x not in ('-lc', '-ldl', '-lm', '-lpthread', '-lrt')
extldflags += list(filter(filter_musl, args.extldflags))
if args.cgo_peers is not None and len(args.cgo_peers) > 0:
is_group = args.targ_os == 'linux'
if is_group:
extldflags.append('-Wl,--start-group')
extldflags.extend(os.path.join(args.build_root, x) for x in args.cgo_peers)
if is_group:
extldflags.append('-Wl,--end-group')
if len(extldflags) > 0:
cmd.append('-extldflags=' + ' '.join(extldflags))
cmd.append(compile_args.output)
call(cmd, args.build_root)
def gen_cover_info(args):
lines = []
lines.extend([
"""
var (
coverCounters = make(map[string][]uint32)
coverBlocks = make(map[string][]testing.CoverBlock)
)
""",
'func init() {',
])
for var, file in (x.split(':') for x in args.cover_info):
lines.append(' coverRegisterFile("{file}", _cover0.{var}.Count[:], _cover0.{var}.Pos[:], _cover0.{var}.NumStmt[:])'.format(file=file, var=var))
lines.extend([
'}',
"""
func coverRegisterFile(fileName string, counter []uint32, pos []uint32, numStmts []uint16) {
if 3*len(counter) != len(pos) || len(counter) != len(numStmts) {
panic("coverage: mismatched sizes")
}
if coverCounters[fileName] != nil {
// Already registered.
return
}
coverCounters[fileName] = counter
block := make([]testing.CoverBlock, len(counter))
for i := range counter {
block[i] = testing.CoverBlock{
Line0: pos[3*i+0],
Col0: uint16(pos[3*i+2]),
Line1: pos[3*i+1],
Col1: uint16(pos[3*i+2]>>16),
Stmts: numStmts[i],
}
}
coverBlocks[fileName] = block
}
""",
])
return lines
def filter_out_skip_tests(tests, skip_tests):
skip_set = set(skip_tests)
return filter(lambda x: x not in skip_set, tests)
def gen_test_main(args, test_lib_args, xtest_lib_args):
assert args and (test_lib_args or xtest_lib_args)
test_miner = args.test_miner
test_module_path = test_lib_args.import_path if test_lib_args else xtest_lib_args.import_path
is_cover = args.cover_info and len(args.cover_info) > 0
# Prepare GOPATH
# $BINDIR
# |- __go__
# |- src
# |- pkg
# |- ${TARGET_OS}_${TARGET_ARCH}
go_path_root = os.path.join(args.output_root, '__go__')
test_src_dir = os.path.join(go_path_root, 'src')
target_os_arch = '_'.join([args.targ_os, args.targ_arch])
test_pkg_dir = os.path.join(go_path_root, 'pkg', target_os_arch, os.path.dirname(test_module_path))
os.makedirs(test_pkg_dir)
my_env = os.environ.copy()
my_env['GOROOT'] = ''
my_env['GOPATH'] = go_path_root
my_env['GOARCH'] = args.targ_arch
my_env['GOOS'] = args.targ_os
tests = []
xtests = []
os_symlink = get_symlink_or_copyfile()
# Get the list of "internal" tests
if test_lib_args:
os.makedirs(os.path.join(test_src_dir, test_module_path))
os_symlink(test_lib_args.output, os.path.join(test_pkg_dir, os.path.basename(test_module_path) + '.a'))
cmd = [test_miner, '-benchmarks', '-tests', test_module_path]
tests = filter(lambda x: len(x) > 0, (call(cmd, test_lib_args.output_root, my_env) or '').strip().split('\n'))
if args.skip_tests:
tests = filter_out_skip_tests(tests, args.skip_tests)
test_main_found = '#TestMain' in tests
# Get the list of "external" tests
if xtest_lib_args:
xtest_module_path = xtest_lib_args.import_path
os.makedirs(os.path.join(test_src_dir, xtest_module_path))
os_symlink(xtest_lib_args.output, os.path.join(test_pkg_dir, os.path.basename(xtest_module_path) + '.a'))
cmd = [test_miner, '-benchmarks', '-tests', xtest_module_path]
xtests = filter(lambda x: len(x) > 0, (call(cmd, xtest_lib_args.output_root, my_env) or '').strip().split('\n'))
if args.skip_tests:
xtests = filter_out_skip_tests(xtests, args.skip_tests)
xtest_main_found = '#TestMain' in xtests
test_main_package = None
if test_main_found and xtest_main_found:
assert False, 'multiple definition of TestMain'
elif test_main_found:
test_main_package = '_test'
elif xtest_main_found:
test_main_package = '_xtest'
shutil.rmtree(go_path_root)
lines = ['package main', '', 'import (']
if test_main_package is None:
lines.append(' "os"')
lines.extend([' "testing"', ' "testing/internal/testdeps"'])
if len(tests) > 0:
lines.append(' _test "{}"'.format(test_module_path))
elif test_lib_args:
lines.append(' _ "{}"'.format(test_module_path))
if len(xtests) > 0:
lines.append(' _xtest "{}"'.format(xtest_module_path))
elif xtest_lib_args:
lines.append(' _ "{}"'.format(xtest_module_path))
if is_cover:
lines.append(' _cover0 "{}"'.format(test_module_path))
lines.extend([')', ''])
for kind in ['Test', 'Benchmark', 'Example']:
lines.append('var {}s = []testing.Internal{}{{'.format(kind.lower(), kind))
for test in list(filter(lambda x: x.startswith(kind), tests)):
lines.append(' {{"{test}", _test.{test}}},'.format(test=test))
for test in list(filter(lambda x: x.startswith(kind), xtests)):
lines.append(' {{"{test}", _xtest.{test}}},'.format(test=test))
lines.extend(['}', ''])
if is_cover:
lines.extend(gen_cover_info(args))
lines.append('func main() {')
if is_cover:
lines.extend([
' testing.RegisterCover(testing.Cover{',
' Mode: "set",',
' Counters: coverCounters,',
' Blocks: coverBlocks,',
' CoveredPackages: "",',
' })',
])
lines.extend([
        '	m := testing.MainStart(testdeps.TestDeps{}, tests, benchmarks, examples)',
        '',
])
if test_main_package:
lines.append(' {}.TestMain(m)'.format(test_main_package))
else:
lines.append(' os.Exit(m.Run())')
lines.extend(['}', ''])
content = '\n'.join(lines)
# print >>sys.stderr, content
return content
def do_link_test(args):
assert args.srcs or args.xtest_srcs
assert args.test_miner is not None
test_module_path = get_source_path(args)
test_import_path, _ = get_import_path(test_module_path)
test_lib_args = copy_args(args) if args.srcs else None
xtest_lib_args = copy_args(args) if args.xtest_srcs else None
ydx_file_name = None
xtest_ydx_file_name = None
need_append_ydx = test_lib_args and xtest_lib_args and args.ydx_file and args.vet_flags
if need_append_ydx:
def find_ydx_file_name(name, flags):
for i, elem in enumerate(flags):
if elem.endswith(name):
return (i, elem)
assert False, 'Unreachable code'
idx, ydx_file_name = find_ydx_file_name(xtest_lib_args.ydx_file, xtest_lib_args.vet_flags)
xtest_ydx_file_name = '{}_xtest'.format(ydx_file_name)
xtest_lib_args.vet_flags = copy.copy(xtest_lib_args.vet_flags)
xtest_lib_args.vet_flags[idx] = xtest_ydx_file_name
if test_lib_args:
test_lib_args.output = os.path.join(args.output_root, 'test.a')
test_lib_args.vet_report_output = vet_report_output_name(test_lib_args.output)
test_lib_args.module_path = test_module_path
test_lib_args.import_path = test_import_path
do_link_lib(test_lib_args)
if xtest_lib_args:
xtest_lib_args.srcs = xtest_lib_args.xtest_srcs
classify_srcs(xtest_lib_args.srcs, xtest_lib_args)
xtest_lib_args.output = os.path.join(args.output_root, 'xtest.a')
xtest_lib_args.vet_report_output = vet_report_output_name(xtest_lib_args.output)
xtest_lib_args.module_path = test_module_path + '_test'
xtest_lib_args.import_path = test_import_path + '_test'
if test_lib_args:
xtest_lib_args.module_map[test_import_path] = test_lib_args.output
need_append_ydx = args.ydx_file and args.srcs and args.vet_flags
do_link_lib(xtest_lib_args)
if need_append_ydx:
with open(os.path.join(args.build_root, ydx_file_name), 'ab') as dst_file:
with open(os.path.join(args.build_root, xtest_ydx_file_name), 'rb') as src_file:
dst_file.write(src_file.read())
test_main_content = gen_test_main(args, test_lib_args, xtest_lib_args)
test_main_name = os.path.join(args.output_root, '_test_main.go')
with open(test_main_name, "w") as f:
f.write(test_main_content)
test_args = copy_args(args)
test_args.srcs = [test_main_name]
if test_args.test_import_path is None:
        # it seems that we could do this unconditionally, but this kind
        # of mangling doesn't really look good, so we only apply it
        # to pure GO_TEST modules
test_args.module_path = test_args.module_path + '___test_main__'
test_args.import_path = test_args.import_path + '___test_main__'
classify_srcs(test_args.srcs, test_args)
if test_lib_args:
test_args.module_map[test_lib_args.import_path] = test_lib_args.output
if xtest_lib_args:
test_args.module_map[xtest_lib_args.import_path] = xtest_lib_args.output
if args.vet:
dump_vet_report_for_tests(test_args, test_lib_args, xtest_lib_args)
test_args.vet = False
do_link_exe(test_args)
if __name__ == '__main__':
parser = argparse.ArgumentParser(prefix_chars='+')
parser.add_argument('++mode', choices=['dll', 'exe', 'lib', 'test'], required=True)
parser.add_argument('++srcs', nargs='*', required=True)
parser.add_argument('++cgo-srcs', nargs='*')
parser.add_argument('++test_srcs', nargs='*')
parser.add_argument('++xtest_srcs', nargs='*')
parser.add_argument('++cover_info', nargs='*')
parser.add_argument('++output', nargs='?', default=None)
parser.add_argument('++build-root', required=True)
parser.add_argument('++output-root', required=True)
parser.add_argument('++tools-root', required=True)
parser.add_argument('++host-os', choices=['linux', 'darwin', 'windows'], required=True)
parser.add_argument('++host-arch', choices=['amd64'], required=True)
parser.add_argument('++targ-os', choices=['linux', 'darwin', 'windows'], required=True)
parser.add_argument('++targ-arch', choices=['amd64', 'x86'], required=True)
parser.add_argument('++peers', nargs='*')
parser.add_argument('++non-local-peers', nargs='*')
parser.add_argument('++cgo-peers', nargs='*')
parser.add_argument('++asmhdr', nargs='?', default=None)
parser.add_argument('++test-import-path', nargs='?')
parser.add_argument('++test-miner', nargs='?')
parser.add_argument('++arc-project-prefix', nargs='?', default=arc_project_prefix)
parser.add_argument('++std-lib-prefix', nargs='?', default=std_lib_prefix)
parser.add_argument('++extld', nargs='?', default=None)
parser.add_argument('++extldflags', nargs='+', default=None)
parser.add_argument('++goversion', required=True)
parser.add_argument('++asm-flags', nargs='*')
parser.add_argument('++compile-flags', nargs='*')
parser.add_argument('++link-flags', nargs='*')
parser.add_argument('++vcs', nargs='?', default=None)
parser.add_argument('++vet', nargs='?', const=True, default=False)
parser.add_argument('++vet-flags', nargs='*', default=None)
parser.add_argument('++vet-info-ext', default=vet_info_ext)
parser.add_argument('++vet-report-ext', default=vet_report_ext)
parser.add_argument('++arc-source-root')
parser.add_argument('++musl', action='store_true')
parser.add_argument('++skip-tests', nargs='*', default=None)
parser.add_argument('++ydx-file', default='')
args = parser.parse_args()
    # Temporary workaround for noauto
if args.cgo_srcs and len(args.cgo_srcs) > 0:
cgo_srcs_set = set(args.cgo_srcs)
args.srcs = list(filter(lambda x: x not in cgo_srcs_set, args.srcs))
args.pkg_root = os.path.join(str(args.tools_root), 'pkg')
args.tool_root = os.path.join(args.pkg_root, 'tool', '{}_{}'.format(args.host_os, args.host_arch))
args.go_compile = os.path.join(args.tool_root, 'compile')
args.go_cgo = os.path.join(args.tool_root, 'cgo')
args.go_link = os.path.join(args.tool_root, 'link')
args.go_asm = os.path.join(args.tool_root, 'asm')
args.go_pack = os.path.join(args.tool_root, 'pack')
args.go_vet = os.path.join(args.tool_root, 'vet') if args.vet is True else args.vet
args.output = os.path.normpath(args.output)
args.vet_report_output = vet_report_output_name(args.output, args.vet_report_ext)
args.build_root = os.path.normpath(args.build_root) + os.path.sep
args.output_root = os.path.normpath(args.output_root)
args.import_map = {}
args.module_map = {}
if args.cgo_peers:
args.cgo_peers = [x for x in args.cgo_peers if not x.endswith('.fake.pkg')]
assert args.mode == 'test' or args.test_srcs is None and args.xtest_srcs is None
    # sort Go sources lexically by basename
args.srcs.sort(key=lambda x: os.path.basename(x))
if args.test_srcs:
args.srcs += sorted(args.test_srcs, key=lambda x: os.path.basename(x))
del args.test_srcs
if args.xtest_srcs:
args.xtest_srcs.sort(key=lambda x: os.path.basename(x))
arc_project_prefix = args.arc_project_prefix
std_lib_prefix = args.std_lib_prefix
vet_info_ext = args.vet_info_ext
vet_report_ext = args.vet_report_ext
    # compute the root-relative module dir path
assert args.output is None or args.output_root == os.path.dirname(args.output)
assert args.output_root.startswith(args.build_root)
args.module_path = args.output_root[len(args.build_root):]
assert len(args.module_path) > 0
args.import_path, args.is_std = get_import_path(args.module_path)
classify_srcs(args.srcs, args)
assert args.asmhdr is None or args.word == 'go'
try:
os.unlink(args.output)
except OSError:
pass
    # We currently support only the 'lib', 'exe', 'dll' and 'test' build modes
    # (see the dispatch table below), and as a result we generate only one
    # build node per module (or program)
dispatch = {
'exe': do_link_exe,
'dll': do_link_exe,
'lib': do_link_lib,
'test': do_link_test
}
exit_code = 1
try:
dispatch[args.mode](args)
exit_code = 0
except KeyError:
print >>sys.stderr, 'Unknown build mode [{}]...'.format(args.mode)
except subprocess.CalledProcessError as e:
print >>sys.stderr, '{} returned non-zero exit code {}. stop.'.format(' '.join(e.cmd), e.returncode)
print >>sys.stderr, e.output
exit_code = e.returncode
except Exception as e:
print >>sys.stderr, "Unhandled exception [{}]...".format(str(e))
sys.exit(exit_code)
| []
| []
| []
| [] | [] | python | 0 | 0 | |
www/smart_mirror_rest/SmartMirrorRest/manage.py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
"""Run administrative tasks."""
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'SmartMirrorRest.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
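# Illustrative usage (standard Django convention, not specific to this project):
#   python manage.py runserver
#   python manage.py migrate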
| []
| []
| []
| [] | [] | python | 0 | 0 | |
src/net/http/transport.go | // Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// HTTP client implementation. See RFC 7230 through 7235.
//
// This is the low-level Transport implementation of RoundTripper.
// The high-level interface is in client.go.
package http
import (
"bufio"
"compress/gzip"
"container/list"
"context"
"crypto/tls"
"errors"
"fmt"
"io"
"log"
"net"
"net/http/httptrace"
"net/textproto"
"net/url"
"os"
"reflect"
"strings"
"sync"
"sync/atomic"
"time"
"golang_org/x/net/http/httpguts"
"golang_org/x/net/http/httpproxy"
)
// DefaultTransport is the default implementation of Transport and is
// used by DefaultClient. It establishes network connections as needed
// and caches them for reuse by subsequent calls. It uses HTTP proxies
// as directed by the $HTTP_PROXY and $NO_PROXY (or $http_proxy and
// $no_proxy) environment variables.
var DefaultTransport RoundTripper = &Transport{
Proxy: ProxyFromEnvironment,
DialContext: (&net.Dialer{
Timeout: 30 * time.Second,
KeepAlive: 30 * time.Second,
DualStack: true,
}).DialContext,
MaxIdleConns: 100,
IdleConnTimeout: 90 * time.Second,
TLSHandshakeTimeout: 10 * time.Second,
ExpectContinueTimeout: 1 * time.Second,
}
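// exampleCustomTransport is an illustrative sketch (not part of the original
// file): a Transport with the same shape as DefaultTransport but a smaller
// idle-connection budget. The specific values are arbitrary placeholders,
// not recommendations.
func exampleCustomTransport() *Transport {
	return &Transport{
		Proxy: ProxyFromEnvironment,
		DialContext: (&net.Dialer{
			Timeout:   5 * time.Second,
			KeepAlive: 30 * time.Second,
		}).DialContext,
		MaxIdleConns:        10,
		MaxIdleConnsPerHost: 2,
		IdleConnTimeout:     30 * time.Second,
	}
}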
// DefaultMaxIdleConnsPerHost is the default value of Transport's
// MaxIdleConnsPerHost.
const DefaultMaxIdleConnsPerHost = 2
// connsPerHostClosedCh is a closed channel used by MaxConnsPerHost
// for the property that a receive from a closed channel returns the
// zero value.
var connsPerHostClosedCh = make(chan struct{})
func init() {
close(connsPerHostClosedCh)
}
// Transport is an implementation of RoundTripper that supports HTTP,
// HTTPS, and HTTP proxies (for either HTTP or HTTPS with CONNECT).
//
// By default, Transport caches connections for future re-use.
// This may leave many open connections when accessing many hosts.
// This behavior can be managed using Transport's CloseIdleConnections method
// and the MaxIdleConnsPerHost and DisableKeepAlives fields.
//
// Transports should be reused instead of created as needed.
// Transports are safe for concurrent use by multiple goroutines.
//
// A Transport is a low-level primitive for making HTTP and HTTPS requests.
// For high-level functionality, such as cookies and redirects, see Client.
//
// Transport uses HTTP/1.1 for HTTP URLs and either HTTP/1.1 or HTTP/2
// for HTTPS URLs, depending on whether the server supports HTTP/2,
// and how the Transport is configured. The DefaultTransport supports HTTP/2.
// To explicitly enable HTTP/2 on a transport, use golang.org/x/net/http2
// and call ConfigureTransport. See the package docs for more about HTTP/2.
//
// The Transport will send CONNECT requests to a proxy for its own use
// when processing HTTPS requests, but Transport should generally not
// be used to send a CONNECT request. That is, the Request passed to
// the RoundTrip method should not have a Method of "CONNECT", as Go's
// HTTP/1.x implementation does not support full-duplex request bodies
// being written while the response body is streamed. Go's HTTP/2
// implementation does support full duplex, but many CONNECT proxies speak
// HTTP/1.x.
//
// Responses with status codes in the 1xx range are either handled
// automatically (100 expect-continue) or ignored. The one
// exception is HTTP status code 101 (Switching Protocols), which is
// considered a terminal status and returned by RoundTrip. To see the
// ignored 1xx responses, use the httptrace trace package's
// ClientTrace.Got1xxResponse.
type Transport struct {
idleMu sync.Mutex
wantIdle bool // user has requested to close all idle conns
idleConn map[connectMethodKey][]*persistConn // most recently used at end
idleConnCh map[connectMethodKey]chan *persistConn
idleLRU connLRU
reqMu sync.Mutex
reqCanceler map[*Request]func(error)
altMu sync.Mutex // guards changing altProto only
altProto atomic.Value // of nil or map[string]RoundTripper, key is URI scheme
connCountMu sync.Mutex
connPerHostCount map[connectMethodKey]int
connPerHostAvailable map[connectMethodKey]chan struct{}
// Proxy specifies a function to return a proxy for a given
// Request. If the function returns a non-nil error, the
// request is aborted with the provided error.
//
// The proxy type is determined by the URL scheme. "http",
// "https", and "socks5" are supported. If the scheme is empty,
// "http" is assumed.
//
// If Proxy is nil or returns a nil *URL, no proxy is used.
Proxy func(*Request) (*url.URL, error)
// DialContext specifies the dial function for creating unencrypted TCP connections.
// If DialContext is nil (and the deprecated Dial below is also nil),
// then the transport dials using package net.
//
// DialContext runs concurrently with calls to RoundTrip.
// A RoundTrip call that initiates a dial may end up using
	// a connection dialed previously when the earlier connection
// becomes idle before the later DialContext completes.
DialContext func(ctx context.Context, network, addr string) (net.Conn, error)
// Dial specifies the dial function for creating unencrypted TCP connections.
//
// Dial runs concurrently with calls to RoundTrip.
// A RoundTrip call that initiates a dial may end up using
	// a connection dialed previously when the earlier connection
// becomes idle before the later Dial completes.
//
// Deprecated: Use DialContext instead, which allows the transport
// to cancel dials as soon as they are no longer needed.
// If both are set, DialContext takes priority.
Dial func(network, addr string) (net.Conn, error)
// DialTLS specifies an optional dial function for creating
// TLS connections for non-proxied HTTPS requests.
//
// If DialTLS is nil, Dial and TLSClientConfig are used.
//
// If DialTLS is set, the Dial hook is not used for HTTPS
// requests and the TLSClientConfig and TLSHandshakeTimeout
// are ignored. The returned net.Conn is assumed to already be
// past the TLS handshake.
DialTLS func(network, addr string) (net.Conn, error)
// TLSClientConfig specifies the TLS configuration to use with
// tls.Client.
// If nil, the default configuration is used.
// If non-nil, HTTP/2 support may not be enabled by default.
TLSClientConfig *tls.Config
	// TLSHandshakeTimeout specifies the maximum amount of time to
	// wait for a TLS handshake. Zero means no timeout.
TLSHandshakeTimeout time.Duration
// DisableKeepAlives, if true, disables HTTP keep-alives and
// will only use the connection to the server for a single
// HTTP request.
//
// This is unrelated to the similarly named TCP keep-alives.
DisableKeepAlives bool
// DisableCompression, if true, prevents the Transport from
// requesting compression with an "Accept-Encoding: gzip"
// request header when the Request contains no existing
// Accept-Encoding value. If the Transport requests gzip on
// its own and gets a gzipped response, it's transparently
// decoded in the Response.Body. However, if the user
// explicitly requested gzip it is not automatically
// uncompressed.
DisableCompression bool
// MaxIdleConns controls the maximum number of idle (keep-alive)
// connections across all hosts. Zero means no limit.
MaxIdleConns int
// MaxIdleConnsPerHost, if non-zero, controls the maximum idle
// (keep-alive) connections to keep per-host. If zero,
// DefaultMaxIdleConnsPerHost is used.
MaxIdleConnsPerHost int
// MaxConnsPerHost optionally limits the total number of
// connections per host, including connections in the dialing,
// active, and idle states. On limit violation, dials will block.
//
// Zero means no limit.
//
// For HTTP/2, this currently only controls the number of new
// connections being created at a time, instead of the total
// number. In practice, hosts using HTTP/2 only have about one
// idle connection, though.
MaxConnsPerHost int
// IdleConnTimeout is the maximum amount of time an idle
// (keep-alive) connection will remain idle before closing
// itself.
// Zero means no limit.
IdleConnTimeout time.Duration
// ResponseHeaderTimeout, if non-zero, specifies the amount of
// time to wait for a server's response headers after fully
// writing the request (including its body, if any). This
// time does not include the time to read the response body.
ResponseHeaderTimeout time.Duration
// ExpectContinueTimeout, if non-zero, specifies the amount of
// time to wait for a server's first response headers after fully
// writing the request headers if the request has an
// "Expect: 100-continue" header. Zero means no timeout and
// causes the body to be sent immediately, without
// waiting for the server to approve.
// This time does not include the time to send the request header.
ExpectContinueTimeout time.Duration
// TLSNextProto specifies how the Transport switches to an
// alternate protocol (such as HTTP/2) after a TLS NPN/ALPN
	// protocol negotiation. If Transport dials a TLS connection
// with a non-empty protocol name and TLSNextProto contains a
// map entry for that key (such as "h2"), then the func is
// called with the request's authority (such as "example.com"
// or "example.com:1234") and the TLS connection. The function
// must return a RoundTripper that then handles the request.
// If TLSNextProto is not nil, HTTP/2 support is not enabled
// automatically.
TLSNextProto map[string]func(authority string, c *tls.Conn) RoundTripper
// ProxyConnectHeader optionally specifies headers to send to
// proxies during CONNECT requests.
ProxyConnectHeader Header
// MaxResponseHeaderBytes specifies a limit on how many
// response bytes are allowed in the server's response
// header.
//
// Zero means to use a default limit.
MaxResponseHeaderBytes int64
// nextProtoOnce guards initialization of TLSNextProto and
// h2transport (via onceSetNextProtoDefaults)
nextProtoOnce sync.Once
h2transport h2Transport // non-nil if http2 wired up
}
// h2Transport is the interface we expect to be able to call from
// net/http against an *http2.Transport that's either bundled into
// h2_bundle.go or supplied by the user via x/net/http2.
//
// We name it with the "h2" prefix to stay out of the "http2" prefix
// namespace used by x/tools/cmd/bundle for h2_bundle.go.
type h2Transport interface {
CloseIdleConnections()
}
// onceSetNextProtoDefaults initializes TLSNextProto.
// It must be called via t.nextProtoOnce.Do.
func (t *Transport) onceSetNextProtoDefaults() {
if strings.Contains(os.Getenv("GODEBUG"), "http2client=0") {
return
}
// If they've already configured http2 with
// golang.org/x/net/http2 instead of the bundled copy, try to
	// get at its http2.Transport value (via the "https"
// altproto map) so we can call CloseIdleConnections on it if
// requested. (Issue 22891)
altProto, _ := t.altProto.Load().(map[string]RoundTripper)
if rv := reflect.ValueOf(altProto["https"]); rv.IsValid() && rv.Type().Kind() == reflect.Struct && rv.Type().NumField() == 1 {
if v := rv.Field(0); v.CanInterface() {
if h2i, ok := v.Interface().(h2Transport); ok {
t.h2transport = h2i
}
}
}
if t.TLSNextProto != nil {
// This is the documented way to disable http2 on a
// Transport.
return
}
if t.TLSClientConfig != nil || t.Dial != nil || t.DialTLS != nil {
// Be conservative and don't automatically enable
// http2 if they've specified a custom TLS config or
// custom dialers. Let them opt-in themselves via
// http2.ConfigureTransport so we don't surprise them
// by modifying their tls.Config. Issue 14275.
return
}
t2, err := http2configureTransport(t)
if err != nil {
log.Printf("Error enabling Transport HTTP/2 support: %v", err)
return
}
t.h2transport = t2
// Auto-configure the http2.Transport's MaxHeaderListSize from
// the http.Transport's MaxResponseHeaderBytes. They don't
// exactly mean the same thing, but they're close.
//
	// TODO: also add this to x/net/http2.ConfigureTransport, behind
// a +build go1.7 build tag:
if limit1 := t.MaxResponseHeaderBytes; limit1 != 0 && t2.MaxHeaderListSize == 0 {
const h2max = 1<<32 - 1
if limit1 >= h2max {
t2.MaxHeaderListSize = h2max
} else {
t2.MaxHeaderListSize = uint32(limit1)
}
}
}
// ProxyFromEnvironment returns the URL of the proxy to use for a
// given request, as indicated by the environment variables
// HTTP_PROXY, HTTPS_PROXY and NO_PROXY (or the lowercase versions
// thereof). HTTPS_PROXY takes precedence over HTTP_PROXY for https
// requests.
//
// The environment values may be either a complete URL or a
// "host[:port]", in which case the "http" scheme is assumed.
// An error is returned if the value is a different form.
//
// A nil URL and nil error are returned if no proxy is defined in the
// environment, or a proxy should not be used for the given request,
// as defined by NO_PROXY.
//
// As a special case, if req.URL.Host is "localhost" (with or without
// a port number), then a nil URL and nil error will be returned.
func ProxyFromEnvironment(req *Request) (*url.URL, error) {
return envProxyFunc()(req.URL)
}
// ProxyURL returns a proxy function (for use in a Transport)
// that always returns the same URL.
func ProxyURL(fixedURL *url.URL) func(*Request) (*url.URL, error) {
return func(*Request) (*url.URL, error) {
return fixedURL, nil
}
}
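// exampleFixedProxy is an illustrative sketch (not part of the original
// file) showing how ProxyURL is typically combined with a Transport.
// The proxy address is a made-up placeholder.
func exampleFixedProxy() (*Transport, error) {
	proxy, err := url.Parse("http://proxy.example.com:3128")
	if err != nil {
		return nil, err
	}
	return &Transport{Proxy: ProxyURL(proxy)}, nil
}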
// transportRequest is a wrapper around a *Request that adds
// optional extra headers to write and stores any error to return
// from roundTrip.
type transportRequest struct {
*Request // original request, not to be mutated
extra Header // extra headers to write, or nil
trace *httptrace.ClientTrace // optional
mu sync.Mutex // guards err
err error // first setError value for mapRoundTripError to consider
}
func (tr *transportRequest) extraHeaders() Header {
if tr.extra == nil {
tr.extra = make(Header)
}
return tr.extra
}
func (tr *transportRequest) setError(err error) {
tr.mu.Lock()
if tr.err == nil {
tr.err = err
}
tr.mu.Unlock()
}
// roundTrip implements a RoundTripper over HTTP.
func (t *Transport) roundTrip(req *Request) (*Response, error) {
t.nextProtoOnce.Do(t.onceSetNextProtoDefaults)
ctx := req.Context()
trace := httptrace.ContextClientTrace(ctx)
if req.URL == nil {
req.closeBody()
return nil, errors.New("http: nil Request.URL")
}
if req.Header == nil {
req.closeBody()
return nil, errors.New("http: nil Request.Header")
}
scheme := req.URL.Scheme
isHTTP := scheme == "http" || scheme == "https"
if isHTTP {
for k, vv := range req.Header {
if !httpguts.ValidHeaderFieldName(k) {
return nil, fmt.Errorf("net/http: invalid header field name %q", k)
}
for _, v := range vv {
if !httpguts.ValidHeaderFieldValue(v) {
return nil, fmt.Errorf("net/http: invalid header field value %q for key %v", v, k)
}
}
}
}
altProto, _ := t.altProto.Load().(map[string]RoundTripper)
if altRT := altProto[scheme]; altRT != nil {
if resp, err := altRT.RoundTrip(req); err != ErrSkipAltProtocol {
return resp, err
}
}
if !isHTTP {
req.closeBody()
return nil, &badStringError{"unsupported protocol scheme", scheme}
}
if req.Method != "" && !validMethod(req.Method) {
return nil, fmt.Errorf("net/http: invalid method %q", req.Method)
}
if req.URL.Host == "" {
req.closeBody()
return nil, errors.New("http: no Host in request URL")
}
for {
select {
case <-ctx.Done():
req.closeBody()
return nil, ctx.Err()
default:
}
// treq gets modified by roundTrip, so we need to recreate for each retry.
treq := &transportRequest{Request: req, trace: trace}
cm, err := t.connectMethodForRequest(treq)
if err != nil {
req.closeBody()
return nil, err
}
// Get the cached or newly-created connection to either the
// host (for http or https), the http proxy, or the http proxy
// pre-CONNECTed to https server. In any case, we'll be ready
// to send it requests.
pconn, err := t.getConn(treq, cm)
if err != nil {
t.setReqCanceler(req, nil)
req.closeBody()
return nil, err
}
var resp *Response
if pconn.alt != nil {
// HTTP/2 path.
t.decHostConnCount(cm.key()) // don't count cached http2 conns toward conns per host
t.setReqCanceler(req, nil) // not cancelable with CancelRequest
resp, err = pconn.alt.RoundTrip(req)
} else {
resp, err = pconn.roundTrip(treq)
}
if err == nil {
return resp, nil
}
if !pconn.shouldRetryRequest(req, err) {
// Issue 16465: return underlying net.Conn.Read error from peek,
// as we've historically done.
if e, ok := err.(transportReadFromServerError); ok {
err = e.err
}
return nil, err
}
testHookRoundTripRetried()
// Rewind the body if we're able to. (HTTP/2 does this itself so we only
// need to do it for HTTP/1.1 connections.)
if req.GetBody != nil && pconn.alt == nil {
newReq := *req
var err error
newReq.Body, err = req.GetBody()
if err != nil {
return nil, err
}
req = &newReq
}
}
}
// shouldRetryRequest reports whether we should retry sending a failed
// HTTP request on a new connection. The non-nil input error is the
// error from roundTrip.
func (pc *persistConn) shouldRetryRequest(req *Request, err error) bool {
if http2isNoCachedConnError(err) {
// Issue 16582: if the user started a bunch of
// requests at once, they can all pick the same conn
// and violate the server's max concurrent streams.
// Instead, match the HTTP/1 behavior for now and dial
// again to get a new TCP connection, rather than failing
// this request.
return true
}
if err == errMissingHost {
// User error.
return false
}
if !pc.isReused() {
// This was a fresh connection. There's no reason the server
// should've hung up on us.
//
// Also, if we retried now, we could loop forever
// creating new connections and retrying if the server
// is just hanging up on us because it doesn't like
// our request (as opposed to sending an error).
return false
}
if _, ok := err.(nothingWrittenError); ok {
// We never wrote anything, so it's safe to retry, if there's no body or we
// can "rewind" the body with GetBody.
return req.outgoingLength() == 0 || req.GetBody != nil
}
if !req.isReplayable() {
// Don't retry non-idempotent requests.
return false
}
if _, ok := err.(transportReadFromServerError); ok {
// We got some non-EOF net.Conn.Read failure reading
// the 1st response byte from the server.
return true
}
if err == errServerClosedIdle {
// The server replied with io.EOF while we were trying to
		// read the response. Probably an unfortunately timed keep-alive
		// timeout, just as the client was writing a request.
return true
}
return false // conservatively
}
// ErrSkipAltProtocol is a sentinel error value defined by Transport.RegisterProtocol.
var ErrSkipAltProtocol = errors.New("net/http: skip alternate protocol")
// RegisterProtocol registers a new protocol with scheme.
// The Transport will pass requests using the given scheme to rt.
// It is rt's responsibility to simulate HTTP request semantics.
//
// RegisterProtocol can be used by other packages to provide
// implementations of protocol schemes like "ftp" or "file".
//
// If rt.RoundTrip returns ErrSkipAltProtocol, the Transport will
// handle the RoundTrip itself for that one request, as if the
// protocol were not registered.
func (t *Transport) RegisterProtocol(scheme string, rt RoundTripper) {
t.altMu.Lock()
defer t.altMu.Unlock()
oldMap, _ := t.altProto.Load().(map[string]RoundTripper)
if _, exists := oldMap[scheme]; exists {
panic("protocol " + scheme + " already registered")
}
newMap := make(map[string]RoundTripper)
for k, v := range oldMap {
newMap[k] = v
}
newMap[scheme] = rt
t.altProto.Store(newMap)
}
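// The following is an illustrative sketch (not part of the original file):
// a trivial RoundTripper registered for a hypothetical "echo" scheme, which
// answers every request with an empty 200 response.
type echoRoundTripper struct{}

func (echoRoundTripper) RoundTrip(req *Request) (*Response, error) {
	return &Response{
		Status:     "200 OK",
		StatusCode: 200,
		Proto:      "HTTP/1.1",
		ProtoMajor: 1,
		ProtoMinor: 1,
		Header:     make(Header),
		Body:       NoBody,
		Request:    req,
	}, nil
}

func exampleRegisterProtocol(t *Transport) {
	t.RegisterProtocol("echo", echoRoundTripper{})
}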
// CloseIdleConnections closes any connections which were previously
// connected from previous requests but are now sitting idle in
// a "keep-alive" state. It does not interrupt any connections currently
// in use.
func (t *Transport) CloseIdleConnections() {
t.nextProtoOnce.Do(t.onceSetNextProtoDefaults)
t.idleMu.Lock()
m := t.idleConn
t.idleConn = nil
t.idleConnCh = nil
t.wantIdle = true
t.idleLRU = connLRU{}
t.idleMu.Unlock()
for _, conns := range m {
for _, pconn := range conns {
pconn.close(errCloseIdleConns)
}
}
if t2 := t.h2transport; t2 != nil {
t2.CloseIdleConnections()
}
}
// CancelRequest cancels an in-flight request by closing its connection.
// CancelRequest should only be called after RoundTrip has returned.
//
// Deprecated: Use Request.WithContext to create a request with a
// cancelable context instead. CancelRequest cannot cancel HTTP/2
// requests.
func (t *Transport) CancelRequest(req *Request) {
t.cancelRequest(req, errRequestCanceled)
}
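// exampleContextCancel is an illustrative sketch (not part of the original
// file) of the recommended replacement for CancelRequest: a cancelable
// context attached to the request. The URL and timeout are placeholders.
func exampleContextCancel(c *Client) error {
	req, err := NewRequest("GET", "https://example.com/", nil)
	if err != nil {
		return err
	}
	ctx, cancel := context.WithTimeout(req.Context(), 5*time.Second)
	defer cancel()
	resp, err := c.Do(req.WithContext(ctx))
	if err != nil {
		return err
	}
	return resp.Body.Close()
}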
// Cancel an in-flight request, recording the error value.
func (t *Transport) cancelRequest(req *Request, err error) {
t.reqMu.Lock()
cancel := t.reqCanceler[req]
delete(t.reqCanceler, req)
t.reqMu.Unlock()
if cancel != nil {
cancel(err)
}
}
//
// Private implementation past this point.
//
var (
	// envProxyOnce guards envProxyFuncValue
envProxyOnce sync.Once
envProxyFuncValue func(*url.URL) (*url.URL, error)
)
// envProxyFunc returns the proxy-selection function determined by the
// environment, computed at most once. Caching the result mitigates
// expensive lookups on some platforms (e.g. Windows).
func envProxyFunc() func(*url.URL) (*url.URL, error) {
envProxyOnce.Do(func() {
envProxyFuncValue = httpproxy.FromEnvironment().ProxyFunc()
})
return envProxyFuncValue
}
// resetProxyConfig is used by tests.
func resetProxyConfig() {
envProxyOnce = sync.Once{}
envProxyFuncValue = nil
}
func (t *Transport) connectMethodForRequest(treq *transportRequest) (cm connectMethod, err error) {
if port := treq.URL.Port(); !validPort(port) {
return cm, fmt.Errorf("invalid URL port %q", port)
}
cm.targetScheme = treq.URL.Scheme
cm.targetAddr = canonicalAddr(treq.URL)
if t.Proxy != nil {
cm.proxyURL, err = t.Proxy(treq.Request)
if err == nil && cm.proxyURL != nil {
if port := cm.proxyURL.Port(); !validPort(port) {
return cm, fmt.Errorf("invalid proxy URL port %q", port)
}
}
}
return cm, err
}
// proxyAuth returns the Proxy-Authorization header to set
// on requests, if applicable.
func (cm *connectMethod) proxyAuth() string {
if cm.proxyURL == nil {
return ""
}
if u := cm.proxyURL.User; u != nil {
username := u.Username()
password, _ := u.Password()
return "Basic " + basicAuth(username, password)
}
return ""
}
// error values for debugging and testing, not seen by users.
var (
errKeepAlivesDisabled = errors.New("http: putIdleConn: keep alives disabled")
errConnBroken = errors.New("http: putIdleConn: connection is in bad state")
errWantIdle = errors.New("http: putIdleConn: CloseIdleConnections was called")
errTooManyIdle = errors.New("http: putIdleConn: too many idle connections")
errTooManyIdleHost = errors.New("http: putIdleConn: too many idle connections for host")
errCloseIdleConns = errors.New("http: CloseIdleConnections called")
errReadLoopExiting = errors.New("http: persistConn.readLoop exiting")
errIdleConnTimeout = errors.New("http: idle connection timeout")
errNotCachingH2Conn = errors.New("http: not caching alternate protocol's connections")
// errServerClosedIdle is not seen by users for idempotent requests, but may be
// seen by a user if the server shuts down an idle connection and sends its FIN
// in flight with already-written POST body bytes from the client.
// See https://github.com/golang/go/issues/19943#issuecomment-355607646
errServerClosedIdle = errors.New("http: server closed idle connection")
)
// transportReadFromServerError is used by Transport.readLoop when the
// 1 byte peek read fails and we're actually anticipating a response.
// Usually this is just due to the inherent keep-alive shut down race,
// where the server closed the connection at the same time the client
// wrote. The underlying err field is usually io.EOF or some
// ECONNRESET sort of thing which varies by platform. But it might be
// the user's custom net.Conn.Read error too, so we carry it along for
// them to return from Transport.RoundTrip.
type transportReadFromServerError struct {
err error
}
func (e transportReadFromServerError) Error() string {
return fmt.Sprintf("net/http: Transport failed to read from server: %v", e.err)
}
func (t *Transport) putOrCloseIdleConn(pconn *persistConn) {
if err := t.tryPutIdleConn(pconn); err != nil {
pconn.close(err)
}
}
func (t *Transport) maxIdleConnsPerHost() int {
if v := t.MaxIdleConnsPerHost; v != 0 {
return v
}
return DefaultMaxIdleConnsPerHost
}
// tryPutIdleConn adds pconn to the list of idle persistent connections awaiting
// a new request.
// If pconn is no longer needed or not in a good state, tryPutIdleConn returns
// an error explaining why it wasn't registered.
// tryPutIdleConn does not close pconn. Use putOrCloseIdleConn instead for that.
func (t *Transport) tryPutIdleConn(pconn *persistConn) error {
if t.DisableKeepAlives || t.MaxIdleConnsPerHost < 0 {
return errKeepAlivesDisabled
}
if pconn.isBroken() {
return errConnBroken
}
if pconn.alt != nil {
return errNotCachingH2Conn
}
pconn.markReused()
key := pconn.cacheKey
t.idleMu.Lock()
defer t.idleMu.Unlock()
waitingDialer := t.idleConnCh[key]
select {
case waitingDialer <- pconn:
// We're done with this pconn and somebody else is
// currently waiting for a conn of this type (they're
// actively dialing, but this conn is ready
// first). Chrome calls this socket late binding. See
// https://insouciant.org/tech/connection-management-in-chromium/
return nil
default:
if waitingDialer != nil {
// They had populated this, but their dial won
// first, so we can clean up this map entry.
delete(t.idleConnCh, key)
}
}
if t.wantIdle {
return errWantIdle
}
if t.idleConn == nil {
t.idleConn = make(map[connectMethodKey][]*persistConn)
}
idles := t.idleConn[key]
if len(idles) >= t.maxIdleConnsPerHost() {
return errTooManyIdleHost
}
for _, exist := range idles {
if exist == pconn {
log.Fatalf("dup idle pconn %p in freelist", pconn)
}
}
t.idleConn[key] = append(idles, pconn)
t.idleLRU.add(pconn)
if t.MaxIdleConns != 0 && t.idleLRU.len() > t.MaxIdleConns {
oldest := t.idleLRU.removeOldest()
oldest.close(errTooManyIdle)
t.removeIdleConnLocked(oldest)
}
if t.IdleConnTimeout > 0 {
if pconn.idleTimer != nil {
pconn.idleTimer.Reset(t.IdleConnTimeout)
} else {
pconn.idleTimer = time.AfterFunc(t.IdleConnTimeout, pconn.closeConnIfStillIdle)
}
}
pconn.idleAt = time.Now()
return nil
}
// getIdleConnCh returns a channel to receive and return idle
// persistent connection for the given connectMethod.
// It may return nil, if persistent connections are not being used.
func (t *Transport) getIdleConnCh(cm connectMethod) chan *persistConn {
if t.DisableKeepAlives {
return nil
}
key := cm.key()
t.idleMu.Lock()
defer t.idleMu.Unlock()
t.wantIdle = false
if t.idleConnCh == nil {
t.idleConnCh = make(map[connectMethodKey]chan *persistConn)
}
ch, ok := t.idleConnCh[key]
if !ok {
ch = make(chan *persistConn)
t.idleConnCh[key] = ch
}
return ch
}
func (t *Transport) getIdleConn(cm connectMethod) (pconn *persistConn, idleSince time.Time) {
key := cm.key()
t.idleMu.Lock()
defer t.idleMu.Unlock()
for {
pconns, ok := t.idleConn[key]
if !ok {
return nil, time.Time{}
}
if len(pconns) == 1 {
pconn = pconns[0]
delete(t.idleConn, key)
} else {
// 2 or more cached connections; use the most
// recently used one at the end.
pconn = pconns[len(pconns)-1]
t.idleConn[key] = pconns[:len(pconns)-1]
}
t.idleLRU.remove(pconn)
if pconn.isBroken() {
// There is a tiny window where this is
// possible, between the connecting dying and
// the persistConn readLoop calling
// Transport.removeIdleConn. Just skip it and
// carry on.
continue
}
return pconn, pconn.idleAt
}
}
// removeIdleConn marks pconn as dead.
func (t *Transport) removeIdleConn(pconn *persistConn) {
t.idleMu.Lock()
defer t.idleMu.Unlock()
t.removeIdleConnLocked(pconn)
}
// t.idleMu must be held.
func (t *Transport) removeIdleConnLocked(pconn *persistConn) {
if pconn.idleTimer != nil {
pconn.idleTimer.Stop()
}
t.idleLRU.remove(pconn)
key := pconn.cacheKey
pconns := t.idleConn[key]
switch len(pconns) {
case 0:
// Nothing
case 1:
if pconns[0] == pconn {
delete(t.idleConn, key)
}
default:
for i, v := range pconns {
if v != pconn {
continue
}
// Slide down, keeping most recently-used
// conns at the end.
copy(pconns[i:], pconns[i+1:])
t.idleConn[key] = pconns[:len(pconns)-1]
break
}
}
}
func (t *Transport) setReqCanceler(r *Request, fn func(error)) {
t.reqMu.Lock()
defer t.reqMu.Unlock()
if t.reqCanceler == nil {
t.reqCanceler = make(map[*Request]func(error))
}
if fn != nil {
t.reqCanceler[r] = fn
} else {
delete(t.reqCanceler, r)
}
}
// replaceReqCanceler replaces an existing cancel function. If there is no cancel function
// for the request, we don't set the function and return false.
// Since CancelRequest will clear the canceler, we can use the return value to detect if
// the request was canceled since the last setReqCancel call.
func (t *Transport) replaceReqCanceler(r *Request, fn func(error)) bool {
t.reqMu.Lock()
defer t.reqMu.Unlock()
_, ok := t.reqCanceler[r]
if !ok {
return false
}
if fn != nil {
t.reqCanceler[r] = fn
} else {
delete(t.reqCanceler, r)
}
return true
}
var zeroDialer net.Dialer
func (t *Transport) dial(ctx context.Context, network, addr string) (net.Conn, error) {
if t.DialContext != nil {
return t.DialContext(ctx, network, addr)
}
if t.Dial != nil {
c, err := t.Dial(network, addr)
if c == nil && err == nil {
err = errors.New("net/http: Transport.Dial hook returned (nil, nil)")
}
return c, err
}
return zeroDialer.DialContext(ctx, network, addr)
}
// getConn dials and creates a new persistConn to the target as
// specified in the connectMethod. This includes doing a proxy CONNECT
// and/or setting up TLS. If this doesn't return an error, the persistConn
// is ready to write requests to.
func (t *Transport) getConn(treq *transportRequest, cm connectMethod) (*persistConn, error) {
req := treq.Request
trace := treq.trace
ctx := req.Context()
if trace != nil && trace.GetConn != nil {
trace.GetConn(cm.addr())
}
if pc, idleSince := t.getIdleConn(cm); pc != nil {
if trace != nil && trace.GotConn != nil {
trace.GotConn(pc.gotIdleConnTrace(idleSince))
}
// set request canceler to some non-nil function so we
// can detect whether it was cleared between now and when
// we enter roundTrip
t.setReqCanceler(req, func(error) {})
return pc, nil
}
type dialRes struct {
pc *persistConn
err error
}
dialc := make(chan dialRes)
cmKey := cm.key()
// Copy these hooks so we don't race on the postPendingDial in
// the goroutine we launch. Issue 11136.
testHookPrePendingDial := testHookPrePendingDial
testHookPostPendingDial := testHookPostPendingDial
handlePendingDial := func() {
testHookPrePendingDial()
go func() {
if v := <-dialc; v.err == nil {
t.putOrCloseIdleConn(v.pc)
} else {
t.decHostConnCount(cmKey)
}
testHookPostPendingDial()
}()
}
cancelc := make(chan error, 1)
t.setReqCanceler(req, func(err error) { cancelc <- err })
if t.MaxConnsPerHost > 0 {
select {
case <-t.incHostConnCount(cmKey):
// count below conn per host limit; proceed
case pc := <-t.getIdleConnCh(cm):
if trace != nil && trace.GotConn != nil {
trace.GotConn(httptrace.GotConnInfo{Conn: pc.conn, Reused: pc.isReused()})
}
return pc, nil
case <-req.Cancel:
return nil, errRequestCanceledConn
case <-req.Context().Done():
return nil, req.Context().Err()
case err := <-cancelc:
if err == errRequestCanceled {
err = errRequestCanceledConn
}
return nil, err
}
}
go func() {
pc, err := t.dialConn(ctx, cm)
dialc <- dialRes{pc, err}
}()
idleConnCh := t.getIdleConnCh(cm)
select {
case v := <-dialc:
// Our dial finished.
if v.pc != nil {
if trace != nil && trace.GotConn != nil && v.pc.alt == nil {
trace.GotConn(httptrace.GotConnInfo{Conn: v.pc.conn})
}
return v.pc, nil
}
		// Our dial failed. Check why, so we can return a
		// nicer error value.
t.decHostConnCount(cmKey)
select {
case <-req.Cancel:
// It was an error due to cancelation, so prioritize that
// error value. (Issue 16049)
return nil, errRequestCanceledConn
case <-req.Context().Done():
return nil, req.Context().Err()
case err := <-cancelc:
if err == errRequestCanceled {
err = errRequestCanceledConn
}
return nil, err
default:
// It wasn't an error due to cancelation, so
// return the original error message:
return nil, v.err
}
case pc := <-idleConnCh:
// Another request finished first and its net.Conn
// became available before our dial. Or somebody
// else's dial that they didn't use.
// But our dial is still going, so give it away
// when it finishes:
handlePendingDial()
if trace != nil && trace.GotConn != nil {
trace.GotConn(httptrace.GotConnInfo{Conn: pc.conn, Reused: pc.isReused()})
}
return pc, nil
case <-req.Cancel:
handlePendingDial()
return nil, errRequestCanceledConn
case <-req.Context().Done():
handlePendingDial()
return nil, req.Context().Err()
case err := <-cancelc:
handlePendingDial()
if err == errRequestCanceled {
err = errRequestCanceledConn
}
return nil, err
}
}
// incHostConnCount increments the count of connections for a
// given host. It returns an already-closed channel if the count
// is not at its limit; otherwise it returns a channel which is
// notified when the count is below the limit.
func (t *Transport) incHostConnCount(cmKey connectMethodKey) <-chan struct{} {
if t.MaxConnsPerHost <= 0 {
return connsPerHostClosedCh
}
t.connCountMu.Lock()
defer t.connCountMu.Unlock()
if t.connPerHostCount[cmKey] == t.MaxConnsPerHost {
if t.connPerHostAvailable == nil {
t.connPerHostAvailable = make(map[connectMethodKey]chan struct{})
}
ch, ok := t.connPerHostAvailable[cmKey]
if !ok {
ch = make(chan struct{})
t.connPerHostAvailable[cmKey] = ch
}
return ch
}
if t.connPerHostCount == nil {
t.connPerHostCount = make(map[connectMethodKey]int)
}
t.connPerHostCount[cmKey]++
// return a closed channel to avoid race: if decHostConnCount is called
// after incHostConnCount and during the nil check, decHostConnCount
// will delete the channel since it's not being listened on yet.
return connsPerHostClosedCh
}
// decHostConnCount decrements the count of connections
// for a given host.
// See Transport.MaxConnsPerHost.
func (t *Transport) decHostConnCount(cmKey connectMethodKey) {
if t.MaxConnsPerHost <= 0 {
return
}
t.connCountMu.Lock()
defer t.connCountMu.Unlock()
t.connPerHostCount[cmKey]--
select {
case t.connPerHostAvailable[cmKey] <- struct{}{}:
default:
		// Closing the channel before deleting it avoids getConn waiting forever
		// in case getConn has a reference to the channel but hasn't started
		// waiting yet. This could lead to more than MaxConnsPerHost in the
		// unlikely case that more than one goroutine has fetched the channel
		// but none has started waiting.
if t.connPerHostAvailable[cmKey] != nil {
close(t.connPerHostAvailable[cmKey])
}
delete(t.connPerHostAvailable, cmKey)
}
if t.connPerHostCount[cmKey] == 0 {
delete(t.connPerHostCount, cmKey)
}
}
// connCloseListener wraps a connection, the transport that dialed it
// and the connected-to host key so the host connection count can be
// transparently decremented by whatever closes the embedded connection.
type connCloseListener struct {
net.Conn
t *Transport
cmKey connectMethodKey
didClose int32
}
func (c *connCloseListener) Close() error {
if atomic.AddInt32(&c.didClose, 1) != 1 {
return nil
}
err := c.Conn.Close()
c.t.decHostConnCount(c.cmKey)
return err
}
// The connect method and the transport can both specify a TLS
// Host name. The transport's name takes precedence if present.
func chooseTLSHost(cm connectMethod, t *Transport) string {
tlsHost := ""
if t.TLSClientConfig != nil {
tlsHost = t.TLSClientConfig.ServerName
}
if tlsHost == "" {
tlsHost = cm.tlsHost()
}
return tlsHost
}
// Add TLS to a persistent connection, i.e. negotiate a TLS session. If pconn is already a TLS
// tunnel, this function establishes a nested TLS session inside the encrypted channel.
// The remote endpoint's name may be overridden by TLSClientConfig.ServerName.
func (pconn *persistConn) addTLS(name string, trace *httptrace.ClientTrace) error {
// Initiate TLS and check remote host name against certificate.
cfg := cloneTLSConfig(pconn.t.TLSClientConfig)
if cfg.ServerName == "" {
cfg.ServerName = name
}
plainConn := pconn.conn
tlsConn := tls.Client(plainConn, cfg)
errc := make(chan error, 2)
var timer *time.Timer // for canceling TLS handshake
if d := pconn.t.TLSHandshakeTimeout; d != 0 {
timer = time.AfterFunc(d, func() {
errc <- tlsHandshakeTimeoutError{}
})
}
go func() {
if trace != nil && trace.TLSHandshakeStart != nil {
trace.TLSHandshakeStart()
}
err := tlsConn.Handshake()
if timer != nil {
timer.Stop()
}
errc <- err
}()
if err := <-errc; err != nil {
plainConn.Close()
if trace != nil && trace.TLSHandshakeDone != nil {
trace.TLSHandshakeDone(tls.ConnectionState{}, err)
}
return err
}
cs := tlsConn.ConnectionState()
if trace != nil && trace.TLSHandshakeDone != nil {
trace.TLSHandshakeDone(cs, nil)
}
pconn.tlsState = &cs
pconn.conn = tlsConn
return nil
}
func (t *Transport) dialConn(ctx context.Context, cm connectMethod) (*persistConn, error) {
pconn := &persistConn{
t: t,
cacheKey: cm.key(),
reqch: make(chan requestAndChan, 1),
writech: make(chan writeRequest, 1),
closech: make(chan struct{}),
writeErrCh: make(chan error, 1),
writeLoopDone: make(chan struct{}),
}
trace := httptrace.ContextClientTrace(ctx)
wrapErr := func(err error) error {
if cm.proxyURL != nil {
// Return a typed error, per Issue 16997
return &net.OpError{Op: "proxyconnect", Net: "tcp", Err: err}
}
return err
}
if cm.scheme() == "https" && t.DialTLS != nil {
var err error
pconn.conn, err = t.DialTLS("tcp", cm.addr())
if err != nil {
return nil, wrapErr(err)
}
if pconn.conn == nil {
return nil, wrapErr(errors.New("net/http: Transport.DialTLS returned (nil, nil)"))
}
if tc, ok := pconn.conn.(*tls.Conn); ok {
// Handshake here, in case DialTLS didn't. TLSNextProto below
// depends on it for knowing the connection state.
if trace != nil && trace.TLSHandshakeStart != nil {
trace.TLSHandshakeStart()
}
if err := tc.Handshake(); err != nil {
go pconn.conn.Close()
if trace != nil && trace.TLSHandshakeDone != nil {
trace.TLSHandshakeDone(tls.ConnectionState{}, err)
}
return nil, err
}
cs := tc.ConnectionState()
if trace != nil && trace.TLSHandshakeDone != nil {
trace.TLSHandshakeDone(cs, nil)
}
pconn.tlsState = &cs
}
} else {
conn, err := t.dial(ctx, "tcp", cm.addr())
if err != nil {
return nil, wrapErr(err)
}
pconn.conn = conn
if cm.scheme() == "https" {
var firstTLSHost string
if firstTLSHost, _, err = net.SplitHostPort(cm.addr()); err != nil {
return nil, wrapErr(err)
}
if err = pconn.addTLS(firstTLSHost, trace); err != nil {
return nil, wrapErr(err)
}
}
}
// Proxy setup.
switch {
case cm.proxyURL == nil:
// Do nothing. Not using a proxy.
case cm.proxyURL.Scheme == "socks5":
conn := pconn.conn
d := socksNewDialer("tcp", conn.RemoteAddr().String())
if u := cm.proxyURL.User; u != nil {
auth := &socksUsernamePassword{
Username: u.Username(),
}
auth.Password, _ = u.Password()
d.AuthMethods = []socksAuthMethod{
socksAuthMethodNotRequired,
socksAuthMethodUsernamePassword,
}
d.Authenticate = auth.Authenticate
}
if _, err := d.DialWithConn(ctx, conn, "tcp", cm.targetAddr); err != nil {
conn.Close()
return nil, err
}
case cm.targetScheme == "http":
pconn.isProxy = true
if pa := cm.proxyAuth(); pa != "" {
pconn.mutateHeaderFunc = func(h Header) {
h.Set("Proxy-Authorization", pa)
}
}
case cm.targetScheme == "https":
conn := pconn.conn
hdr := t.ProxyConnectHeader
if hdr == nil {
hdr = make(Header)
}
connectReq := &Request{
Method: "CONNECT",
URL: &url.URL{Opaque: cm.targetAddr},
Host: cm.targetAddr,
Header: hdr,
}
if pa := cm.proxyAuth(); pa != "" {
connectReq.Header.Set("Proxy-Authorization", pa)
}
connectReq.Write(conn)
// Read response.
// Okay to use and discard buffered reader here, because
// TLS server will not speak until spoken to.
br := bufio.NewReader(conn)
resp, err := ReadResponse(br, connectReq)
if err != nil {
conn.Close()
return nil, err
}
if resp.StatusCode != 200 {
f := strings.SplitN(resp.Status, " ", 2)
conn.Close()
if len(f) < 2 {
return nil, errors.New("unknown status code")
}
return nil, errors.New(f[1])
}
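// A successful CONNECT response has no message body (RFC 7231
// section 4.3.6), so there is nothing further to read before the
// connection is used as a tunnel.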
}
if cm.proxyURL != nil && cm.targetScheme == "https" {
if err := pconn.addTLS(cm.tlsHost(), trace); err != nil {
return nil, err
}
}
if s := pconn.tlsState; s != nil && s.NegotiatedProtocolIsMutual && s.NegotiatedProtocol != "" {
if next, ok := t.TLSNextProto[s.NegotiatedProtocol]; ok {
return &persistConn{alt: next(cm.targetAddr, pconn.conn.(*tls.Conn))}, nil
}
}
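// connCloseListener (defined earlier in this file) notifies the
// Transport when this conn closes, releasing its slot in the
// MaxConnsPerHost accounting.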
if t.MaxConnsPerHost > 0 {
pconn.conn = &connCloseListener{Conn: pconn.conn, t: t, cmKey: pconn.cacheKey}
}
pconn.br = bufio.NewReader(pconn)
pconn.bw = bufio.NewWriter(persistConnWriter{pconn})
go pconn.readLoop()
go pconn.writeLoop()
return pconn, nil
}
// persistConnWriter is the io.Writer written to by pc.bw.
// It accumulates the number of bytes written to the underlying conn,
// so the retry logic can determine whether any bytes made it across
// the wire.
// This is exactly 1 pointer field wide so it can go into an interface
// without allocation.
type persistConnWriter struct {
pc *persistConn
}
func (w persistConnWriter) Write(p []byte) (n int, err error) {
n, err = w.pc.conn.Write(p)
w.pc.nwrite += int64(n)
return
}
// connectMethod is the map key (in its String form) for keeping persistent
// TCP connections alive for subsequent HTTP requests.
//
// A connect method may be of the following types:
//
// Cache key form Description
// ----------------- -------------------------
// |http|foo.com http directly to server, no proxy
// |https|foo.com https directly to server, no proxy
// http://proxy.com|https|foo.com http to proxy, then CONNECT to foo.com
// http://proxy.com|http http to proxy, http to anywhere after that
// socks5://proxy.com|http|foo.com socks5 to proxy, then http to foo.com
// socks5://proxy.com|https|foo.com socks5 to proxy, then https to foo.com
// https://proxy.com|https|foo.com https to proxy, then CONNECT to foo.com
// https://proxy.com|http https to proxy, http to anywhere after that
//
type connectMethod struct {
proxyURL *url.URL // nil for no proxy, else full proxy URL
targetScheme string // "http" or "https"
// If proxyURL specifies an http or https proxy, and targetScheme is http (not https),
// then targetAddr is not included in the connect method key, because the socket can
// be reused for different targetAddr values.
targetAddr string
}
func (cm *connectMethod) key() connectMethodKey {
proxyStr := ""
targetAddr := cm.targetAddr
if cm.proxyURL != nil {
proxyStr = cm.proxyURL.String()
if (cm.proxyURL.Scheme == "http" || cm.proxyURL.Scheme == "https") && cm.targetScheme == "http" {
targetAddr = ""
}
}
return connectMethodKey{
proxy: proxyStr,
scheme: cm.targetScheme,
addr: targetAddr,
}
}
// scheme returns the first hop scheme: http, https, or socks5
func (cm *connectMethod) scheme() string {
if cm.proxyURL != nil {
return cm.proxyURL.Scheme
}
return cm.targetScheme
}
// addr returns the first hop "host:port" to which we need to TCP connect.
func (cm *connectMethod) addr() string {
if cm.proxyURL != nil {
return canonicalAddr(cm.proxyURL)
}
return cm.targetAddr
}
// tlsHost returns the host name to match against the peer's
// TLS certificate.
func (cm *connectMethod) tlsHost() string {
h := cm.targetAddr
if hasPort(h) {
h = h[:strings.LastIndex(h, ":")]
}
return h
}
// connectMethodKey is the map key version of connectMethod, with a
// stringified proxy URL (or the empty string) instead of a pointer to
// a URL.
type connectMethodKey struct {
proxy, scheme, addr string
}
func (k connectMethodKey) String() string {
// Only used by tests.
return fmt.Sprintf("%s|%s|%s", k.proxy, k.scheme, k.addr)
}
// persistConn wraps a connection, usually a persistent one
// (but may be used for non-keep-alive requests as well)
type persistConn struct {
// alt optionally specifies the TLS NextProto RoundTripper.
// This is used for HTTP/2 today and future protocols later.
// If it's non-nil, the rest of the fields are unused.
alt RoundTripper
t *Transport
cacheKey connectMethodKey
conn net.Conn
tlsState *tls.ConnectionState
br *bufio.Reader // from conn
bw *bufio.Writer // to conn
nwrite int64 // bytes written
reqch chan requestAndChan // written by roundTrip; read by readLoop
writech chan writeRequest // written by roundTrip; read by writeLoop
closech chan struct{} // closed when conn closed
isProxy bool
sawEOF bool // whether we've seen EOF from conn; owned by readLoop
readLimit int64 // bytes allowed to be read; owned by readLoop
// writeErrCh passes the request write error (usually nil)
// from the writeLoop goroutine to the readLoop which passes
// it off to the res.Body reader, which then uses it to decide
// whether or not a connection can be reused. Issue 7569.
writeErrCh chan error
writeLoopDone chan struct{} // closed when write loop ends
// Both guarded by Transport.idleMu:
idleAt time.Time // time it last become idle
idleTimer *time.Timer // holding an AfterFunc to close it
mu sync.Mutex // guards following fields
numExpectedResponses int
closed error // set non-nil when conn is closed, before closech is closed
canceledErr error // set non-nil if conn is canceled
broken bool // an error has happened on this connection; marked broken so it's not reused.
reused bool // whether conn has had successful request/response and is being reused.
// mutateHeaderFunc is an optional func to modify extra
// headers on each outbound request before it's written. (the
// original Request given to RoundTrip is not modified)
mutateHeaderFunc func(Header)
}
func (pc *persistConn) maxHeaderResponseSize() int64 {
if v := pc.t.MaxResponseHeaderBytes; v != 0 {
return v
}
return 10 << 20 // conservative default; same as http2
}
func (pc *persistConn) Read(p []byte) (n int, err error) {
if pc.readLimit <= 0 {
return 0, fmt.Errorf("read limit of %d bytes exhausted", pc.maxHeaderResponseSize())
}
if int64(len(p)) > pc.readLimit {
p = p[:pc.readLimit]
}
n, err = pc.conn.Read(p)
if err == io.EOF {
pc.sawEOF = true
}
pc.readLimit -= int64(n)
return
}
// isBroken reports whether this connection is in a known broken state.
func (pc *persistConn) isBroken() bool {
pc.mu.Lock()
b := pc.closed != nil
pc.mu.Unlock()
return b
}
// canceled returns non-nil if the connection was closed due to
// CancelRequest or due to context cancelation.
func (pc *persistConn) canceled() error {
pc.mu.Lock()
defer pc.mu.Unlock()
return pc.canceledErr
}
// isReused reports whether this connection has been used before.
func (pc *persistConn) isReused() bool {
pc.mu.Lock()
r := pc.reused
pc.mu.Unlock()
return r
}
func (pc *persistConn) gotIdleConnTrace(idleAt time.Time) (t httptrace.GotConnInfo) {
pc.mu.Lock()
defer pc.mu.Unlock()
t.Reused = pc.reused
t.Conn = pc.conn
t.WasIdle = true
if !idleAt.IsZero() {
t.IdleTime = time.Since(idleAt)
}
return
}
func (pc *persistConn) cancelRequest(err error) {
pc.mu.Lock()
defer pc.mu.Unlock()
pc.canceledErr = err
pc.closeLocked(errRequestCanceled)
}
// closeConnIfStillIdle closes the connection if it's still sitting idle.
// This is what's called by the persistConn's idleTimer, and is run in its
// own goroutine.
func (pc *persistConn) closeConnIfStillIdle() {
t := pc.t
t.idleMu.Lock()
defer t.idleMu.Unlock()
if _, ok := t.idleLRU.m[pc]; !ok {
// Not idle.
return
}
t.removeIdleConnLocked(pc)
pc.close(errIdleConnTimeout)
}
// mapRoundTripError returns the appropriate error value for
// persistConn.roundTrip.
//
// The provided err is the first error that (*persistConn).roundTrip
// happened to receive from its select statement.
//
// The startBytesWritten value should be the value of pc.nwrite before the roundTrip
// started writing the request.
func (pc *persistConn) mapRoundTripError(req *transportRequest, startBytesWritten int64, err error) error {
if err == nil {
return nil
}
// If the request was canceled, that's better than network
// failures that were likely the result of tearing down the
// connection.
if cerr := pc.canceled(); cerr != nil {
return cerr
}
// See if an error was set explicitly.
req.mu.Lock()
reqErr := req.err
req.mu.Unlock()
if reqErr != nil {
return reqErr
}
if err == errServerClosedIdle {
// Don't decorate
return err
}
if _, ok := err.(transportReadFromServerError); ok {
// Don't decorate
return err
}
if pc.isBroken() {
<-pc.writeLoopDone
if pc.nwrite == startBytesWritten {
return nothingWrittenError{err}
}
return fmt.Errorf("net/http: HTTP/1.x transport connection broken: %v", err)
}
return err
}
// errCallerOwnsConn is an internal sentinel error used when we hand
// off a writable response.Body to the caller. We use this to prevent
// closing a net.Conn that is now owned by the caller.
var errCallerOwnsConn = errors.New("read loop ending; caller owns writable underlying conn")
func (pc *persistConn) readLoop() {
closeErr := errReadLoopExiting // default value, if not changed below
defer func() {
pc.close(closeErr)
pc.t.removeIdleConn(pc)
}()
tryPutIdleConn := func(trace *httptrace.ClientTrace) bool {
if err := pc.t.tryPutIdleConn(pc); err != nil {
closeErr = err
if trace != nil && trace.PutIdleConn != nil && err != errKeepAlivesDisabled {
trace.PutIdleConn(err)
}
return false
}
if trace != nil && trace.PutIdleConn != nil {
trace.PutIdleConn(nil)
}
return true
}
// eofc is used to block caller goroutines reading from Response.Body
// at EOF until this goroutine has (potentially) added the connection
// back to the idle pool.
eofc := make(chan struct{})
defer close(eofc) // unblock reader on errors
// Read this once, before loop starts. (to avoid races in tests)
testHookMu.Lock()
testHookReadLoopBeforeNextRead := testHookReadLoopBeforeNextRead
testHookMu.Unlock()
alive := true
for alive {
pc.readLimit = pc.maxHeaderResponseSize()
_, err := pc.br.Peek(1)
pc.mu.Lock()
if pc.numExpectedResponses == 0 {
pc.readLoopPeekFailLocked(err)
pc.mu.Unlock()
return
}
pc.mu.Unlock()
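// Block until roundTrip hands us the request that this response
// (or peek error) belongs to.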
rc := <-pc.reqch
trace := httptrace.ContextClientTrace(rc.req.Context())
var resp *Response
if err == nil {
resp, err = pc.readResponse(rc, trace)
} else {
err = transportReadFromServerError{err}
closeErr = err
}
if err != nil {
if pc.readLimit <= 0 {
err = fmt.Errorf("net/http: server response headers exceeded %d bytes; aborted", pc.maxHeaderResponseSize())
}
select {
case rc.ch <- responseAndError{err: err}:
case <-rc.callerGone:
return
}
return
}
pc.readLimit = maxInt64 // effectively no limit for response bodies
pc.mu.Lock()
pc.numExpectedResponses--
pc.mu.Unlock()
bodyWritable := resp.bodyIsWritable()
hasBody := rc.req.Method != "HEAD" && resp.ContentLength != 0
if resp.Close || rc.req.Close || resp.StatusCode <= 199 || bodyWritable {
// Don't do keep-alive on error if either party requested a close
// or we get an unexpected informational (1xx) response.
// StatusCode 100 is already handled above.
alive = false
}
if !hasBody {
pc.t.setReqCanceler(rc.req, nil)
// Put the idle conn back into the pool before we send the response
// so if they process it quickly and make another request, they'll
// get this same conn. But we use the unbuffered channel 'rc.ch'
// to guarantee that persistConn.roundTrip has gotten out of its
// select (which may also be waiting for this persistConn to close)
// before we continue the loop.
alive = alive &&
!pc.sawEOF &&
pc.wroteRequest() &&
tryPutIdleConn(trace)
if bodyWritable {
closeErr = errCallerOwnsConn
}
select {
case rc.ch <- responseAndError{res: resp}:
case <-rc.callerGone:
return
}
// Now that they've read from the unbuffered channel, they're safely
// out of the select that also waits on this goroutine to die, so
// we're allowed to exit now if needed (if alive is false)
testHookReadLoopBeforeNextRead()
continue
}
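// waitForBodyRead is buffered with capacity 2 because both
// earlyCloseFn and fn below may send on it; neither should block.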
waitForBodyRead := make(chan bool, 2)
body := &bodyEOFSignal{
body: resp.Body,
earlyCloseFn: func() error {
waitForBodyRead <- false
<-eofc // will be closed by deferred call at the end of the function
return nil
},
fn: func(err error) error {
isEOF := err == io.EOF
waitForBodyRead <- isEOF
if isEOF {
<-eofc // see comment above eofc declaration
} else if err != nil {
if cerr := pc.canceled(); cerr != nil {
return cerr
}
}
return err
},
}
resp.Body = body
if rc.addedGzip && strings.EqualFold(resp.Header.Get("Content-Encoding"), "gzip") {
resp.Body = &gzipReader{body: body}
resp.Header.Del("Content-Encoding")
resp.Header.Del("Content-Length")
resp.ContentLength = -1
resp.Uncompressed = true
}
select {
case rc.ch <- responseAndError{res: resp}:
case <-rc.callerGone:
return
}
// Before looping back to the top of this function and peeking on
// the bufio.Reader, wait for the caller goroutine to finish
// reading the response body. (or for cancelation or death)
select {
case bodyEOF := <-waitForBodyRead:
pc.t.setReqCanceler(rc.req, nil) // before pc might return to idle pool
alive = alive &&
bodyEOF &&
!pc.sawEOF &&
pc.wroteRequest() &&
tryPutIdleConn(trace)
if bodyEOF {
eofc <- struct{}{}
}
case <-rc.req.Cancel:
alive = false
pc.t.CancelRequest(rc.req)
case <-rc.req.Context().Done():
alive = false
pc.t.cancelRequest(rc.req, rc.req.Context().Err())
case <-pc.closech:
alive = false
}
testHookReadLoopBeforeNextRead()
}
}
func (pc *persistConn) readLoopPeekFailLocked(peekErr error) {
if pc.closed != nil {
return
}
if n := pc.br.Buffered(); n > 0 {
buf, _ := pc.br.Peek(n)
log.Printf("Unsolicited response received on idle HTTP channel starting with %q; err=%v", buf, peekErr)
}
if peekErr == io.EOF {
// common case.
pc.closeLocked(errServerClosedIdle)
} else {
pc.closeLocked(fmt.Errorf("readLoopPeekFailLocked: %v", peekErr))
}
}
// readResponse reads an HTTP response (or two, in the case of "Expect:
// 100-continue") from the server. It returns the final non-100 one.
// trace is optional.
func (pc *persistConn) readResponse(rc requestAndChan, trace *httptrace.ClientTrace) (resp *Response, err error) {
if trace != nil && trace.GotFirstResponseByte != nil {
if peek, err := pc.br.Peek(1); err == nil && len(peek) == 1 {
trace.GotFirstResponseByte()
}
}
num1xx := 0 // number of informational 1xx headers received
const max1xxResponses = 5 // arbitrary bound on number of informational responses
continueCh := rc.continueCh
for {
resp, err = ReadResponse(pc.br, rc.req)
if err != nil {
return
}
resCode := resp.StatusCode
if continueCh != nil {
if resCode == 100 {
if trace != nil && trace.Got100Continue != nil {
trace.Got100Continue()
}
continueCh <- struct{}{}
continueCh = nil
} else if resCode >= 200 {
close(continueCh)
continueCh = nil
}
}
is1xx := 100 <= resCode && resCode <= 199
// treat 101 as a terminal status, see issue 26161
is1xxNonTerminal := is1xx && resCode != StatusSwitchingProtocols
if is1xxNonTerminal {
num1xx++
if num1xx > max1xxResponses {
return nil, errors.New("net/http: too many 1xx informational responses")
}
pc.readLimit = pc.maxHeaderResponseSize() // reset the limit
if trace != nil && trace.Got1xxResponse != nil {
if err := trace.Got1xxResponse(resCode, textproto.MIMEHeader(resp.Header)); err != nil {
return nil, err
}
}
continue
}
break
}
if resp.isProtocolSwitch() {
resp.Body = newReadWriteCloserBody(pc.br, pc.conn)
}
resp.TLS = pc.tlsState
return
}
// waitForContinue returns the function to block until
// any response, timeout or connection close. After any of them,
// the function returns a bool which indicates if the body should be sent.
func (pc *persistConn) waitForContinue(continueCh <-chan struct{}) func() bool {
if continueCh == nil {
return nil
}
return func() bool {
timer := time.NewTimer(pc.t.ExpectContinueTimeout)
defer timer.Stop()
select {
case _, ok := <-continueCh:
return ok
case <-timer.C:
return true
case <-pc.closech:
return false
}
}
}
func newReadWriteCloserBody(br *bufio.Reader, rwc io.ReadWriteCloser) io.ReadWriteCloser {
body := &readWriteCloserBody{ReadWriteCloser: rwc}
if br.Buffered() != 0 {
body.br = br
}
return body
}
// readWriteCloserBody is the Response.Body type used when we want to
// give users write access to the Body through the underlying
// connection (TCP, unless using custom dialers). This is then
// the concrete type for a Response.Body on the 101 Switching
// Protocols response, as used by WebSockets, h2c, etc.
type readWriteCloserBody struct {
br *bufio.Reader // used until empty
io.ReadWriteCloser
}
func (b *readWriteCloserBody) Read(p []byte) (n int, err error) {
if b.br != nil {
if n := b.br.Buffered(); len(p) > n {
p = p[:n]
}
n, err = b.br.Read(p)
if b.br.Buffered() == 0 {
b.br = nil
}
return n, err
}
return b.ReadWriteCloser.Read(p)
}
// nothingWrittenError wraps a write error which ended up writing zero bytes.
type nothingWrittenError struct {
error
}
func (pc *persistConn) writeLoop() {
defer close(pc.writeLoopDone)
for {
select {
case wr := <-pc.writech:
startBytesWritten := pc.nwrite
err := wr.req.Request.write(pc.bw, pc.isProxy, wr.req.extra, pc.waitForContinue(wr.continueCh))
if bre, ok := err.(requestBodyReadError); ok {
err = bre.error
// Errors reading from the user's
// Request.Body are high priority.
// Set it here before sending on the
// channels below or calling
// pc.close() which tears down
// connections and causes other
// errors.
wr.req.setError(err)
}
if err == nil {
err = pc.bw.Flush()
}
if err != nil {
wr.req.Request.closeBody()
if pc.nwrite == startBytesWritten {
err = nothingWrittenError{err}
}
}
pc.writeErrCh <- err // to the body reader, which might recycle us
wr.ch <- err // to the roundTrip function
if err != nil {
pc.close(err)
return
}
case <-pc.closech:
return
}
}
}
// maxWriteWaitBeforeConnReuse is how long a Transport RoundTrip
// will wait to see the Request's Body.Write result after getting a
// response from the server. See comments in (*persistConn).wroteRequest.
const maxWriteWaitBeforeConnReuse = 50 * time.Millisecond
// wroteRequest is a check before recycling a connection that the previous write
// (from writeLoop above) happened and was successful.
func (pc *persistConn) wroteRequest() bool {
select {
case err := <-pc.writeErrCh:
// Common case: the write happened well before the response, so
// avoid creating a timer.
return err == nil
default:
// Rare case: the request was written in writeLoop above but
// before it could send to pc.writeErrCh, the reader read it
// all, processed it, and called us here. In this case, give the
// write goroutine a bit of time to finish its send.
//
// Less rare case: We also get here in the legitimate case of
// Issue 7569, where the writer is still writing (or stalled),
// but the server has already replied. In this case, we don't
// want to wait too long, and we want to return false so this
// connection isn't re-used.
select {
case err := <-pc.writeErrCh:
return err == nil
case <-time.After(maxWriteWaitBeforeConnReuse):
return false
}
}
}
// responseAndError is how the goroutine reading from an HTTP/1 server
// communicates with the goroutine doing the RoundTrip.
type responseAndError struct {
res *Response // the response, if err is nil
err error
}
type requestAndChan struct {
req *Request
ch chan responseAndError // unbuffered; always send in select on callerGone
// whether the Transport (as opposed to the user client code)
// added the Accept-Encoding gzip header. If the Transport
// set it, only then do we transparently decode the gzip.
addedGzip bool
// Optional blocking chan for Expect: 100-continue (for send).
// If the request has an "Expect: 100-continue" header and
// the server responds 100 Continue, readLoop sends a value
// to writeLoop via this chan.
continueCh chan<- struct{}
callerGone <-chan struct{} // closed when roundTrip caller has returned
}
// A writeRequest is sent by the roundTrip goroutine to the
// writeLoop's goroutine to write a request while the read loop
// concurrently waits on both the write response and the server's
// reply.
type writeRequest struct {
req *transportRequest
ch chan<- error
// Optional blocking chan for Expect: 100-continue (for receive).
// If not nil, writeLoop blocks sending request body until
// it receives from this chan.
continueCh <-chan struct{}
}
type httpError struct {
err string
timeout bool
}
func (e *httpError) Error() string { return e.err }
func (e *httpError) Timeout() bool { return e.timeout }
func (e *httpError) Temporary() bool { return true }
var errTimeout error = &httpError{err: "net/http: timeout awaiting response headers", timeout: true}
var errRequestCanceled = errors.New("net/http: request canceled")
var errRequestCanceledConn = errors.New("net/http: request canceled while waiting for connection") // TODO: unify?
func nop() {}
// testHooks. Always non-nil.
var (
testHookEnterRoundTrip = nop
testHookWaitResLoop = nop
testHookRoundTripRetried = nop
testHookPrePendingDial = nop
testHookPostPendingDial = nop
testHookMu sync.Locker = fakeLocker{} // guards following
testHookReadLoopBeforeNextRead = nop
)
func (pc *persistConn) roundTrip(req *transportRequest) (resp *Response, err error) {
testHookEnterRoundTrip()
if !pc.t.replaceReqCanceler(req.Request, pc.cancelRequest) {
pc.t.putOrCloseIdleConn(pc)
return nil, errRequestCanceled
}
pc.mu.Lock()
pc.numExpectedResponses++
headerFn := pc.mutateHeaderFunc
pc.mu.Unlock()
if headerFn != nil {
headerFn(req.extraHeaders())
}
// Ask for a compressed version if the caller didn't set their
// own value for Accept-Encoding. We only attempt to
// uncompress the gzip stream if we were the layer that
// requested it.
requestedGzip := false
if !pc.t.DisableCompression &&
req.Header.Get("Accept-Encoding") == "" &&
req.Header.Get("Range") == "" &&
req.Method != "HEAD" {
// Request gzip only, not deflate. Deflate is ambiguous and
// not as universally supported anyway.
// See: http://www.gzip.org/zlib/zlib_faq.html#faq38
//
// Note that we don't request this for HEAD requests,
// due to a bug in nginx:
// https://trac.nginx.org/nginx/ticket/358
// https://golang.org/issue/5522
//
// We don't request gzip if the request is for a range, since
// auto-decoding a portion of a gzipped document will just fail
// anyway. See https://golang.org/issue/8923
requestedGzip = true
req.extraHeaders().Set("Accept-Encoding", "gzip")
}
var continueCh chan struct{}
if req.ProtoAtLeast(1, 1) && req.Body != nil && req.expectsContinue() {
continueCh = make(chan struct{}, 1)
}
if pc.t.DisableKeepAlives {
req.extraHeaders().Set("Connection", "close")
}
gone := make(chan struct{})
defer close(gone)
defer func() {
if err != nil {
pc.t.setReqCanceler(req.Request, nil)
}
}()
const debugRoundTrip = false
// Write the request concurrently with waiting for a response,
// in case the server decides to reply before reading our full
// request body.
startBytesWritten := pc.nwrite
writeErrCh := make(chan error, 1)
pc.writech <- writeRequest{req, writeErrCh, continueCh}
resc := make(chan responseAndError)
pc.reqch <- requestAndChan{
req: req.Request,
ch: resc,
addedGzip: requestedGzip,
continueCh: continueCh,
callerGone: gone,
}
var respHeaderTimer <-chan time.Time
cancelChan := req.Request.Cancel
ctxDoneChan := req.Context().Done()
for {
testHookWaitResLoop()
select {
case err := <-writeErrCh:
if debugRoundTrip {
req.logf("writeErrCh recv: %T/%#v", err, err)
}
if err != nil {
pc.close(fmt.Errorf("write error: %v", err))
return nil, pc.mapRoundTripError(req, startBytesWritten, err)
}
if d := pc.t.ResponseHeaderTimeout; d > 0 {
if debugRoundTrip {
req.logf("starting timer for %v", d)
}
timer := time.NewTimer(d)
defer timer.Stop() // prevent leaks
respHeaderTimer = timer.C
}
case <-pc.closech:
if debugRoundTrip {
req.logf("closech recv: %T %#v", pc.closed, pc.closed)
}
return nil, pc.mapRoundTripError(req, startBytesWritten, pc.closed)
case <-respHeaderTimer:
if debugRoundTrip {
req.logf("timeout waiting for response headers.")
}
pc.close(errTimeout)
return nil, errTimeout
case re := <-resc:
if (re.res == nil) == (re.err == nil) {
panic(fmt.Sprintf("internal error: exactly one of res or err should be set; nil=%v", re.res == nil))
}
if debugRoundTrip {
req.logf("resc recv: %p, %T/%#v", re.res, re.err, re.err)
}
if re.err != nil {
return nil, pc.mapRoundTripError(req, startBytesWritten, re.err)
}
return re.res, nil
case <-cancelChan:
pc.t.CancelRequest(req.Request)
cancelChan = nil
case <-ctxDoneChan:
pc.t.cancelRequest(req.Request, req.Context().Err())
cancelChan = nil
ctxDoneChan = nil
}
}
}
// tLogKey is a context WithValue key for test debugging contexts containing
// a t.Logf func. See export_test.go's Request.WithT method.
type tLogKey struct{}
func (tr *transportRequest) logf(format string, args ...interface{}) {
if logf, ok := tr.Request.Context().Value(tLogKey{}).(func(string, ...interface{})); ok {
logf(time.Now().Format(time.RFC3339Nano)+": "+format, args...)
}
}
// markReused marks this connection as having been successfully used for a
// request and response.
func (pc *persistConn) markReused() {
pc.mu.Lock()
pc.reused = true
pc.mu.Unlock()
}
// close closes the underlying TCP connection and closes
// the pc.closech channel.
//
// The provided err is only for testing and debugging; in normal
// circumstances it should never be seen by users.
func (pc *persistConn) close(err error) {
pc.mu.Lock()
defer pc.mu.Unlock()
pc.closeLocked(err)
}
func (pc *persistConn) closeLocked(err error) {
if err == nil {
panic("nil error")
}
pc.broken = true
if pc.closed == nil {
pc.closed = err
if pc.alt != nil {
// Do nothing; can only get here via getConn's
// handlePendingDial's putOrCloseIdleConn when
// it turns out the abandoned connection in
// flight ended up negotiating an alternate
// protocol. We don't use the connection
// freelist for http2. That's done by the
// alternate protocol's RoundTripper.
} else {
if err != errCallerOwnsConn {
pc.conn.Close()
}
close(pc.closech)
}
}
pc.mutateHeaderFunc = nil
}
var portMap = map[string]string{
"http": "80",
"https": "443",
"socks5": "1080",
}
// canonicalAddr returns url.Host but always with a ":port" suffix
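// (e.g. "example.com:443" for "https://example.com"; an explicit port
// such as "example.com:8080" is kept as-is).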
func canonicalAddr(url *url.URL) string {
addr := url.Hostname()
if v, err := idnaASCII(addr); err == nil {
addr = v
}
port := url.Port()
if port == "" {
port = portMap[url.Scheme]
}
return net.JoinHostPort(addr, port)
}
// bodyEOFSignal is used by the HTTP/1 transport when reading response
// bodies to make sure we see the end of a response body before
// proceeding and reading on the connection again.
//
// It wraps a ReadCloser but runs fn (if non-nil) at most
// once, right before its final (error-producing) Read or Close call
// returns. fn should return the new error to return from Read or Close.
//
// If earlyCloseFn is non-nil and Close is called before io.EOF is
// seen, earlyCloseFn is called instead of fn, and its return value is
// the return value from Close.
type bodyEOFSignal struct {
body io.ReadCloser
mu sync.Mutex // guards following 4 fields
closed bool // whether Close has been called
rerr error // sticky Read error
fn func(error) error // err will be nil on Read io.EOF
earlyCloseFn func() error // optional alt Close func used if io.EOF not seen
}
var errReadOnClosedResBody = errors.New("http: read on closed response body")
func (es *bodyEOFSignal) Read(p []byte) (n int, err error) {
es.mu.Lock()
closed, rerr := es.closed, es.rerr
es.mu.Unlock()
if closed {
return 0, errReadOnClosedResBody
}
if rerr != nil {
return 0, rerr
}
n, err = es.body.Read(p)
if err != nil {
es.mu.Lock()
defer es.mu.Unlock()
if es.rerr == nil {
es.rerr = err
}
err = es.condfn(err)
}
return
}
func (es *bodyEOFSignal) Close() error {
es.mu.Lock()
defer es.mu.Unlock()
if es.closed {
return nil
}
es.closed = true
if es.earlyCloseFn != nil && es.rerr != io.EOF {
return es.earlyCloseFn()
}
err := es.body.Close()
return es.condfn(err)
}
// caller must hold es.mu.
func (es *bodyEOFSignal) condfn(err error) error {
if es.fn == nil {
return err
}
err = es.fn(err)
es.fn = nil
return err
}
// gzipReader wraps a response body so it can lazily
// call gzip.NewReader on the first call to Read
type gzipReader struct {
body *bodyEOFSignal // underlying HTTP/1 response body framing
zr *gzip.Reader // lazily-initialized gzip reader
zerr error // any error from gzip.NewReader; sticky
}
func (gz *gzipReader) Read(p []byte) (n int, err error) {
if gz.zr == nil {
if gz.zerr == nil {
gz.zr, gz.zerr = gzip.NewReader(gz.body)
}
if gz.zerr != nil {
return 0, gz.zerr
}
}
gz.body.mu.Lock()
if gz.body.closed {
err = errReadOnClosedResBody
}
gz.body.mu.Unlock()
if err != nil {
return 0, err
}
return gz.zr.Read(p)
}
func (gz *gzipReader) Close() error {
return gz.body.Close()
}
type readerAndCloser struct {
io.Reader
io.Closer
}
type tlsHandshakeTimeoutError struct{}
func (tlsHandshakeTimeoutError) Timeout() bool { return true }
func (tlsHandshakeTimeoutError) Temporary() bool { return true }
func (tlsHandshakeTimeoutError) Error() string { return "net/http: TLS handshake timeout" }
// fakeLocker is a sync.Locker which does nothing. It's used to guard
// test-only fields when not under test, to avoid runtime atomic
// overhead.
type fakeLocker struct{}
func (fakeLocker) Lock() {}
func (fakeLocker) Unlock() {}
// cloneTLSConfig returns a shallow clone of cfg, or a new zero tls.Config if
// cfg is nil. This is safe to call even if cfg is in active use by a TLS
// client or server.
func cloneTLSConfig(cfg *tls.Config) *tls.Config {
if cfg == nil {
return &tls.Config{}
}
return cfg.Clone()
}
type connLRU struct {
ll *list.List // list.Element.Value is of type *persistConn
m map[*persistConn]*list.Element
}
// add adds pc to the head of the linked list.
func (cl *connLRU) add(pc *persistConn) {
if cl.ll == nil {
cl.ll = list.New()
cl.m = make(map[*persistConn]*list.Element)
}
ele := cl.ll.PushFront(pc)
if _, ok := cl.m[pc]; ok {
panic("persistConn was already in LRU")
}
cl.m[pc] = ele
}
func (cl *connLRU) removeOldest() *persistConn {
ele := cl.ll.Back()
pc := ele.Value.(*persistConn)
cl.ll.Remove(ele)
delete(cl.m, pc)
return pc
}
// remove removes pc from cl.
func (cl *connLRU) remove(pc *persistConn) {
if ele, ok := cl.m[pc]; ok {
cl.ll.Remove(ele)
delete(cl.m, pc)
}
}
// len returns the number of items in the cache.
func (cl *connLRU) len() int {
return len(cl.m)
}
// validPort reports whether p (without the colon) is a valid port in
// a URL, per RFC 3986 Section 3.2.3, which says the port may be
// empty, or only contain digits.
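// For example, validPort("") and validPort("8080") are true, while
// validPort("80a") is false.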
func validPort(p string) bool {
for _, r := range []byte(p) {
if r < '0' || r > '9' {
return false
}
}
return true
}
| [
"\"GODEBUG\""
]
| []
| [
"GODEBUG"
]
| [] | ["GODEBUG"] | go | 1 | 0 | |
lib/system/nfs/common.go | /*
* Copyright 2018-2021, CS Systemes d'Information, http://csgroup.eu
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package nfs
import (
"bytes"
"fmt"
"os"
"strings"
rice "github.com/GeertJohan/go.rice"
"github.com/sirupsen/logrus"
"golang.org/x/net/context"
"github.com/CS-SI/SafeScale/lib/system"
"github.com/CS-SI/SafeScale/lib/utils"
"github.com/CS-SI/SafeScale/lib/utils/cli/enums/outputs"
"github.com/CS-SI/SafeScale/lib/utils/concurrency"
"github.com/CS-SI/SafeScale/lib/utils/fail"
"github.com/CS-SI/SafeScale/lib/utils/retry"
"github.com/CS-SI/SafeScale/lib/utils/template"
"github.com/CS-SI/SafeScale/lib/utils/temporal"
)
//go:generate rice embed-go
// tmplBox is the rice box embedding the script templates used by package nfs
var tmplBox *rice.Box
// getTemplateBox returns the rice box, initializing it on first use
func getTemplateBox() (*rice.Box, fail.Error) {
if tmplBox == nil {
var err error
tmplBox, err = rice.FindBox("../nfs/scripts")
if err != nil {
return nil, fail.ConvertError(err)
}
}
return tmplBox, nil
}
// executeScript executes a script template with parameters in data map
// Returns retcode, stdout, stderr, error
// If error == nil && retcode != 0, the script ran but failed.
// func executeScript(task concurrency.Task, sshconfig system.SSHConfig, name string, data map[string]interface{}) (int, string, string, fail.Error) {
func executeScript(ctx context.Context, sshconfig system.SSHConfig, name string, data map[string]interface{}) (string, fail.Error) {
task, xerr := concurrency.TaskFromContext(ctx)
if xerr != nil {
return "", xerr
}
if task.Aborted() {
return "", fail.AbortedError(nil, "aborted")
}
bashLibrary, xerr := system.GetBashLibrary()
if xerr != nil {
xerr = fail.ExecutionError(xerr)
xerr.Annotate("retcode", 255)
// return 255, "", "", fail.ConvertError(err)
return "", xerr
}
data["reserved_BashLibrary"] = bashLibrary
data["Revision"] = system.REV
scriptHeader := "set -u -o pipefail"
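// SAFESCALE_SCRIPTS_FAIL_FAST switches bash to fail-fast mode with
// xtrace; both "true" (any case) and "1" are accepted values.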
if suffixCandidate := os.Getenv("SAFESCALE_SCRIPTS_FAIL_FAST"); suffixCandidate != "" {
if strings.EqualFold("True", strings.TrimSpace(suffixCandidate)) {
scriptHeader = "set -Eeuxo pipefail"
}
if strings.EqualFold("1", strings.TrimSpace(suffixCandidate)) {
scriptHeader = "set -Eeuxo pipefail"
}
}
data["BashHeader"] = scriptHeader
tmplBox, xerr := getTemplateBox()
if xerr != nil {
xerr = fail.ExecutionError(xerr)
xerr.Annotate("retcode", 255)
return "", xerr
}
// get file content as string
tmplContent, err := tmplBox.String(name)
if err != nil {
// return 255, "", "", fail.ConvertError(err)
xerr = fail.ExecutionError(err)
_ = xerr.Annotate("retcode", 255)
return "", xerr
}
// Prepare the template for execution
tmplPrepared, err := template.Parse(name, tmplContent)
if err != nil {
// return 255, "", "", fail.ConvertError(err)
xerr = fail.ExecutionError(err)
xerr.Annotate("retcode", 255)
// return 255, "", "", fail.ConvertError(err)
return "", xerr
}
var buffer bytes.Buffer
if err := tmplPrepared.Execute(&buffer, data); err != nil {
xerr = fail.ExecutionError(err, "failed to execute template")
xerr.Annotate("retcode", 255)
// return 255, "", "", fail.Wrap(err, "failed to execute template")
return "", xerr
}
content := buffer.String()
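// A literal "set +x" line in the rendered script acts as a sentinel:
// it is stripped here, and the command built below then captures
// bash's xtrace output (BASH_XTRACEFD) to a temporary file that is
// only echoed once the script finishes.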
hidesOutput := strings.Contains(content, "set +x\n")
if hidesOutput {
content = strings.Replace(content, "set +x\n", "\n", 1)
}
// Copy script to remote host with retries if needed
f, xerr := system.CreateTempFileFromString(content, 0600)
if xerr != nil {
xerr.Annotate("retcode", 255)
// return 255, "", "", fail.Wrap(err, "failed to create temporary file")
return "", xerr
}
defer func() {
if derr := utils.LazyRemove(f.Name()); derr != nil {
logrus.Warnf("Error deleting file: %v", derr)
}
}()
filename := utils.TempFolder + "/" + name
xerr = retry.WhileUnsuccessfulDelay5Seconds(
func() error {
retcode, stdout, stderr, innerXErr := sshconfig.Copy(ctx, filename, f.Name(), true)
if innerXErr != nil {
return fail.Wrap(innerXErr, "ssh operation failed")
}
if retcode != 0 {
innerXErr = fail.ExecutionError(nil, "script copy failed: %s, %s", stdout, stderr)
_ = innerXErr.Annotate("retcode", retcode).Annotate("stdout", stdout).Annotate("stderr", stderr)
return innerXErr
}
return nil
},
temporal.GetHostTimeout(),
)
if xerr != nil {
switch xerr.(type) {
case *fail.ErrExecution:
default:
xerr = fail.ExecutionError(xerr, "failed to copy script to remote host")
xerr.Annotate("retcode", 255)
}
// return 255, "", "", fail.Wrap(err, "failed to copy script to remote host")
return "", xerr
}
// if k != nil {
xerr = retry.WhileUnsuccessfulDelay5Seconds(
func() error {
sshCmd, innerXErr := sshconfig.NewSudoCommand(ctx, "which scp")
if innerXErr != nil {
return innerXErr
}
defer func() { _ = sshCmd.Close() }()
_, _, _, innerXErr = sshCmd.RunWithTimeout(ctx, outputs.COLLECT, temporal.GetBigDelay())
if innerXErr != nil {
return fail.Wrap(innerXErr, "ssh operation failed")
}
return nil
},
temporal.GetHostTimeout(),
)
if xerr != nil {
return "", xerr
}
// Execute script on remote host with retries if needed
var (
cmd, stdout, stderr string
retcode int
)
if !hidesOutput {
cmd = fmt.Sprintf("chmod u+rwx %s; bash -c %s; exit ${PIPESTATUS}", filename, filename)
} else {
// cmd = fmt.Sprintf("chmod u+rwx %s; export BASH_XTRACEFD=7; bash -c %s 7> /tmp/captured 2>&1;retcode=${PIPESTATUS};cat /tmp/captured; rm /tmp/captured;exit ${retcode}", filename, filename)
cmd = fmt.Sprintf("chmod u+rwx %s; captf=$(mktemp); export BASH_XTRACEFD=7; bash -c %s 7>$captf 2>&1; rc=${PIPESTATUS}; cat $captf; rm $captf; exit ${rc}", filename, filename)
}
xerr = retry.Action(
func() error {
sshCmd, innerXErr := sshconfig.NewSudoCommand(ctx, cmd)
if innerXErr != nil {
return fail.ExecutionError(innerXErr)
}
defer func() { _ = sshCmd.Close() }()
if retcode, stdout, stderr, innerXErr = sshCmd.RunWithTimeout(ctx, outputs.COLLECT, temporal.GetBigDelay()); innerXErr != nil {
return fail.Wrap(innerXErr, "ssh operation failed")
}
return nil
},
retry.PrevailDone(retry.UnsuccessfulWhereRetcode255(), retry.Timeout(temporal.GetContextTimeout())),
retry.Constant(temporal.GetDefaultDelay()),
nil, nil, nil,
)
if xerr != nil {
switch cErr := xerr.(type) {
case *fail.ErrTimeout:
logrus.Errorf("ErrTimeout running remote script '%s'", name)
xerr := fail.ExecutionError(cErr)
xerr.Annotate("retcode", 255)
// return 255, stdout, stderr, retryErr
return stdout, xerr
case *fail.ErrExecution:
return stdout, cErr
default:
xerr = fail.ExecutionError(xerr)
xerr.Annotate("retcode", 255).Annotate("stderr", "")
// return 255, stdout, stderr, retryErr
return stdout, xerr
}
}
if retcode != 0 {
xerr = fail.ExecutionError(nil, "command exited with error code '%d'", retcode)
_ = xerr.Annotate("retcode", retcode).Annotate("stdout", stdout).Annotate("stderr", stderr)
return stdout, xerr
}
return stdout, nil
}
| [
"\"SAFESCALE_SCRIPTS_FAIL_FAST\""
]
| []
| [
"SAFESCALE_SCRIPTS_FAIL_FAST"
]
| [] | ["SAFESCALE_SCRIPTS_FAIL_FAST"] | go | 1 | 0 | |
bpython/args.py | # The MIT License
#
# Copyright (c) 2008 Bob Farrell
# Copyright (c) 2012-2021 Sebastian Ramacher
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
Module to handle command line argument parsing, for all front-ends.
"""
import argparse
from typing import Tuple
import curtsies
import cwcwidth
import greenlet
import importlib.util
import logging
import os
import pygments
import requests
import sys
from pathlib import Path
from . import __version__, __copyright__
from .config import default_config_path, Config
from .translations import _
logger = logging.getLogger(__name__)
class ArgumentParserFailed(ValueError):
"""Raised by the RaisingOptionParser for a bogus commandline."""
class RaisingArgumentParser(argparse.ArgumentParser):
def error(self, msg):
raise ArgumentParserFailed()
def version_banner(base="bpython") -> str:
return _("{} version {} on top of Python {} {}").format(
base,
__version__,
sys.version.split()[0],
sys.executable,
)
def copyright_banner() -> str:
return _("{} See AUTHORS.rst for details.").format(__copyright__)
def parse(args, extras=None, ignore_stdin=False) -> Tuple:
"""Receive an argument list - if None, use sys.argv - parse all args and
take appropriate action. Also receive optional extra argument: this should
be a tuple of (title, description, callback)
title: The title for the argument group
description: A full description of the argument group
callback: A callback that adds argument to the argument group
e.g.:
def callback(group):
group.add_argument('-f', action='store_true', dest='f', help='Explode')
group.add_argument('-l', action='store_true', dest='l', help='Love')
parse(
['-i', '-m', 'foo.py'],
(
'Front end-specific options',
'A full description of what these options are for',
callback
),
)
Return a tuple of (config, options, exec_args) wherein "config" is the
config object either parsed from a default/specified config file or default
config options, "options" is the parsed options from
ArgumentParser.parse_args, and "exec_args" are the args (if any) to be passed
to the executed file (if any).
"""
if args is None:
args = sys.argv[1:]
parser = RaisingArgumentParser(
usage=_(
"Usage: %(prog)s [options] [file [args]]\n"
"NOTE: If bpython sees an argument it does "
"not know, execution falls back to the "
"regular Python interpreter."
)
)
parser.add_argument(
"--config",
default=default_config_path(),
type=Path,
help=_("Use CONFIG instead of default config file."),
)
parser.add_argument(
"--interactive",
"-i",
action="store_true",
help=_("Drop to bpython shell after running file instead of exiting."),
)
parser.add_argument(
"--quiet",
"-q",
action="store_true",
help=_("Don't flush the output to stdout."),
)
parser.add_argument(
"--version",
"-V",
action="store_true",
help=_("Print version and exit."),
)
parser.add_argument(
"--log-level",
"-l",
choices=("debug", "info", "warning", "error", "critical"),
default="error",
help=_("Set log level for logging"),
)
parser.add_argument(
"--log-output",
"-L",
help=_("Log output file"),
)
if extras is not None:
extras_group = parser.add_argument_group(extras[0], extras[1])
extras[2](extras_group)
# collect all the remaining arguments into a list
parser.add_argument(
"args",
nargs=argparse.REMAINDER,
help=_(
"File to execute and additional arguments passed on to the executed script."
),
)
try:
options = parser.parse_args(args)
except ArgumentParserFailed:
# Just let Python handle this
os.execv(sys.executable, [sys.executable] + args)
if options.version:
print(version_banner())
print(copyright_banner())
raise SystemExit
if not ignore_stdin and not (sys.stdin.isatty() and sys.stdout.isatty()):
# Just let Python handle this
os.execv(sys.executable, [sys.executable] + args)
# Configure logging handler
bpython_logger = logging.getLogger("bpython")
curtsies_logger = logging.getLogger("curtsies")
bpython_logger.setLevel(options.log_level.upper())
curtsies_logger.setLevel(options.log_level.upper())
if options.log_output:
handler = logging.FileHandler(filename=options.log_output)
handler.setFormatter(
logging.Formatter(
"%(asctime)s: %(name)s: %(levelname)s: %(message)s"
)
)
bpython_logger.addHandler(handler)
curtsies_logger.addHandler(handler)
bpython_logger.propagate = curtsies_logger.propagate = False
else:
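# NullHandler avoids "no handlers could be found" warnings when no
# log file was requested.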
bpython_logger.addHandler(logging.NullHandler())
curtsies_logger.addHandler(logging.NullHandler())
logger.info(f"Starting bpython {__version__}")
logger.info(f"Python {sys.executable}: {sys.version_info}")
logger.info(f"curtsies: {curtsies.__version__}")
logger.info(f"cwcwidth: {cwcwidth.__version__}")
logger.info(f"greenlet: {greenlet.__version__}")
logger.info(f"pygments: {pygments.__version__}")
logger.info(f"requests: {requests.__version__}")
logger.info(
"environment:\n{}".format(
"\n".join(
f"{key}: {value}"
for key, value in sorted(os.environ.items())
if key.startswith("LC")
or key.startswith("LANG")
or key == "TERM"
)
)
)
return Config(options.config), options, options.args
def exec_code(interpreter, args):
"""
Helper to execute code in a given interpreter, e.g. to implement the behavior of python3 [-i] file.py
args should be a [faked] sys.argv.
"""
try:
with open(args[0]) as sourcefile:
source = sourcefile.read()
except OSError as e:
# print an error and exit (if -i is specified the calling code will continue)
print(f"bpython: can't open file '{args[0]}': {e}", file=sys.stderr)
raise SystemExit(e.errno)
old_argv, sys.argv = sys.argv, args
sys.path.insert(0, os.path.abspath(os.path.dirname(args[0])))
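# Run the file inside a fresh module named __console__ so that its
# globals seed the interpreter's locals, mirroring `python -i file.py`.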
spec = importlib.util.spec_from_loader("__console__", loader=None)
mod = importlib.util.module_from_spec(spec)
sys.modules["__console__"] = mod
interpreter.locals.update(mod.__dict__)
interpreter.locals["__file__"] = args[0]
interpreter.runsource(source, args[0], "exec")
sys.argv = old_argv
| []
| []
| []
| [] | [] | python | 0 | 0 | |
fhirclient/r4models/organization_tests.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated from FHIR 4.0.0-a53ec6ee1b on 2019-05-07.
# 2019, SMART Health IT.
import os
import io
import unittest
import json
from . import organization
from .fhirdate import FHIRDate
class OrganizationTests(unittest.TestCase):
def instantiate_from(self, filename):
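# Fixture JSON is read from FHIR_UNITTEST_DATADIR when set, else from
# the current working directory.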
datadir = os.environ.get('FHIR_UNITTEST_DATADIR') or ''
with io.open(os.path.join(datadir, filename), 'r', encoding='utf-8') as handle:
js = json.load(handle)
self.assertEqual("Organization", js["resourceType"])
return organization.Organization(js)
def testOrganization1(self):
inst = self.instantiate_from("organization-example.json")
self.assertIsNotNone(inst, "Must have instantiated a Organization instance")
self.implOrganization1(inst)
js = inst.as_json()
self.assertEqual("Organization", js["resourceType"])
inst2 = organization.Organization(js)
self.implOrganization1(inst2)
def implOrganization1(self, inst):
self.assertEqual(inst.address[0].city, "Ann Arbor")
self.assertEqual(inst.address[0].country, "USA")
self.assertEqual(inst.address[0].line[0], "3300 Washtenaw Avenue, Suite 227")
self.assertEqual(inst.address[0].postalCode, "48104")
self.assertEqual(inst.address[0].state, "MI")
self.assertEqual(inst.alias[0], "HL7 International")
self.assertEqual(inst.id, "hl7")
self.assertEqual(inst.meta.tag[0].code, "HTEST")
self.assertEqual(inst.meta.tag[0].display, "test health data")
self.assertEqual(inst.meta.tag[0].system, "http://terminology.hl7.org/CodeSystem/v3-ActReason")
self.assertEqual(inst.name, "Health Level Seven International")
self.assertEqual(inst.telecom[0].system, "phone")
self.assertEqual(inst.telecom[0].value, "(+1) 734-677-7777")
self.assertEqual(inst.telecom[1].system, "fax")
self.assertEqual(inst.telecom[1].value, "(+1) 734-677-6622")
self.assertEqual(inst.telecom[2].system, "email")
self.assertEqual(inst.telecom[2].value, "[email protected]")
self.assertEqual(inst.text.status, "generated")
def testOrganization2(self):
inst = self.instantiate_from("organization-example-mmanu.json")
self.assertIsNotNone(inst, "Must have instantiated a Organization instance")
self.implOrganization2(inst)
js = inst.as_json()
self.assertEqual("Organization", js["resourceType"])
inst2 = organization.Organization(js)
self.implOrganization2(inst2)
def implOrganization2(self, inst):
self.assertTrue(inst.active)
self.assertEqual(inst.address[0].country, "Swizterland")
self.assertEqual(inst.id, "mmanu")
self.assertEqual(inst.meta.tag[0].code, "HTEST")
self.assertEqual(inst.meta.tag[0].display, "test health data")
self.assertEqual(inst.meta.tag[0].system, "http://terminology.hl7.org/CodeSystem/v3-ActReason")
self.assertEqual(inst.name, "Acme Corporation")
self.assertEqual(inst.text.status, "generated")
def testOrganization3(self):
inst = self.instantiate_from("organization-example-gastro.json")
self.assertIsNotNone(inst, "Must have instantiated a Organization instance")
self.implOrganization3(inst)
js = inst.as_json()
self.assertEqual("Organization", js["resourceType"])
inst2 = organization.Organization(js)
self.implOrganization3(inst2)
def implOrganization3(self, inst):
self.assertEqual(inst.id, "1")
self.assertEqual(inst.identifier[0].system, "http://www.acme.org.au/units")
self.assertEqual(inst.identifier[0].value, "Gastro")
self.assertEqual(inst.meta.tag[0].code, "HTEST")
self.assertEqual(inst.meta.tag[0].display, "test health data")
self.assertEqual(inst.meta.tag[0].system, "http://terminology.hl7.org/CodeSystem/v3-ActReason")
self.assertEqual(inst.name, "Gastroenterology")
self.assertEqual(inst.telecom[0].system, "phone")
self.assertEqual(inst.telecom[0].use, "mobile")
self.assertEqual(inst.telecom[0].value, "+1 555 234 3523")
self.assertEqual(inst.telecom[1].system, "email")
self.assertEqual(inst.telecom[1].use, "work")
self.assertEqual(inst.telecom[1].value, "[email protected]")
self.assertEqual(inst.text.status, "generated")
def testOrganization4(self):
inst = self.instantiate_from("organization-example-mihealth.json")
self.assertIsNotNone(inst, "Must have instantiated a Organization instance")
self.implOrganization4(inst)
js = inst.as_json()
self.assertEqual("Organization", js["resourceType"])
inst2 = organization.Organization(js)
self.implOrganization4(inst2)
def implOrganization4(self, inst):
self.assertEqual(inst.alias[0], "Michigan State Department of Health")
self.assertEqual(inst.id, "3")
self.assertEqual(inst.identifier[0].system, "http://michigan.gov/state-dept-ids")
self.assertEqual(inst.identifier[0].value, "25")
self.assertEqual(inst.meta.tag[0].code, "HTEST")
self.assertEqual(inst.meta.tag[0].display, "test health data")
self.assertEqual(inst.meta.tag[0].system, "http://terminology.hl7.org/CodeSystem/v3-ActReason")
self.assertEqual(inst.name, "Michigan Health")
self.assertEqual(inst.text.status, "generated")
def testOrganization5(self):
inst = self.instantiate_from("organization-example-lab.json")
self.assertIsNotNone(inst, "Must have instantiated a Organization instance")
self.implOrganization5(inst)
js = inst.as_json()
self.assertEqual("Organization", js["resourceType"])
inst2 = organization.Organization(js)
self.implOrganization5(inst2)
def implOrganization5(self, inst):
self.assertEqual(inst.id, "1832473e-2fe0-452d-abe9-3cdb9879522f")
self.assertEqual(inst.identifier[0].system, "http://www.acme.org.au/units")
self.assertEqual(inst.identifier[0].value, "ClinLab")
self.assertEqual(inst.meta.tag[0].code, "HTEST")
self.assertEqual(inst.meta.tag[0].display, "test health data")
self.assertEqual(inst.meta.tag[0].system, "http://terminology.hl7.org/CodeSystem/v3-ActReason")
self.assertEqual(inst.name, "Clinical Lab")
self.assertEqual(inst.telecom[0].system, "phone")
self.assertEqual(inst.telecom[0].use, "work")
self.assertEqual(inst.telecom[0].value, "+1 555 234 1234")
self.assertEqual(inst.telecom[1].system, "email")
self.assertEqual(inst.telecom[1].use, "work")
self.assertEqual(inst.telecom[1].value, "[email protected]")
self.assertEqual(inst.text.status, "generated")
def testOrganization6(self):
inst = self.instantiate_from("organization-example-f002-burgers-card.json")
self.assertIsNotNone(inst, "Must have instantiated a Organization instance")
self.implOrganization6(inst)
js = inst.as_json()
self.assertEqual("Organization", js["resourceType"])
inst2 = organization.Organization(js)
self.implOrganization6(inst2)
def implOrganization6(self, inst):
self.assertTrue(inst.active)
self.assertEqual(inst.address[0].line[0], "South Wing, floor 2")
self.assertEqual(inst.contact[0].address.line[0], "South Wing, floor 2")
self.assertEqual(inst.contact[0].name.text, "mevr. D. de Haan")
self.assertEqual(inst.contact[0].purpose.coding[0].code, "ADMIN")
self.assertEqual(inst.contact[0].purpose.coding[0].system, "http://terminology.hl7.org/CodeSystem/contactentity-type")
self.assertEqual(inst.contact[0].telecom[0].system, "phone")
self.assertEqual(inst.contact[0].telecom[0].value, "022-655 2321")
self.assertEqual(inst.contact[0].telecom[1].system, "email")
self.assertEqual(inst.contact[0].telecom[1].value, "[email protected]")
self.assertEqual(inst.contact[0].telecom[2].system, "fax")
self.assertEqual(inst.contact[0].telecom[2].value, "022-655 2322")
self.assertEqual(inst.id, "f002")
self.assertEqual(inst.meta.tag[0].code, "HTEST")
self.assertEqual(inst.meta.tag[0].display, "test health data")
self.assertEqual(inst.meta.tag[0].system, "http://terminology.hl7.org/CodeSystem/v3-ActReason")
self.assertEqual(inst.name, "Burgers UMC Cardiology unit")
self.assertEqual(inst.telecom[0].system, "phone")
self.assertEqual(inst.telecom[0].value, "022-655 2320")
self.assertEqual(inst.text.status, "generated")
self.assertEqual(inst.type[0].coding[0].code, "dept")
self.assertEqual(inst.type[0].coding[0].display, "Hospital Department")
self.assertEqual(inst.type[0].coding[0].system, "http://terminology.hl7.org/CodeSystem/organization-type")
def testOrganization7(self):
inst = self.instantiate_from("organization-example-f201-aumc.json")
self.assertIsNotNone(inst, "Must have instantiated a Organization instance")
self.implOrganization7(inst)
js = inst.as_json()
self.assertEqual("Organization", js["resourceType"])
inst2 = organization.Organization(js)
self.implOrganization7(inst2)
def implOrganization7(self, inst):
self.assertTrue(inst.active)
self.assertEqual(inst.address[0].city, "Den Helder")
self.assertEqual(inst.address[0].country, "NLD")
self.assertEqual(inst.address[0].line[0], "Walvisbaai 3")
self.assertEqual(inst.address[0].postalCode, "2333ZA")
self.assertEqual(inst.address[0].use, "work")
self.assertEqual(inst.contact[0].address.city, "Den helder")
self.assertEqual(inst.contact[0].address.country, "NLD")
self.assertEqual(inst.contact[0].address.line[0], "Walvisbaai 3")
self.assertEqual(inst.contact[0].address.line[1], "Gebouw 2")
self.assertEqual(inst.contact[0].address.postalCode, "2333ZA")
self.assertEqual(inst.contact[0].name.family, "Brand")
self.assertEqual(inst.contact[0].name.given[0], "Ronald")
self.assertEqual(inst.contact[0].name.prefix[0], "Prof.Dr.")
self.assertEqual(inst.contact[0].name.text, "Professor Brand")
self.assertEqual(inst.contact[0].name.use, "official")
self.assertEqual(inst.contact[0].telecom[0].system, "phone")
self.assertEqual(inst.contact[0].telecom[0].use, "work")
self.assertEqual(inst.contact[0].telecom[0].value, "+31715269702")
self.assertEqual(inst.id, "f201")
self.assertEqual(inst.identifier[0].system, "http://www.zorgkaartnederland.nl/")
self.assertEqual(inst.identifier[0].use, "official")
self.assertEqual(inst.identifier[0].value, "Artis University Medical Center")
self.assertEqual(inst.meta.tag[0].code, "HTEST")
self.assertEqual(inst.meta.tag[0].display, "test health data")
self.assertEqual(inst.meta.tag[0].system, "http://terminology.hl7.org/CodeSystem/v3-ActReason")
self.assertEqual(inst.name, "Artis University Medical Center (AUMC)")
self.assertEqual(inst.telecom[0].system, "phone")
self.assertEqual(inst.telecom[0].use, "work")
self.assertEqual(inst.telecom[0].value, "+31715269111")
self.assertEqual(inst.text.status, "generated")
self.assertEqual(inst.type[0].coding[0].code, "405608006")
self.assertEqual(inst.type[0].coding[0].display, "Academic Medical Center")
self.assertEqual(inst.type[0].coding[0].system, "http://snomed.info/sct")
self.assertEqual(inst.type[0].coding[1].code, "V6")
self.assertEqual(inst.type[0].coding[1].display, "University Medical Hospital")
self.assertEqual(inst.type[0].coding[1].system, "urn:oid:2.16.840.1.113883.2.4.15.1060")
self.assertEqual(inst.type[0].coding[2].code, "prov")
self.assertEqual(inst.type[0].coding[2].display, "Healthcare Provider")
self.assertEqual(inst.type[0].coding[2].system, "http://terminology.hl7.org/CodeSystem/organization-type")
def testOrganization8(self):
inst = self.instantiate_from("organization-example-good-health-care.json")
self.assertIsNotNone(inst, "Must have instantiated a Organization instance")
self.implOrganization8(inst)
js = inst.as_json()
self.assertEqual("Organization", js["resourceType"])
inst2 = organization.Organization(js)
self.implOrganization8(inst2)
def implOrganization8(self, inst):
self.assertEqual(inst.id, "2.16.840.1.113883.19.5")
self.assertEqual(inst.identifier[0].system, "urn:ietf:rfc:3986")
self.assertEqual(inst.identifier[0].value, "2.16.840.1.113883.19.5")
self.assertEqual(inst.meta.tag[0].code, "HTEST")
self.assertEqual(inst.meta.tag[0].display, "test health data")
self.assertEqual(inst.meta.tag[0].system, "http://terminology.hl7.org/CodeSystem/v3-ActReason")
self.assertEqual(inst.name, "Good Health Clinic")
self.assertEqual(inst.text.status, "generated")
def testOrganization9(self):
inst = self.instantiate_from("organization-example-f001-burgers.json")
        self.assertIsNotNone(inst, "Must have instantiated an Organization instance")
self.implOrganization9(inst)
js = inst.as_json()
self.assertEqual("Organization", js["resourceType"])
inst2 = organization.Organization(js)
self.implOrganization9(inst2)
def implOrganization9(self, inst):
self.assertEqual(inst.address[0].city, "Den Burg")
self.assertEqual(inst.address[0].country, "NLD")
self.assertEqual(inst.address[0].line[0], "Galapagosweg 91")
self.assertEqual(inst.address[0].postalCode, "9105 PZ")
self.assertEqual(inst.address[0].use, "work")
self.assertEqual(inst.address[1].city, "Den Burg")
self.assertEqual(inst.address[1].country, "NLD")
self.assertEqual(inst.address[1].line[0], "PO Box 2311")
self.assertEqual(inst.address[1].postalCode, "9100 AA")
self.assertEqual(inst.address[1].use, "work")
self.assertEqual(inst.contact[0].purpose.coding[0].code, "PRESS")
self.assertEqual(inst.contact[0].purpose.coding[0].system, "http://terminology.hl7.org/CodeSystem/contactentity-type")
self.assertEqual(inst.contact[0].telecom[0].system, "phone")
self.assertEqual(inst.contact[0].telecom[0].value, "022-655 2334")
self.assertEqual(inst.contact[1].purpose.coding[0].code, "PATINF")
self.assertEqual(inst.contact[1].purpose.coding[0].system, "http://terminology.hl7.org/CodeSystem/contactentity-type")
self.assertEqual(inst.contact[1].telecom[0].system, "phone")
self.assertEqual(inst.contact[1].telecom[0].value, "022-655 2335")
self.assertEqual(inst.id, "f001")
self.assertEqual(inst.identifier[0].system, "urn:oid:2.16.528.1")
self.assertEqual(inst.identifier[0].use, "official")
self.assertEqual(inst.identifier[0].value, "91654")
self.assertEqual(inst.identifier[1].system, "urn:oid:2.16.840.1.113883.2.4.6.1")
self.assertEqual(inst.identifier[1].use, "usual")
self.assertEqual(inst.identifier[1].value, "17-0112278")
self.assertEqual(inst.meta.tag[0].code, "HTEST")
self.assertEqual(inst.meta.tag[0].display, "test health data")
self.assertEqual(inst.meta.tag[0].system, "http://terminology.hl7.org/CodeSystem/v3-ActReason")
self.assertEqual(inst.name, "Burgers University Medical Center")
self.assertEqual(inst.telecom[0].system, "phone")
self.assertEqual(inst.telecom[0].use, "work")
self.assertEqual(inst.telecom[0].value, "022-655 2300")
self.assertEqual(inst.text.status, "generated")
self.assertEqual(inst.type[0].coding[0].code, "V6")
self.assertEqual(inst.type[0].coding[0].display, "University Medical Hospital")
self.assertEqual(inst.type[0].coding[0].system, "urn:oid:2.16.840.1.113883.2.4.15.1060")
self.assertEqual(inst.type[0].coding[1].code, "prov")
self.assertEqual(inst.type[0].coding[1].display, "Healthcare Provider")
self.assertEqual(inst.type[0].coding[1].system, "http://terminology.hl7.org/CodeSystem/organization-type")
def testOrganization10(self):
inst = self.instantiate_from("organization-example-insurer.json")
        self.assertIsNotNone(inst, "Must have instantiated an Organization instance")
self.implOrganization10(inst)
js = inst.as_json()
self.assertEqual("Organization", js["resourceType"])
inst2 = organization.Organization(js)
self.implOrganization10(inst2)
def implOrganization10(self, inst):
self.assertEqual(inst.alias[0], "ABC Insurance")
self.assertEqual(inst.id, "2")
self.assertEqual(inst.identifier[0].system, "urn:oid:2.16.840.1.113883.3.19.2.3")
self.assertEqual(inst.identifier[0].value, "666666")
self.assertEqual(inst.meta.tag[0].code, "HTEST")
self.assertEqual(inst.meta.tag[0].display, "test health data")
self.assertEqual(inst.meta.tag[0].system, "http://terminology.hl7.org/CodeSystem/v3-ActReason")
self.assertEqual(inst.name, "XYZ Insurance")
self.assertEqual(inst.text.status, "generated")
| []
| []
| [
"FHIR_UNITTEST_DATADIR"
]
| [] | ["FHIR_UNITTEST_DATADIR"] | python | 1 | 0 | |
common/persistence/tests/mysql_test.go | // The MIT License
//
// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved.
//
// Copyright (c) 2020 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package tests
import (
"fmt"
"net"
"path/filepath"
"strconv"
"testing"
"github.com/stretchr/testify/suite"
"go.temporal.io/server/common/config"
"go.temporal.io/server/common/log"
p "go.temporal.io/server/common/persistence"
"go.temporal.io/server/common/persistence/sql"
"go.temporal.io/server/common/persistence/sql/sqlplugin"
_ "go.temporal.io/server/common/persistence/sql/sqlplugin/mysql"
"go.temporal.io/server/common/resolver"
"go.temporal.io/server/common/shuffle"
"go.temporal.io/server/environment"
)
// TODO merge the initialization with existing persistence setup
const (
testMySQLClusterName = "temporal_mysql_cluster"
testMySQLUser = "temporal"
testMySQLPassword = "temporal"
testMySQLConnectionProtocol = "tcp"
testMySQLDatabaseNamePrefix = "test_"
testMySQLDatabaseNameSuffix = "temporal_persistence"
// TODO hard code this dir for now
// need to merge persistence test config / initialization in one place
testMySQLExecutionSchema = "../../../schema/mysql/v57/temporal/schema.sql"
testMySQLVisibilitySchema = "../../../schema/mysql/v57/visibility/schema.sql"
)
func TestMySQLHistoryStoreSuite(t *testing.T) {
cfg := NewMySQLConfig()
SetupMySQLDatabase(cfg)
SetupMySQLSchema(cfg)
logger := log.NewNoopLogger()
factory := sql.NewFactory(
*cfg,
resolver.NewNoopResolver(),
testMySQLClusterName,
logger,
)
store, err := factory.NewHistoryStore()
if err != nil {
t.Fatalf("unable to create MySQL DB: %v", err)
}
defer func() {
factory.Close()
TearDownMySQLDatabase(cfg)
}()
s := newHistoryEventsSuite(t, store, logger)
suite.Run(t, s)
}
// NewMySQLConfig returns a new MySQL config for test
func NewMySQLConfig() *config.SQL {
return &config.SQL{
User: testMySQLUser,
Password: testMySQLPassword,
ConnectAddr: net.JoinHostPort(
environment.GetMySQLAddress(),
strconv.Itoa(environment.GetMySQLPort()),
),
ConnectProtocol: testMySQLConnectionProtocol,
PluginName: "mysql",
DatabaseName: testMySQLDatabaseNamePrefix + shuffle.String(testMySQLDatabaseNameSuffix),
}
}
func SetupMySQLDatabase(cfg *config.SQL) {
adminCfg := *cfg
// NOTE need to connect with empty name to create new database
adminCfg.DatabaseName = ""
db, err := sql.NewSQLAdminDB(sqlplugin.DbKindUnknown, &adminCfg, resolver.NewNoopResolver())
if err != nil {
panic(fmt.Sprintf("unable to create MySQL admin DB: %v", err))
}
defer func() { _ = db.Close() }()
err = db.CreateDatabase(cfg.DatabaseName)
if err != nil {
panic(fmt.Sprintf("unable to create MySQL database: %v", err))
}
}
func SetupMySQLSchema(cfg *config.SQL) {
db, err := sql.NewSQLAdminDB(sqlplugin.DbKindUnknown, cfg, resolver.NewNoopResolver())
if err != nil {
panic(fmt.Sprintf("unable to create MySQL admin DB: %v", err))
}
defer func() { _ = db.Close() }()
schemaPath, err := filepath.Abs(testMySQLExecutionSchema)
if err != nil {
panic(err)
}
statements, err := p.LoadAndSplitQuery([]string{schemaPath})
if err != nil {
panic(err)
}
for _, stmt := range statements {
if err = db.Exec(stmt); err != nil {
panic(err)
}
}
schemaPath, err = filepath.Abs(testMySQLVisibilitySchema)
if err != nil {
panic(err)
}
statements, err = p.LoadAndSplitQuery([]string{schemaPath})
if err != nil {
panic(err)
}
for _, stmt := range statements {
if err = db.Exec(stmt); err != nil {
panic(err)
}
}
}
func TearDownMySQLDatabase(cfg *config.SQL) {
adminCfg := *cfg
	// NOTE need to connect with empty database name in order to drop the database
adminCfg.DatabaseName = ""
db, err := sql.NewSQLAdminDB(sqlplugin.DbKindUnknown, &adminCfg, resolver.NewNoopResolver())
if err != nil {
panic(fmt.Sprintf("unable to create MySQL admin DB: %v", err))
}
defer func() { _ = db.Close() }()
err = db.DropDatabase(cfg.DatabaseName)
if err != nil {
panic(fmt.Sprintf("unable to drop MySQL database: %v", err))
}
}
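
// Illustrative invocation sketch (editor's addition, not part of the original
// file): the suite expects a reachable MySQL instance at the address/port
// resolved by the environment package, then creates, migrates and drops a
// uniquely named database per run, e.g.
//
//	go test -run TestMySQLHistoryStoreSuite ./common/persistence/tests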
| []
| []
| []
| [] | [] | go | null | null | null |
impleter/publishers/src/eb_s3_firehose_ner_label_json_lambda_function.py | """ Eventbridge compatible Lambda function that publishes Named Entity Recognition (NER) payloads to the Kinesis Firehose delivery stream.
"""
from typing import Any, Dict
import os
import sys
import urllib.parse
import json
import boto3
from jsonschema import validate
from schema_validators import NER_LABEL_SCHEMA
import logging
logger = logging.getLogger()
logger.setLevel(logging.INFO)
json_extension = "json"
jsonl_extension = "jsonl"
valid_file_extensions = [json_extension, jsonl_extension]
def lambda_handler(event: Dict[str, Any], context):
"""Eventbridge compatible Lambda function that published Named Entity Recognition (NER) payloads stored in S3 to the Kinesis Firehose delivery stream.
Args:
event: Eventbridge message (dict)
context: Lambda context contains methods and properties that provide information about the invocation, function, and execution environment (dict)
Returns:
list (Kinesis Firehose delivery stream response dict)
Raises:
"""
stream_name = os.getenv("STREAM_NAME", None)
region = os.getenv("AWS_REGION", None)
bucket = event['detail']['bucket']['name']
key = event['detail']['object']['key']
    resp = list()
    output_json = None  # initialised up front so the except handler below can always reference it
logger.info("Processing s3 key: s3://" + os.path.join(str(bucket), str(key)))
try:
file_extension = key.split(".")[-1]
if file_extension not in valid_file_extensions:
raise ValueError("file must be [file_name]."+ file_extension +". Ignoring file: " + str(key))
key = urllib.parse.unquote_plus(key)
s3 = boto3.resource('s3')
obj = s3.Object(bucket, key)
output_json = obj.get()['Body'].read().decode("utf-8")
kinesis_client = boto3.client('firehose', region)
if file_extension == jsonl_extension:
json_list = output_json.split('\n')
records = list()
for json_elem in json_list:
try:
validate(json.loads(json_elem), NER_LABEL_SCHEMA)
records.append({"Data": json_elem})
except Exception:
ex_type, ex_value, ex_traceback = sys.exc_info()
logger.error("bucket: {bucket}, key: {key}, json_elem: {json_elem}, exception_type: {ex_type}, exception_value: {ex_value}, exception_traceback: {ex_traceback}".format(bucket=bucket, key=key, json_elem=json_elem, ex_type=ex_type, ex_value=ex_value, ex_traceback=ex_traceback))
resp.append(kinesis_client.put_record_batch(
DeliveryStreamName=stream_name,
Records=records))
else:
validate(json.loads(output_json), NER_LABEL_SCHEMA)
resp.append(kinesis_client.put_record(
DeliveryStreamName=stream_name,
Record={"Data": output_json}))
except Exception:
ex_type, ex_value, ex_traceback = sys.exc_info()
logger.error("bucket: {bucket}, key: {key}, output_json: {output_json}, exception_type: {ex_type}, exception_value: {ex_value}, exception_traceback: {ex_traceback}".format(bucket=bucket, key=key, output_json=output_json, ex_type=ex_type, ex_value=ex_value, ex_traceback=ex_traceback))
return resp
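
# --- Illustrative usage sketch (editor's addition, not part of the original
# module). Shows the minimal EventBridge "Object Created" payload shape the
# handler above consumes; the bucket and key names are hypothetical, and
# actually invoking the handler requires AWS credentials plus the STREAM_NAME
# and AWS_REGION environment variables.
#
# sample_event = {
#     "detail": {
#         "bucket": {"name": "example-ner-bucket"},
#         "object": {"key": "labels/batch-0001.jsonl"},
#     }
# }
# lambda_handler(sample_event, None)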
| []
| []
| [
"STREAM_NAME",
"AWS_REGION"
]
| [] | ["STREAM_NAME", "AWS_REGION"] | python | 2 | 0 | |
cmd/service-controller/controller.go | package main
import (
"context"
"crypto/tls"
jsonencoding "encoding/json"
"fmt"
"log"
"os"
"reflect"
"strings"
"time"
"github.com/yzhelezko/skupper/pkg/utils"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
apimachinerytypes "k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/intstr"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/wait"
appsv1informer "k8s.io/client-go/informers/apps/v1"
corev1informer "k8s.io/client-go/informers/core/v1"
"k8s.io/client-go/informers/internalinterfaces"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/util/workqueue"
amqp "github.com/interconnectedcloud/go-amqp"
"github.com/yzhelezko/skupper/api/types"
"github.com/yzhelezko/skupper/client"
"github.com/yzhelezko/skupper/pkg/event"
"github.com/yzhelezko/skupper/pkg/kube"
"github.com/yzhelezko/skupper/pkg/qdr"
)
type Controller struct {
origin string
vanClient *client.VanClient
bridgeDefInformer cache.SharedIndexInformer
svcDefInformer cache.SharedIndexInformer
svcInformer cache.SharedIndexInformer
headlessInformer cache.SharedIndexInformer
// control loop state:
events workqueue.RateLimitingInterface
bindings map[string]*ServiceBindings
ports *FreePorts
// service_sync state:
disableServiceSync bool
tlsConfig *tls.Config
amqpClient *amqp.Client
amqpSession *amqp.Session
byOrigin map[string]map[string]types.ServiceInterface
localServices map[string]types.ServiceInterface
byName map[string]types.ServiceInterface
desiredServices map[string]types.ServiceInterface
heardFrom map[string]time.Time
definitionMonitor *DefinitionMonitor
consoleServer *ConsoleServer
siteQueryServer *SiteQueryServer
configSync *ConfigSync
claimVerifier *ClaimVerifier
tokenHandler *SecretController
claimHandler *SecretController
}
const (
ServiceControllerEvent string = "ServiceControllerEvent"
ServiceControllerError string = "ServiceControllerError"
ServiceControllerCreateEvent string = "ServiceControllerCreateEvent"
ServiceControllerUpdateEvent string = "ServiceControllerUpdateEvent"
ServiceControllerDeleteEvent string = "ServiceControllerDeleteEvent"
)
func hasProxyAnnotation(service corev1.Service) bool {
	_, ok := service.ObjectMeta.Annotations[types.ProxyQualifier]
	return ok
}
func getProxyName(name string) string {
return name + "-proxy"
}
func getServiceName(name string) string {
return strings.TrimSuffix(name, "-proxy")
}
func hasSkupperAnnotation(service corev1.Service, annotation string) bool {
_, ok := service.ObjectMeta.Annotations[annotation]
return ok
}
func hasRouterSelector(service corev1.Service) bool {
value, ok := service.Spec.Selector[types.ComponentAnnotation]
return ok && value == types.RouterComponent
}
func getApplicationSelector(service *corev1.Service) string {
if hasRouterSelector(*service) {
selector := map[string]string{}
for key, value := range service.Spec.Selector {
if key != types.ComponentAnnotation && !(key == "application" && value == "skupper-router") {
selector[key] = value
}
}
return utils.StringifySelector(selector)
} else {
return utils.StringifySelector(service.Spec.Selector)
}
}
func hasOriginalSelector(service corev1.Service) bool {
return hasSkupperAnnotation(service, types.OriginalSelectorQualifier)
}
func hasOriginalTargetPort(service corev1.Service) bool {
return hasSkupperAnnotation(service, types.OriginalTargetPortQualifier)
}
func hasOriginalAssigned(service corev1.Service) bool {
return hasSkupperAnnotation(service, types.OriginalAssignedQualifier)
}
func NewController(cli *client.VanClient, origin string, tlsConfig *tls.Config, disableServiceSync bool) (*Controller, error) {
// create informers
svcInformer := corev1informer.NewServiceInformer(
cli.KubeClient,
cli.Namespace,
time.Second*30,
cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc})
svcDefInformer := corev1informer.NewFilteredConfigMapInformer(
cli.KubeClient,
cli.Namespace,
time.Second*30,
cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc},
internalinterfaces.TweakListOptionsFunc(func(options *metav1.ListOptions) {
options.FieldSelector = "metadata.name=skupper-services"
}))
bridgeDefInformer := corev1informer.NewFilteredConfigMapInformer(
cli.KubeClient,
cli.Namespace,
time.Second*30,
cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc},
internalinterfaces.TweakListOptionsFunc(func(options *metav1.ListOptions) {
options.FieldSelector = "metadata.name=skupper-internal"
}))
headlessInformer := appsv1informer.NewFilteredStatefulSetInformer(
cli.KubeClient,
cli.Namespace,
time.Second*30,
cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc},
internalinterfaces.TweakListOptionsFunc(func(options *metav1.ListOptions) {
options.LabelSelector = "internal.skupper.io/type=proxy"
}))
events := workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "skupper-service-controller")
controller := &Controller{
vanClient: cli,
origin: origin,
tlsConfig: tlsConfig,
bridgeDefInformer: bridgeDefInformer,
svcDefInformer: svcDefInformer,
svcInformer: svcInformer,
headlessInformer: headlessInformer,
events: events,
ports: newFreePorts(),
disableServiceSync: disableServiceSync,
}
// Organize service definitions
controller.byOrigin = make(map[string]map[string]types.ServiceInterface)
controller.localServices = make(map[string]types.ServiceInterface)
controller.byName = make(map[string]types.ServiceInterface)
controller.desiredServices = make(map[string]types.ServiceInterface)
controller.heardFrom = make(map[string]time.Time)
log.Println("Setting up event handlers")
svcDefInformer.AddEventHandler(controller.newEventHandler("servicedefs", AnnotatedKey, ConfigMapResourceVersionTest))
bridgeDefInformer.AddEventHandler(controller.newEventHandler("bridges", AnnotatedKey, ConfigMapResourceVersionTest))
svcInformer.AddEventHandler(controller.newEventHandler("actual-services", AnnotatedKey, ServiceResourceVersionTest))
headlessInformer.AddEventHandler(controller.newEventHandler("statefulset", AnnotatedKey, StatefulSetResourceVersionTest))
controller.consoleServer = newConsoleServer(cli, tlsConfig)
controller.siteQueryServer = newSiteQueryServer(cli, tlsConfig)
controller.definitionMonitor = newDefinitionMonitor(controller.origin, controller.vanClient, controller.svcDefInformer, controller.svcInformer)
controller.configSync = newConfigSync(controller.bridgeDefInformer, tlsConfig)
if enableClaimVerifier() {
controller.claimVerifier = newClaimVerifier(controller.vanClient)
}
controller.tokenHandler = newTokenHandler(controller.vanClient, origin)
controller.claimHandler = newClaimHandler(controller.vanClient, origin)
return controller, nil
}
type ResourceVersionTest func(a interface{}, b interface{}) bool
func ConfigMapResourceVersionTest(a interface{}, b interface{}) bool {
aa := a.(*corev1.ConfigMap)
bb := b.(*corev1.ConfigMap)
return aa.ResourceVersion == bb.ResourceVersion
}
func PodResourceVersionTest(a interface{}, b interface{}) bool {
aa := a.(*corev1.Pod)
bb := b.(*corev1.Pod)
return aa.ResourceVersion == bb.ResourceVersion
}
func ServiceResourceVersionTest(a interface{}, b interface{}) bool {
aa := a.(*corev1.Service)
bb := b.(*corev1.Service)
return aa.ResourceVersion == bb.ResourceVersion
}
func StatefulSetResourceVersionTest(a interface{}, b interface{}) bool {
aa := a.(*appsv1.StatefulSet)
bb := b.(*appsv1.StatefulSet)
return aa.ResourceVersion == bb.ResourceVersion
}
type CacheKeyStrategy func(category string, object interface{}) (string, error)
func AnnotatedKey(category string, obj interface{}) (string, error) {
key, err := cache.MetaNamespaceKeyFunc(obj)
if err != nil {
return "", err
}
return category + "@" + key, nil
}
func SimpleKey(category string, obj interface{}) (string, error) {
key, err := cache.MetaNamespaceKeyFunc(obj)
if err != nil {
return "", err
}
return key, nil
}
func FixedKey(category string, obj interface{}) (string, error) {
return category, nil
}
func splitKey(key string) (string, string) {
parts := strings.Split(key, "@")
return parts[0], parts[1]
}
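
// Illustrative sketch (editor's addition): how category-qualified cache keys
// round-trip through AnnotatedKey and splitKey; the namespace/name below are
// hypothetical.
//
//	key, _ := AnnotatedKey("servicedefs", cm) // "servicedefs@myns/skupper-services"
//	category, name := splitKey(key)           // "servicedefs", "myns/skupper-services"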
func (c *Controller) newEventHandler(category string, keyStrategy CacheKeyStrategy, test ResourceVersionTest) *cache.ResourceEventHandlerFuncs {
return newEventHandlerFor(c.events, category, keyStrategy, test)
}
func newEventHandlerFor(events workqueue.RateLimitingInterface, category string, keyStrategy CacheKeyStrategy, test ResourceVersionTest) *cache.ResourceEventHandlerFuncs {
return &cache.ResourceEventHandlerFuncs{
AddFunc: func(obj interface{}) {
key, err := keyStrategy(category, obj)
if err != nil {
utilruntime.HandleError(err)
} else {
events.Add(key)
}
},
UpdateFunc: func(old, new interface{}) {
if !test(old, new) {
key, err := keyStrategy(category, new)
if err != nil {
utilruntime.HandleError(err)
} else {
events.Add(key)
}
}
},
DeleteFunc: func(obj interface{}) {
key, err := keyStrategy(category, obj)
if err != nil {
utilruntime.HandleError(err)
} else {
events.Add(key)
}
},
}
}
func (c *Controller) Run(stopCh <-chan struct{}) error {
// fire up the informers
go c.svcDefInformer.Run(stopCh)
go c.bridgeDefInformer.Run(stopCh)
go c.svcInformer.Run(stopCh)
go c.headlessInformer.Run(stopCh)
defer utilruntime.HandleCrash()
defer c.events.ShutDown()
log.Println("Starting the Skupper controller")
log.Println("Waiting for informer caches to sync")
if ok := cache.WaitForCacheSync(stopCh, c.svcDefInformer.HasSynced, c.bridgeDefInformer.HasSynced, c.svcInformer.HasSynced, c.headlessInformer.HasSynced); !ok {
return fmt.Errorf("Failed to wait for caches to sync")
}
log.Println("Starting workers")
if !c.disableServiceSync {
go wait.Until(c.runServiceSync, time.Second, stopCh)
}
go wait.Until(c.runServiceCtrl, time.Second, stopCh)
c.definitionMonitor.start(stopCh)
c.siteQueryServer.start(stopCh)
c.consoleServer.start(stopCh)
c.configSync.start(stopCh)
if c.claimVerifier != nil {
c.claimVerifier.start(stopCh)
}
c.tokenHandler.start(stopCh)
c.claimHandler.start(stopCh)
log.Println("Started workers")
<-stopCh
log.Println("Shutting down workers")
c.configSync.stop()
c.definitionMonitor.stop()
c.tokenHandler.stop()
c.claimHandler.stop()
return nil
}
func (c *Controller) createServiceFor(desired *ServiceBindings) error {
event.Recordf(ServiceControllerCreateEvent, "Creating new service for %s", desired.address)
_, err := kube.NewServiceForAddress(desired.address, desired.publicPorts, desired.ingressPorts, desired.labels, getOwnerReference(), c.vanClient.Namespace, c.vanClient.KubeClient)
if err != nil {
event.Recordf(ServiceControllerError, "Error while creating service %s: %s", desired.address, err)
}
return err
}
func (c *Controller) createHeadlessServiceFor(desired *ServiceBindings) error {
event.Recordf(ServiceControllerCreateEvent, "Creating new headless service for %s", desired.address)
_, err := kube.NewHeadlessServiceForAddress(desired.address, desired.publicPorts, desired.ingressPorts, desired.labels, getOwnerReference(), c.vanClient.Namespace, c.vanClient.KubeClient)
if err != nil {
event.Recordf(ServiceControllerError, "Error while creating headless service %s: %s", desired.address, err)
}
return err
}
func equivalentSelectors(a map[string]string, b map[string]string) bool {
if len(a) != len(b) {
return false
}
for k, v := range a {
if v2, ok := b[k]; !ok || v != v2 {
return false
}
}
for k, v := range b {
if v2, ok := a[k]; !ok || v != v2 {
return false
}
}
return true
}
func (c *Controller) checkServiceFor(desired *ServiceBindings, actual *corev1.Service) error {
event.Recordf(ServiceControllerEvent, "Checking service changes for %s", actual.ObjectMeta.Name)
update := false
desiredPorts := desired.PortMap()
// adding or updating ports
actualPorts := kube.GetServicePortMap(actual)
originalAssignedPorts := kube.GetOriginalAssignedPorts(actual)
var ports []corev1.ServicePort
for pPort, iPort := range desiredPorts {
actualIngPort, found := actualPorts[pPort]
if !found {
update = true
ports = append(ports, corev1.ServicePort{
Name: fmt.Sprintf("port%d", pPort),
Port: int32(pPort),
TargetPort: intstr.IntOrString{IntVal: int32(iPort)},
})
} else if actualIngPort != iPort {
update = true
port := kube.GetServicePort(actual, pPort)
port.TargetPort = intstr.IntOrString{IntVal: int32(iPort)}
ports = append(ports, *port)
}
}
// updating annotations
if update {
if actual.ObjectMeta.Annotations == nil {
actual.ObjectMeta.Annotations = map[string]string{}
}
// If target port has been modified by user
if !reflect.DeepEqual(actualPorts, originalAssignedPorts) {
actual.ObjectMeta.Annotations[types.OriginalTargetPortQualifier] = kube.PortMapToLabelStr(actualPorts)
}
actual.ObjectMeta.Annotations[types.OriginalAssignedQualifier] = kube.PortMapToLabelStr(desiredPorts)
}
// removing ports
	for pPort := range actualPorts {
if _, found := desiredPorts[pPort]; !found {
update = true
}
}
if desired.headless == nil && !equivalentSelectors(actual.Spec.Selector, kube.GetLabelsForRouter()) {
update = true
if actual.ObjectMeta.Annotations == nil {
actual.ObjectMeta.Annotations = map[string]string{}
}
originalSelector := getApplicationSelector(actual)
if originalSelector != "" {
actual.ObjectMeta.Annotations[types.OriginalSelectorQualifier] = originalSelector
}
actual.Spec.Selector = kube.GetLabelsForRouter()
}
if !reflect.DeepEqual(desired.labels, actual.Labels) {
update = true
if actual.Labels == nil {
actual.Labels = map[string]string{}
}
for k, v := range desired.labels {
actual.Labels[k] = v
}
}
if update {
if len(ports) > 0 {
actual.Spec.Ports = ports
}
_, err := c.vanClient.KubeClient.CoreV1().Services(c.vanClient.Namespace).Update(context.Background(), actual, metav1.UpdateOptions{})
return err
}
return nil
}
func (c *Controller) ensureServiceFor(desired *ServiceBindings) error {
event.Recordf(ServiceControllerEvent, "Checking service for: %s", desired.address)
obj, exists, err := c.svcInformer.GetStore().GetByKey(c.namespaced(desired.address))
if err != nil {
return fmt.Errorf("Error checking service %s", err)
} else if !exists {
if desired.headless == nil {
return c.createServiceFor(desired)
} else if desired.origin == "" {
// i.e. originating namespace
event.Recordf(ServiceControllerError, "Headless service does not exist for for %s", desired.address)
return nil
} else {
return c.createHeadlessServiceFor(desired)
}
} else {
svc := obj.(*corev1.Service)
return c.checkServiceFor(desired, svc)
}
}
func (c *Controller) deleteService(svc *corev1.Service) error {
event.Recordf(ServiceControllerDeleteEvent, "Deleting service %s", svc.ObjectMeta.Name)
return c.vanClient.KubeClient.CoreV1().Services(c.vanClient.Namespace).Delete(context.Background(), svc.ObjectMeta.Name, metav1.DeleteOptions{})
}
func (c *Controller) updateActualServices() {
for _, v := range c.bindings {
c.ensureServiceFor(v)
}
services := c.svcInformer.GetStore().List()
for _, v := range services {
svc := v.(*corev1.Service)
if c.bindings[svc.ObjectMeta.Name] == nil && isOwned(svc) {
event.Recordf(ServiceControllerDeleteEvent, "No service binding found for %s", svc.ObjectMeta.Name)
c.deleteService(svc)
}
}
}
// TODO: move to pkg
func equalOwnerRefs(a, b []metav1.OwnerReference) bool {
if len(a) != len(b) {
return false
}
for i, v := range a {
if v != b[i] {
return false
}
}
return true
}
func getOwnerReference() *metav1.OwnerReference {
ownerName := os.Getenv("OWNER_NAME")
ownerUid := os.Getenv("OWNER_UID")
if ownerName == "" || ownerUid == "" {
return nil
} else {
return &metav1.OwnerReference{
APIVersion: "apps/v1",
Kind: "Deployment",
Name: ownerName,
UID: apimachinerytypes.UID(ownerUid),
}
}
}
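
// Illustrative wiring sketch (editor's addition, assumed deployment config,
// not part of this file): OWNER_NAME and OWNER_UID are expected to be injected
// into the controller pod's environment; the values below are hypothetical.
//
//	env:
//	- name: OWNER_NAME
//	  value: skupper-service-controller
//	- name: OWNER_UID
//	  value: 1b4e28ba-2fa1-11d2-883f-0016d3cca427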
func isOwned(service *corev1.Service) bool {
owner := getOwnerReference()
if owner == nil {
return false
}
ownerRefs := []metav1.OwnerReference{
*owner,
}
if controlled, ok := service.ObjectMeta.Annotations[types.ControlledQualifier]; ok {
if controlled == "true" {
return equalOwnerRefs(service.ObjectMeta.OwnerReferences, ownerRefs)
} else {
return false
}
} else {
return false
}
}
func (c *Controller) namespaced(name string) string {
return c.vanClient.Namespace + "/" + name
}
func (c *Controller) parseServiceDefinitions(cm *corev1.ConfigMap) map[string]types.ServiceInterface {
definitions := make(map[string]types.ServiceInterface)
if len(cm.Data) > 0 {
for _, v := range cm.Data {
si := types.ServiceInterface{}
err := jsonencoding.Unmarshal([]byte(v), &si)
if err == nil {
definitions[si.Address] = si
}
}
c.desiredServices = definitions
}
return definitions
}
func (c *Controller) runServiceCtrl() {
for c.processNextEvent() {
}
}
func (c *Controller) getInitialBridgeConfig() (*qdr.BridgeConfig, error) {
name := c.namespaced(types.TransportConfigMapName)
obj, exists, err := c.bridgeDefInformer.GetStore().GetByKey(name)
if err != nil {
return nil, fmt.Errorf("Error reading skupper-internal from cache: %s", err)
} else if exists {
cm, ok := obj.(*corev1.ConfigMap)
if !ok {
return nil, fmt.Errorf("Expected ConfigMap for %s but got %#v", name, obj)
}
return qdr.GetBridgeConfigFromConfigMap(cm)
} else {
return nil, nil
}
}
func (c *Controller) updateBridgeConfig(name string) error {
obj, exists, err := c.bridgeDefInformer.GetStore().GetByKey(name)
if err != nil {
return fmt.Errorf("Error reading skupper-internal from cache: %s", err)
} else if !exists {
return fmt.Errorf("skupper-internal does not exist: %v", err.Error())
} else {
cm, ok := obj.(*corev1.ConfigMap)
if !ok {
return fmt.Errorf("Expected ConfigMap for %s but got %#v", name, obj)
}
desiredBridges := requiredBridges(c.bindings, c.origin)
update, err := desiredBridges.UpdateConfigMap(cm)
if err != nil {
return fmt.Errorf("Error updating %s: %s", cm.ObjectMeta.Name, err)
}
if update {
event.Recordf(ServiceControllerUpdateEvent, "Updating %s", cm.ObjectMeta.Name)
_, err = c.vanClient.KubeClient.CoreV1().ConfigMaps(c.vanClient.Namespace).Update(context.Background(), cm, metav1.UpdateOptions{})
if err != nil {
return fmt.Errorf("Failed to update %s: %v", name, err.Error())
}
}
}
return nil
}
func (c *Controller) initialiseServiceBindingsMap() (map[string][]int, error) {
c.bindings = map[string]*ServiceBindings{}
// on first initiliasing the service bindings map, need to get any
// port allocations from bridge config
bridges, err := c.getInitialBridgeConfig()
if err != nil {
return nil, err
}
allocations := c.ports.getPortAllocations(bridges)
// TODO: should deduce the ports in use by the router by
// reading config rather than hardcoding them here
c.ports.inuse(int(types.AmqpDefaultPort))
c.ports.inuse(int(types.AmqpsDefaultPort))
c.ports.inuse(int(types.EdgeListenerPort))
c.ports.inuse(int(types.InterRouterListenerPort))
c.ports.inuse(int(types.ConsoleDefaultServicePort))
c.ports.inuse(9090) // currently hardcoded in config
return allocations, nil
}
func (c *Controller) deleteServiceBindings(k string, v *ServiceBindings) {
if v != nil {
v.stop()
}
delete(c.bindings, k)
}
func (c *Controller) updateServiceSync(defs *corev1.ConfigMap) {
c.serviceSyncDefinitionsUpdated(c.parseServiceDefinitions(defs))
}
func (c *Controller) deleteHeadlessProxy(statefulset *appsv1.StatefulSet) error {
return c.vanClient.KubeClient.AppsV1().StatefulSets(c.vanClient.Namespace).Delete(context.Background(), statefulset.ObjectMeta.Name, metav1.DeleteOptions{})
}
func (c *Controller) ensureHeadlessProxyFor(bindings *ServiceBindings, statefulset *appsv1.StatefulSet) error {
serviceInterface := asServiceInterface(bindings)
config, err := qdr.GetRouterConfigForHeadlessProxy(serviceInterface, c.origin, client.Version, c.vanClient.Namespace)
if err != nil {
return err
}
_, err = kube.CheckProxyStatefulSet(client.GetRouterImageDetails(), serviceInterface, statefulset, config, c.vanClient.Namespace, c.vanClient.KubeClient)
return err
}
func (c *Controller) createHeadlessProxyFor(bindings *ServiceBindings) error {
serviceInterface := asServiceInterface(bindings)
config, err := qdr.GetRouterConfigForHeadlessProxy(serviceInterface, c.origin, client.Version, c.vanClient.Namespace)
if err != nil {
return err
}
_, err = kube.NewProxyStatefulSet(client.GetRouterImageDetails(), serviceInterface, config, c.vanClient.Namespace, c.vanClient.KubeClient)
return err
}
func (c *Controller) updateHeadlessProxies() {
for _, v := range c.bindings {
if v.headless != nil {
c.ensureHeadlessProxyFor(v, nil)
}
}
proxies := c.headlessInformer.GetStore().List()
for _, v := range proxies {
proxy := v.(*appsv1.StatefulSet)
def, ok := c.bindings[proxy.Spec.ServiceName]
if !ok || def == nil || def.headless == nil {
c.deleteHeadlessProxy(proxy)
}
}
}
func (c *Controller) processNextEvent() bool {
obj, shutdown := c.events.Get()
if shutdown {
return false
}
err := func(obj interface{}) error {
defer c.events.Done(obj)
var ok bool
var key string
if key, ok = obj.(string); !ok {
// invalid item
c.events.Forget(obj)
return fmt.Errorf("expected string in events but got %#v", obj)
} else {
category, name := splitKey(key)
switch category {
case "servicedefs":
event.Record(ServiceControllerEvent, "Service definitions have changed")
// get the configmap, parse the json, check against the current servicebindings map
obj, exists, err := c.svcDefInformer.GetStore().GetByKey(name)
if err != nil {
return fmt.Errorf("Error reading skupper-services from cache: %s", err)
} else if exists {
var portAllocations map[string][]int
if c.bindings == nil {
portAllocations, err = c.initialiseServiceBindingsMap()
if err != nil {
return err
}
}
cm, ok := obj.(*corev1.ConfigMap)
if !ok {
return fmt.Errorf("Expected ConfigMap for %s but got %#v", name, obj)
}
c.updateServiceSync(cm)
if cm.Data != nil && len(cm.Data) > 0 {
for k, v := range cm.Data {
si := types.ServiceInterface{}
err := jsonencoding.Unmarshal([]byte(v), &si)
if err == nil {
c.updateServiceBindings(si, portAllocations)
} else {
event.Recordf(ServiceControllerError, "Could not parse service definition for %s: %s", k, err)
}
}
for k, v := range c.bindings {
_, ok := cm.Data[k]
if !ok {
c.deleteServiceBindings(k, v)
}
}
} else if len(c.bindings) > 0 {
for k, v := range c.bindings {
c.deleteServiceBindings(k, v)
}
}
}
c.updateBridgeConfig(c.namespaced(types.TransportConfigMapName))
c.updateActualServices()
c.updateHeadlessProxies()
case "bridges":
if c.bindings == nil {
// not yet initialised
return nil
}
err := c.updateBridgeConfig(name)
if err != nil {
return err
}
case "actual-services":
if c.bindings == nil {
// not yet initialised
return nil
}
event.Recordf(ServiceControllerEvent, "service event for %s", name)
// name is fully qualified name of the actual service
obj, exists, err := c.svcInformer.GetStore().GetByKey(name)
if err != nil {
return fmt.Errorf("Error reading service %s from cache: %s", name, err)
} else if exists {
svc, ok := obj.(*corev1.Service)
if !ok {
return fmt.Errorf("Expected Service for %s but got %#v", name, obj)
}
bindings := c.bindings[svc.ObjectMeta.Name]
if bindings == nil {
if isOwned(svc) {
err = c.deleteService(svc)
if err != nil {
return err
}
}
} else {
// check that service matches binding def, else update it
err = c.checkServiceFor(bindings, svc)
if err != nil {
return err
}
}
} else {
_, unqualified, err := cache.SplitMetaNamespaceKey(name)
if err != nil {
return fmt.Errorf("Could not determine name of deleted service from key %s: %w", name, err)
}
bindings := c.bindings[unqualified]
if bindings != nil {
if bindings.headless == nil {
err = c.createServiceFor(bindings)
} else if bindings.origin != "" {
err = c.createHeadlessServiceFor(bindings)
}
if err != nil {
return err
}
}
}
case "targetpods":
event.Recordf(ServiceControllerEvent, "Got targetpods event %s", name)
// name is the address of the skupper service
c.updateBridgeConfig(c.namespaced(types.TransportConfigMapName))
case "statefulset":
event.Recordf(ServiceControllerEvent, "Got statefulset proxy event %s", name)
obj, exists, err := c.headlessInformer.GetStore().GetByKey(name)
if err != nil {
return fmt.Errorf("Error reading statefulset %s from cache: %s", name, err)
} else if exists {
statefulset, ok := obj.(*appsv1.StatefulSet)
if !ok {
return fmt.Errorf("Expected StatefulSet for %s but got %#v", name, obj)
}
// a headless proxy was created or updated, does it match the desired binding?
bindings, ok := c.bindings[statefulset.Spec.ServiceName]
if !ok || bindings == nil || bindings.headless == nil {
err = c.deleteHeadlessProxy(statefulset)
if err != nil {
return err
}
} else {
err = c.ensureHeadlessProxyFor(bindings, statefulset)
if err != nil {
return err
}
}
} else {
// a headless proxy was deleted, does it need to be recreated?
_, unqualified, err := cache.SplitMetaNamespaceKey(name)
if err != nil {
return fmt.Errorf("Could not determine name of deleted statefulset from key %s: %w", name, err)
}
for _, v := range c.bindings {
if v.headless != nil && v.headless.Name == unqualified {
err = c.createHeadlessProxyFor(v)
if err != nil {
return err
}
}
}
}
default:
c.events.Forget(obj)
return fmt.Errorf("unexpected event key %s (%s, %s)", key, category, name)
}
c.events.Forget(obj)
}
return nil
}(obj)
if err != nil {
if c.events.NumRequeues(obj) < 5 {
event.Recordf(ServiceControllerError, "Requeuing %v after error: %v", obj, err)
c.events.AddRateLimited(obj)
} else {
event.Recordf(ServiceControllerError, "Giving up on %v after error: %v", obj, err)
}
utilruntime.HandleError(err)
return true
}
return true
}
| [
"\"OWNER_NAME\"",
"\"OWNER_UID\""
]
| []
| [
"OWNER_NAME",
"OWNER_UID"
]
| [] | ["OWNER_NAME", "OWNER_UID"] | go | 2 | 0 | |
setup.py | from setuptools import setup, Extension
from setuptools.command.sdist import sdist as orig_sdist
from distutils.command import build_ext as setuptools_build_ext
from subprocess import call
from sys import version_info as pyversion, platform as osplatform
from ctypes.util import find_library
import os
import os.path as osp
includes = []
libs = []
libdirs = []
class My_sdist(orig_sdist):
def make_release_tree(self, base_dir, files):
orig_sdist.make_release_tree(self, base_dir, files)
# checkout libosmium in the required version
tarball = 'https://github.com/osmcode/libosmium/archive/v%s.tar.gz' % libosmium_version
print("Downloading and adding libosmium sources from", tarball)
call('cd %s && wget -O - -q %s | tar xz' % (base_dir, tarball), shell=True)
# checkout protozero in the required version
tarball = 'https://github.com/mapbox/protozero/archive/v%s.tar.gz' % protozero_version
print("Downloading and adding protozero sources from", tarball)
call('cd %s && wget -O - -q %s | tar xz' % (base_dir, tarball), shell=True)
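
# Illustrative invocation sketch (editor's addition, not part of the original
# file): building a source distribution triggers make_release_tree above and
# vendors the pinned libosmium and protozero sources into the release tree.
#
#   python setup.py sdist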
def get_versions():
""" Read the version file.
The file cannot be directly imported because it is not installed
yet.
"""
version_py = os.path.join(os.path.split(__file__)[0], "src/osmium/version.py")
v = {}
with open(version_py) as version_file:
# Execute the code in version.py.
exec(compile(version_file.read(), version_py, 'exec'), v)
return v['pyosmium_release'], v['libosmium_version'], v['protozero_version']
def find_includes(libname, chk_file=None, prefix=None, version_postfix=None):
if chk_file is None:
chk_file = osp.join('include', libname, 'version.hpp')
if prefix is not None:
if not os.path.isfile(osp.join(prefix, chk_file)):
raise RuntimeError("Prefix for %s set but library not found in '%s'."
% (libname, prefix))
return osp.join(prefix, 'include')
search_paths = []
if version_postfix:
search_paths.append('%s-%s' % (libname, version_postfix))
search_paths.append(osp.join('..', libname))
for p in search_paths:
if os.path.isfile(osp.join(p, chk_file)):
print("%s found in '%s'." % (libname, p))
return osp.join(p, 'include')
print("Using global %s" % libname)
pyosmium_release, libosmium_version, protozero_version = get_versions()
## boost dependencies
boost_prefix = os.environ.get('BOOST_PREFIX',
'c:/libs' if osplatform == "win32" else '/usr')
includes.append(os.path.join(boost_prefix, 'include'))
if 'BOOST_VERSION' in os.environ:
for boost_dir in ('boost-%s', 'boost%s'):
if os.path.isdir(os.path.join(boost_prefix, 'include', boost_dir % os.environ['BOOST_VERSION'])):
            includes.append(os.path.join(boost_prefix, 'include', boost_dir % os.environ['BOOST_VERSION']))
break
else:
raise Exception("Cannot find boost headers")
elif 'BOOST_PREFIX' in os.environ:
if os.path.isdir(os.path.join(boost_prefix, 'include', 'boost')):
includes.append(os.path.join(boost_prefix, 'include', 'boost'))
else:
raise Exception("Cannot find boost headers")
if 'BOOST_PREFIX' in os.environ:
libdirs.append(os.path.join(boost_prefix, 'lib'))
elif osplatform in ["linux", "linux2"] and os.path.isdir('/usr/lib/x86_64-linux-gnu/'):
libdirs.append('/usr/lib/x86_64-linux-gnu/')
else:
libdirs.append(os.path.join(boost_prefix, 'lib'))
# try to find the boost library matching the python version
suffixes = [ # Debian naming convention for version installed in parallel
"-py%d%d" % (pyversion.major, pyversion.minor),
# Gentoo naming convention for version installed in parallel
"-%d.%d" % (pyversion.major, pyversion.minor),
# Darwin
"%d%d" % (pyversion.major, pyversion.minor),
# standard suffix for Python3
"%d" % (pyversion.major),
# standard naming
"",
# former naming schema?
"-mt"
]
for suf in suffixes:
libboost = "boost_python%s" % suf
found = find_library(libboost)
if found:
libs.append(libboost)
break
else:
# Visual C++ supports auto-linking, no library needed
if osplatform != "win32":
raise Exception("Cannot find boost_python library")
if osplatform != "win32":
orig_compiler = setuptools_build_ext.customize_compiler
def cpp_compiler(compiler):
retval = orig_compiler(compiler)
# force C++ compiler
# Note that we only exchange the compiler as we want to keep the
# original Python cflags.
if len(compiler.compiler_cxx) > 0:
compiler.compiler_so[0] = compiler.compiler_cxx[0]
# remove warning that does not make sense for C++
try:
compiler.compiler_so.remove('-Wstrict-prototypes')
except (ValueError, AttributeError):
pass
return retval
setuptools_build_ext.customize_compiler = cpp_compiler
### osmium dependencies
osmium_inc = find_includes('libosmium',
chk_file=osp.join('include', 'osmium', 'version.hpp'),
prefix=os.environ.get('LIBOSMIUM_PREFIX'),
version_postfix=libosmium_version)
if osmium_inc is not None:
includes.insert(0, osmium_inc)
### protozero dependencies
protozero_inc = find_includes('protozero',
prefix=os.environ.get('PROTOZERO_PREFIX'),
version_postfix=protozero_version)
if protozero_inc is not None:
includes.insert(0, protozero_inc)
if osplatform == "win32" :
osmium_libs = ('expat', 'zlib', 'bzip2', 'ws2_32')
extra_compile_args = [ '-DWIN32_LEAN_AND_MEAN', '-D_CRT_SECURE_NO_WARNINGS', '-DNOMINMAX', '/wd4996', '/EHsc' ]
else:
osmium_libs = ('expat', 'pthread', 'z', 'bz2')
extra_compile_args = [ '-std=c++11', '-D_LARGEFILE_SOURCE', '-D_FILE_OFFSET_BITS=64', '-D__STDC_FORMAT_MACROS' ]
libs.extend(osmium_libs)
extensions = []
extensions.append(Extension('osmium._osmium',
sources = ['lib/osmium.cc'],
include_dirs = includes,
libraries = libs,
library_dirs = libdirs,
language = 'c++',
extra_compile_args = extra_compile_args
))
packages = ['osmium']
for ext in ('io', 'index', 'geom'):
extensions.append(Extension('osmium.%s' % ext,
sources = ['lib/%s.cc' % ext],
include_dirs = includes,
libraries = libs,
library_dirs = libdirs,
language = 'c++',
extra_compile_args = extra_compile_args
))
for ext in ('osm', 'replication'):
extensions.append(Extension('osmium.%s._%s' % (ext, ext),
sources = ['lib/%s.cc' % ext],
include_dirs = includes,
libraries = libs,
library_dirs = libdirs,
language = 'c++',
extra_compile_args = extra_compile_args
))
packages.append('osmium.%s' % ext)
classifiers = [
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"License :: OSI Approved :: BSD License",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3.3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: Implementation :: CPython",
"Programming Language :: C++",
]
with open('README.rst') as descfile:
    long_description = descfile.read()
setup (name = 'osmium',
version = pyosmium_release,
description = 'Python bindings for libosmium, the data processing library for OSM data',
long_description=long_description,
author='Sarah Hoffmann',
author_email='[email protected]',
maintainer='Sarah Hoffmann',
maintainer_email='[email protected]',
download_url='https://github.com/osmcode/pyosmium',
url='http://osmcode.org/pyosmium',
keywords=["OSM", "OpenStreetMap", "Osmium"],
license='BSD',
scripts=['tools/pyosmium-get-changes', 'tools/pyosmium-up-to-date'],
classifiers=classifiers,
packages = packages,
package_dir = {'' : 'src'},
package_data = { 'osmium' : [ '*.dll' ] },
cmdclass={'sdist' : My_sdist},
ext_modules = extensions)
| []
| []
| [
"BOOST_PREFIX",
"PROTOZERO_PREFIX",
"LIBOSMIUM_PREFIX",
"BOOST_VERSION"
]
| [] | ["BOOST_PREFIX", "PROTOZERO_PREFIX", "LIBOSMIUM_PREFIX", "BOOST_VERSION"] | python | 4 | 0 | |
test/unit/worker/convert/test_convert_image.py | import unittest
import os
from workers.convert.image_scaling import scale_image
class ConvertImageTest(unittest.TestCase):
"""
Test the image scaling functions.
Basically just taking an existing image file, passing it to the function
with arbitrary parameters and checking if another file was generated.
"""
resource_dir = os.environ['TEST_RESOURCE_DIR']
def test_scale_jpg(self):
"""Test scaling for JPEG type image."""
image_file = os.path.join(self.resource_dir, 'files', 'test.jpg')
scale_image(image_file, self.resource_dir, 30, 40)
self.assertTrue(os.path.isfile(
f'{self.resource_dir}/test.jpg'))
os.remove(os.path.join(self.resource_dir, 'test.jpg'))
def test_scale_tif(self):
"""Test scaling for TIF type image."""
image_file = os.path.join(self.resource_dir, 'files', 'test.tif')
scale_image(image_file, self.resource_dir, 30, 40)
self.assertTrue(os.path.isfile(
f'{self.resource_dir}/test.tif'))
os.remove(os.path.join(self.resource_dir, 'test.tif'))
def test_keep_ratio_tif(self):
"""Test scaling for TIF type image if keep_ratio is False."""
image_file = os.path.join(self.resource_dir, 'files', 'test.tif')
scale_image(image_file, self.resource_dir, 30, 40, False)
self.assertTrue(os.path.isfile(
f'{self.resource_dir}/test.tif'))
os.remove(os.path.join(self.resource_dir, 'test.tif'))
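
# Illustrative usage sketch (editor's addition, not part of the test suite):
# calling the helper under test directly; the paths below are hypothetical,
# and the behaviour of the fifth argument is inferred from the test names.
#
#   from workers.convert.image_scaling import scale_image
#   scale_image('/data/in/test.jpg', '/data/out', 30, 40)         # keep_ratio defaults to True
#   scale_image('/data/in/test.tif', '/data/out', 30, 40, False)  # keep_ratio=False, per the test above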
| []
| []
| [
"TEST_RESOURCE_DIR"
]
| [] | ["TEST_RESOURCE_DIR"] | python | 1 | 0 | |
db/db.go | package db
import (
"fmt"
"os"
"github.com/jinzhu/gorm"
_ "github.com/jinzhu/gorm/dialects/mysql"
"github.com/kmdkuk/gin-auth/model"
)
var (
db *gorm.DB
)
const (
	// database dialect
	dialect = "mysql"
	// TODO: load the user name and password from environment variables (e.g. via a .env file).
	// user name
	dbUser = "root"
	// protocol
	dbProtocol = "tcp(db:3306)"
	// database name
	dbName = "go_auth"
)
// Init is initialize db from main function
func Init() error {
dbPass := os.Getenv("MYSQL_ROOT_PASSWORD")
connectTemplate := "%s:%s@%s/%s?charset=utf8&parseTime=True&loc=Local"
connect := fmt.Sprintf(connectTemplate, dbUser, dbPass, dbProtocol, dbName)
var err error
db, err = gorm.Open(dialect, connect)
if err != nil {
return err
}
if err := autoMigration(); err != nil {
return err
}
return nil
}
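
// Illustrative wiring sketch (editor's addition, assumed caller, not part of
// this package): typical use from main, with MYSQL_ROOT_PASSWORD exported in
// the environment.
//
//	if err := db.Init(); err != nil {
//		log.Fatal(err)
//	}
//	defer db.Close()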
// GetDB is called in models
func GetDB() *gorm.DB {
return db
}
// Close is closing db
func Close() {
if err := db.Close(); err != nil {
panic(err)
}
}
func autoMigration() error {
db.AutoMigrate(&model.User{})
return db.Error
}
| [
"\"MYSQL_ROOT_PASSWORD\""
]
| []
| [
"MYSQL_ROOT_PASSWORD"
]
| [] | ["MYSQL_ROOT_PASSWORD"] | go | 1 | 0 | |
cmd/kubens/switch.go | package main
import (
"io"
"os"
"github.com/pkg/errors"
errors2 "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/ahmetb/kubectx/internal/cmdutil"
"github.com/ahmetb/kubectx/internal/kubeconfig"
"github.com/ahmetb/kubectx/internal/printer"
)
type SwitchOp struct {
Target string // '-' for back and forth, or NAME
}
func (s SwitchOp) Run(_, stderr io.Writer) error {
kc := new(kubeconfig.Kubeconfig).WithLoader(cmdutil.DefaultLoader)
defer kc.Close()
if err := kc.Parse(); err != nil {
return errors.Wrap(err, "kubeconfig error")
}
toNS, err := switchNamespace(kc, s.Target)
if err != nil {
return err
}
err = printer.Success(stderr, "Active namespace is %q", toNS)
return err
}
func switchNamespace(kc *kubeconfig.Kubeconfig, ns string) (string, error) {
ctx := kc.GetCurrentContext()
if ctx == "" {
return "", errors.New("current-context is not set")
}
curNS, err := kc.NamespaceOfContext(ctx)
if ctx == "" {
return "", errors.New("failed to get current namespace")
}
f := NewNSFile(ctx)
prev, err := f.Load()
if err != nil {
return "", errors.Wrap(err, "failed to load previous namespace from file")
}
if ns == "-" {
if prev == "" {
return "", errors.Errorf("No previous namespace found for current context (%s)", ctx)
}
ns = prev
}
ok, err := namespaceExists(kc, ns)
if err != nil {
return "", errors.Wrap(err, "failed to query if namespace exists (is cluster accessible?)")
}
if !ok {
return "", errors.Errorf("no namespace exists with name %q", ns)
}
if err := kc.SetNamespace(ctx, ns); err != nil {
return "", errors.Wrapf(err, "failed to change to namespace %q", ns)
}
if err := kc.Save(); err != nil {
return "", errors.Wrap(err, "failed to save kubeconfig file")
}
if curNS != ns {
if err := f.Save(curNS); err != nil {
return "", errors.Wrap(err, "failed to save the previous namespace to file")
}
}
return ns, nil
}
func namespaceExists(kc *kubeconfig.Kubeconfig, ns string) (bool, error) {
// for tests
if os.Getenv("_MOCK_NAMESPACES") != "" {
return ns == "ns1" || ns == "ns2", nil
}
clientset, err := newKubernetesClientSet(kc)
if err != nil {
return false, errors.Wrap(err, "failed to initialize k8s REST client")
}
namespace, err := clientset.CoreV1().Namespaces().Get(ns, metav1.GetOptions{})
if errors2.IsNotFound(err) {
return false, nil
}
return namespace != nil, errors.Wrapf(err, "failed to query "+
"namespace %q from k8s API", ns)
}
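
// Illustrative test sketch (editor's addition, hypothetical): the
// _MOCK_NAMESPACES hook above lets namespaceExists run without a cluster.
//
//	os.Setenv("_MOCK_NAMESPACES", "1")
//	ok, _ := namespaceExists(kc, "ns1") // true
//	ok, _ = namespaceExists(kc, "ns2")  // true
//	ok, _ = namespaceExists(kc, "ns3")  // false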
| [
"\"_MOCK_NAMESPACES\""
]
| []
| [
"_MOCK_NAMESPACES"
]
| [] | ["_MOCK_NAMESPACES"] | go | 1 | 0 | |
tfx/orchestration/experimental/core/async_pipeline_task_gen_test.py | # Copyright 2020 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tfx.orchestration.experimental.core.async_pipeline_task_gen."""
import os
from absl.testing import parameterized
from absl.testing.absltest import mock
import tensorflow as tf
from tfx.orchestration import metadata
from tfx.orchestration.experimental.core import async_pipeline_task_gen as asptg
from tfx.orchestration.experimental.core import mlmd_state
from tfx.orchestration.experimental.core import pipeline_state as pstate
from tfx.orchestration.experimental.core import service_jobs
from tfx.orchestration.experimental.core import task as task_lib
from tfx.orchestration.experimental.core import task_gen_utils
from tfx.orchestration.experimental.core import task_queue as tq
from tfx.orchestration.experimental.core import test_utils
from tfx.orchestration.experimental.core.testing import test_async_pipeline
from tfx.utils import status as status_lib
from google.protobuf import any_pb2
from ml_metadata.proto import metadata_store_pb2
class AsyncPipelineTaskGeneratorTest(test_utils.TfxTest,
parameterized.TestCase):
def setUp(self):
super().setUp()
pipeline_root = os.path.join(
os.environ.get('TEST_UNDECLARED_OUTPUTS_DIR', self.get_temp_dir()),
self.id())
self._pipeline_root = pipeline_root
# Makes sure multiple connections within a test always connect to the same
# MLMD instance.
metadata_path = os.path.join(pipeline_root, 'metadata', 'metadata.db')
self._metadata_path = metadata_path
connection_config = metadata.sqlite_metadata_connection_config(
metadata_path)
connection_config.sqlite.SetInParent()
self._mlmd_connection = metadata.Metadata(
connection_config=connection_config)
# Sets up the pipeline.
pipeline = test_async_pipeline.create_pipeline()
self._pipeline = pipeline
self._pipeline_info = pipeline.pipeline_info
self._pipeline_runtime_spec = pipeline.runtime_spec
self._pipeline_runtime_spec.pipeline_root.field_value.string_value = (
pipeline_root)
# Extracts components.
self._example_gen = pipeline.nodes[0].pipeline_node
self._transform = pipeline.nodes[1].pipeline_node
self._trainer = pipeline.nodes[2].pipeline_node
self._task_queue = tq.TaskQueue()
self._mock_service_job_manager = mock.create_autospec(
service_jobs.ServiceJobManager, instance=True)
def _is_pure_service_node(unused_pipeline_state, node_id):
return node_id == self._example_gen.node_info.id
def _is_mixed_service_node(unused_pipeline_state, node_id):
return node_id == self._transform.node_info.id
self._mock_service_job_manager.is_pure_service_node.side_effect = (
_is_pure_service_node)
self._mock_service_job_manager.is_mixed_service_node.side_effect = (
_is_mixed_service_node)
def _default_ensure_node_services(unused_pipeline_state, node_id):
self.assertIn(
node_id,
(self._example_gen.node_info.id, self._transform.node_info.id))
return service_jobs.ServiceStatus.RUNNING
self._mock_service_job_manager.ensure_node_services.side_effect = (
_default_ensure_node_services)
def _finish_node_execution(self, use_task_queue, exec_node_task):
"""Simulates successful execution of a node."""
test_utils.fake_execute_node(self._mlmd_connection, exec_node_task)
if use_task_queue:
dequeued_task = self._task_queue.dequeue()
self._task_queue.task_done(dequeued_task)
self.assertEqual(exec_node_task.task_id, dequeued_task.task_id)
def _generate_and_test(self,
use_task_queue,
num_initial_executions,
num_tasks_generated,
num_new_executions,
num_active_executions,
expected_exec_nodes=None,
ignore_update_node_state_tasks=False):
"""Generates tasks and tests the effects."""
return test_utils.run_generator_and_test(
self,
self._mlmd_connection,
asptg.AsyncPipelineTaskGenerator,
self._pipeline,
self._task_queue,
use_task_queue,
self._mock_service_job_manager,
num_initial_executions=num_initial_executions,
num_tasks_generated=num_tasks_generated,
num_new_executions=num_new_executions,
num_active_executions=num_active_executions,
expected_exec_nodes=expected_exec_nodes,
ignore_update_node_state_tasks=ignore_update_node_state_tasks)
@parameterized.parameters(0, 1)
def test_no_tasks_generated_when_no_inputs(self, min_count):
"""Tests no tasks are generated when there are no inputs, regardless of min_count."""
for node in self._pipeline.nodes:
for v in node.pipeline_node.inputs.inputs.values():
v.min_count = min_count
with self._mlmd_connection as m:
pipeline_state = test_utils.get_or_create_pipeline_state(
m, self._pipeline)
task_gen = asptg.AsyncPipelineTaskGenerator(
m, lambda _: False, service_jobs.DummyServiceJobManager())
tasks = task_gen.generate(pipeline_state)
self.assertEmpty(tasks, 'Expected no task generation when no inputs.')
self.assertEmpty(
test_utils.get_non_orchestrator_executions(m),
'There must not be any registered executions since no tasks were '
'generated.')
@parameterized.parameters(False, True)
def test_task_generation(self, use_task_queue):
"""Tests async pipeline task generation.
Args:
use_task_queue: If task queue is enabled, new tasks are only generated if
a task with the same task_id does not already exist in the queue.
`use_task_queue=False` is useful to test the case of task generation
when task queue is empty (for eg: due to orchestrator restart).
"""
# Simulate that ExampleGen has already completed successfully.
test_utils.fake_example_gen_run(self._mlmd_connection, self._example_gen, 1,
1)
# Generate once.
[update_example_gen_task, update_transform_task,
exec_transform_task] = self._generate_and_test(
use_task_queue,
num_initial_executions=1,
num_tasks_generated=3,
num_new_executions=1,
num_active_executions=1,
expected_exec_nodes=[self._transform])
self.assertTrue(task_lib.is_update_node_state_task(update_example_gen_task))
self.assertEqual(pstate.NodeState.RUNNING, update_example_gen_task.state)
self.assertTrue(task_lib.is_update_node_state_task(update_transform_task))
self.assertEqual(pstate.NodeState.RUNNING, update_transform_task.state)
self.assertTrue(task_lib.is_exec_node_task(exec_transform_task))
self._mock_service_job_manager.ensure_node_services.assert_has_calls([
mock.call(mock.ANY, self._example_gen.node_info.id),
mock.call(mock.ANY, self._transform.node_info.id)
])
# No new effects if generate called again.
tasks = self._generate_and_test(
use_task_queue,
num_initial_executions=2,
num_tasks_generated=1 if use_task_queue else 3,
num_new_executions=0,
num_active_executions=1,
expected_exec_nodes=[] if use_task_queue else [self._transform])
if not use_task_queue:
exec_transform_task = tasks[2]
# Mark transform execution complete.
self._finish_node_execution(use_task_queue, exec_transform_task)
# Trainer execution task should be generated next.
[
update_example_gen_task, update_transform_task, update_trainer_task,
exec_trainer_task
] = self._generate_and_test(
use_task_queue,
num_initial_executions=2,
num_tasks_generated=4,
num_new_executions=1,
num_active_executions=1,
expected_exec_nodes=[self._trainer])
self.assertTrue(task_lib.is_update_node_state_task(update_example_gen_task))
self.assertEqual(pstate.NodeState.RUNNING, update_example_gen_task.state)
self.assertTrue(task_lib.is_update_node_state_task(update_transform_task))
self.assertEqual(pstate.NodeState.STARTED, update_transform_task.state)
self.assertTrue(task_lib.is_update_node_state_task(update_trainer_task))
self.assertEqual(pstate.NodeState.RUNNING, update_trainer_task.state)
self.assertTrue(task_lib.is_exec_node_task(exec_trainer_task))
# Mark the trainer execution complete.
self._finish_node_execution(use_task_queue, exec_trainer_task)
# Only UpdateNodeStateTask are generated as there are no new inputs.
tasks = self._generate_and_test(
use_task_queue,
num_initial_executions=3,
num_tasks_generated=3,
num_new_executions=0,
num_active_executions=0)
for task in tasks:
self.assertTrue(task_lib.is_update_node_state_task(task))
self.assertEqual(pstate.NodeState.RUNNING, update_example_gen_task.state)
# Fake another ExampleGen run.
test_utils.fake_example_gen_run(self._mlmd_connection, self._example_gen, 1,
1)
# Both transform and trainer tasks should be generated as they both find
# new inputs.
[
update_example_gen_task, update_transform_task, exec_transform_task,
update_trainer_task, exec_trainer_task
] = self._generate_and_test(
use_task_queue,
num_initial_executions=4,
num_tasks_generated=5,
num_new_executions=2,
num_active_executions=2,
expected_exec_nodes=[self._transform, self._trainer])
self.assertTrue(task_lib.is_update_node_state_task(update_example_gen_task))
self.assertEqual(pstate.NodeState.RUNNING, update_example_gen_task.state)
self.assertTrue(task_lib.is_update_node_state_task(update_transform_task))
self.assertEqual(pstate.NodeState.RUNNING, update_transform_task.state)
self.assertTrue(task_lib.is_exec_node_task(exec_transform_task))
self.assertTrue(task_lib.is_update_node_state_task(update_trainer_task))
self.assertEqual(pstate.NodeState.RUNNING, update_trainer_task.state)
self.assertTrue(task_lib.is_exec_node_task(exec_trainer_task))
# Re-generation will produce the same tasks when task queue disabled.
tasks = self._generate_and_test(
use_task_queue,
num_initial_executions=6,
num_tasks_generated=1 if use_task_queue else 5,
num_new_executions=0,
num_active_executions=2,
expected_exec_nodes=[]
if use_task_queue else [self._transform, self._trainer])
if not use_task_queue:
self.assertTrue(task_lib.is_update_node_state_task(tasks[0]))
      self.assertEqual(pstate.NodeState.RUNNING, tasks[0].state)
self.assertTrue(task_lib.is_update_node_state_task(tasks[1]))
      self.assertEqual(pstate.NodeState.RUNNING, tasks[1].state)
self.assertTrue(task_lib.is_exec_node_task(tasks[2]))
self.assertTrue(task_lib.is_update_node_state_task(tasks[3]))
      self.assertEqual(pstate.NodeState.RUNNING, tasks[3].state)
self.assertTrue(task_lib.is_exec_node_task(tasks[4]))
exec_transform_task = tasks[2]
exec_trainer_task = tasks[4]
else:
self.assertTrue(task_lib.is_update_node_state_task(tasks[0]))
      self.assertEqual(pstate.NodeState.RUNNING, tasks[0].state)
# Mark transform execution complete.
self._finish_node_execution(use_task_queue, exec_transform_task)
# Mark the trainer execution complete.
self._finish_node_execution(use_task_queue, exec_trainer_task)
# Trainer should be triggered again due to transform producing new output.
[
update_example_gen_task, update_transform_task, update_trainer_task,
exec_trainer_task
] = self._generate_and_test(
use_task_queue,
num_initial_executions=6,
num_tasks_generated=4,
num_new_executions=1,
num_active_executions=1,
expected_exec_nodes=[self._trainer])
self.assertTrue(task_lib.is_update_node_state_task(update_example_gen_task))
self.assertEqual(pstate.NodeState.RUNNING, update_example_gen_task.state)
self.assertTrue(task_lib.is_update_node_state_task(update_transform_task))
self.assertEqual(pstate.NodeState.STARTED, update_transform_task.state)
self.assertTrue(task_lib.is_update_node_state_task(update_trainer_task))
self.assertEqual(pstate.NodeState.RUNNING, update_trainer_task.state)
self.assertTrue(task_lib.is_exec_node_task(exec_trainer_task))
# Finally, no new tasks once trainer completes.
self._finish_node_execution(use_task_queue, exec_trainer_task)
[update_example_gen_task, update_transform_task,
update_trainer_task] = self._generate_and_test(
use_task_queue,
num_initial_executions=7,
num_tasks_generated=3,
num_new_executions=0,
num_active_executions=0)
self.assertTrue(task_lib.is_update_node_state_task(update_example_gen_task))
self.assertEqual(pstate.NodeState.RUNNING, update_example_gen_task.state)
self.assertTrue(task_lib.is_update_node_state_task(update_transform_task))
self.assertEqual(pstate.NodeState.STARTED, update_transform_task.state)
self.assertTrue(task_lib.is_update_node_state_task(update_trainer_task))
self.assertEqual(pstate.NodeState.STARTED, update_trainer_task.state)
if use_task_queue:
self.assertTrue(self._task_queue.is_empty())
@parameterized.parameters(False, True)
def test_task_generation_when_node_stopped(self, stop_transform):
"""Tests stopped nodes are ignored when generating tasks."""
# Simulate that ExampleGen has already completed successfully.
test_utils.fake_example_gen_run(self._mlmd_connection, self._example_gen, 1,
1)
# Generate once.
num_initial_executions = 1
if stop_transform:
num_tasks_generated = 1
num_new_executions = 0
num_active_executions = 0
with self._mlmd_connection as m:
pipeline_state = test_utils.get_or_create_pipeline_state(
m, self._pipeline)
with pipeline_state:
with pipeline_state.node_state_update_context(
task_lib.NodeUid.from_pipeline_node(
self._pipeline, self._transform)) as node_state:
node_state.update(pstate.NodeState.STOPPING,
status_lib.Status(code=status_lib.Code.CANCELLED))
else:
num_tasks_generated = 3
num_new_executions = 1
num_active_executions = 1
tasks = self._generate_and_test(
True,
num_initial_executions=num_initial_executions,
num_tasks_generated=num_tasks_generated,
num_new_executions=num_new_executions,
num_active_executions=num_active_executions)
self.assertLen(tasks, num_tasks_generated)
if stop_transform:
self.assertTrue(task_lib.is_update_node_state_task(tasks[0]))
self.assertEqual(pstate.NodeState.RUNNING, tasks[0].state)
else:
self.assertTrue(task_lib.is_update_node_state_task(tasks[0]))
self.assertEqual(pstate.NodeState.RUNNING, tasks[0].state)
self.assertTrue(task_lib.is_update_node_state_task(tasks[1]))
self.assertEqual(pstate.NodeState.RUNNING, tasks[1].state)
self.assertTrue(task_lib.is_exec_node_task(tasks[2]))
def test_service_job_failed(self):
"""Tests task generation when example-gen service job fails."""
def _ensure_node_services(unused_pipeline_state, node_id):
self.assertEqual('my_example_gen', node_id)
return service_jobs.ServiceStatus.FAILED
self._mock_service_job_manager.ensure_node_services.side_effect = (
_ensure_node_services)
[update_task] = self._generate_and_test(
True,
num_initial_executions=0,
num_tasks_generated=1,
num_new_executions=0,
num_active_executions=0)
self.assertTrue(task_lib.is_update_node_state_task(update_task))
self.assertEqual(status_lib.Code.ABORTED, update_task.status.code)
def test_triggering_upon_exec_properties_change(self):
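    """Tests that changed exec properties re-trigger the node after a failed run."""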
test_utils.fake_example_gen_run(self._mlmd_connection, self._example_gen, 1,
1)
[exec_transform_task] = self._generate_and_test(
False,
num_initial_executions=1,
num_tasks_generated=1,
num_new_executions=1,
num_active_executions=1,
expected_exec_nodes=[self._transform],
ignore_update_node_state_tasks=True)
# Fail the registered execution.
with self._mlmd_connection as m:
with mlmd_state.mlmd_execution_atomic_op(
m, exec_transform_task.execution_id) as execution:
execution.last_known_state = metadata_store_pb2.Execution.FAILED
# Try to generate with same execution properties. This should not trigger
# as there are no changes since last run.
self._generate_and_test(
False,
num_initial_executions=2,
num_tasks_generated=0,
num_new_executions=0,
num_active_executions=0,
ignore_update_node_state_tasks=True)
# Change execution properties of last run.
with self._mlmd_connection as m:
with mlmd_state.mlmd_execution_atomic_op(
m, exec_transform_task.execution_id) as execution:
execution.custom_properties['a_param'].int_value = 20
# Generating with different execution properties should trigger.
self._generate_and_test(
False,
num_initial_executions=2,
num_tasks_generated=1,
num_new_executions=1,
num_active_executions=1,
expected_exec_nodes=[self._transform],
ignore_update_node_state_tasks=True)
def test_triggering_upon_executor_spec_change(self):
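    """Tests that a changed executor spec re-triggers the node after a failed run."""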
test_utils.fake_example_gen_run(self._mlmd_connection, self._example_gen, 1,
1)
with mock.patch.object(task_gen_utils,
'get_executor_spec') as mock_get_executor_spec:
mock_get_executor_spec.side_effect = _fake_executor_spec(1)
[exec_transform_task] = self._generate_and_test(
False,
num_initial_executions=1,
num_tasks_generated=1,
num_new_executions=1,
num_active_executions=1,
expected_exec_nodes=[self._transform],
ignore_update_node_state_tasks=True)
# Fail the registered execution.
with self._mlmd_connection as m:
with mlmd_state.mlmd_execution_atomic_op(
m, exec_transform_task.execution_id) as execution:
execution.last_known_state = metadata_store_pb2.Execution.FAILED
# Try to generate with same executor spec. This should not trigger as
# there are no changes since last run.
with mock.patch.object(task_gen_utils,
'get_executor_spec') as mock_get_executor_spec:
mock_get_executor_spec.side_effect = _fake_executor_spec(1)
self._generate_and_test(
False,
num_initial_executions=2,
num_tasks_generated=0,
num_new_executions=0,
num_active_executions=0,
ignore_update_node_state_tasks=True)
# Generating with a different executor spec should trigger.
with mock.patch.object(task_gen_utils,
'get_executor_spec') as mock_get_executor_spec:
mock_get_executor_spec.side_effect = _fake_executor_spec(2)
self._generate_and_test(
False,
num_initial_executions=2,
num_tasks_generated=1,
num_new_executions=1,
num_active_executions=1,
expected_exec_nodes=[self._transform],
ignore_update_node_state_tasks=True)
def _fake_executor_spec(val):
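  """Returns a getter that packs `val` into an Any proto, faking an executor spec."""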
def _get_executor_spec(*unused_args, **unused_kwargs):
value = metadata_store_pb2.Value(int_value=val)
any_proto = any_pb2.Any()
any_proto.Pack(value)
return any_proto
return _get_executor_spec
if __name__ == '__main__':
tf.test.main()
| []
| []
| [
"TEST_UNDECLARED_OUTPUTS_DIR"
]
| [] | ["TEST_UNDECLARED_OUTPUTS_DIR"] | python | 1 | 0 | |
sdk/python/tests/integration/feature_repos/universal/data_sources/snowflake.py | import os
import uuid
from typing import Dict, List, Optional
import pandas as pd
from feast import SnowflakeSource
from feast.data_source import DataSource
from feast.infra.offline_stores.snowflake import SnowflakeOfflineStoreConfig
from feast.infra.offline_stores.snowflake_source import SavedDatasetSnowflakeStorage
from feast.infra.utils.snowflake_utils import get_snowflake_conn, write_pandas
from feast.repo_config import FeastConfigBaseModel
from tests.integration.feature_repos.universal.data_source_creator import (
DataSourceCreator,
)
class SnowflakeDataSourceCreator(DataSourceCreator):
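    """Creates Snowflake tables to back data sources in universal integration tests."""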
tables: List[str] = []
def __init__(self, project_name: str):
super().__init__()
self.project_name = project_name
self.offline_store_config = SnowflakeOfflineStoreConfig(
type="snowflake.offline",
account=os.environ["SNOWFLAKE_CI_DEPLOYMENT"],
user=os.environ["SNOWFLAKE_CI_USER"],
password=os.environ["SNOWFLAKE_CI_PASSWORD"],
role=os.environ["SNOWFLAKE_CI_ROLE"],
warehouse=os.environ["SNOWFLAKE_CI_WAREHOUSE"],
database="FEAST",
schema="OFFLINE",
)
def create_data_source(
self,
df: pd.DataFrame,
destination_name: str,
suffix: Optional[str] = None,
event_timestamp_column="ts",
created_timestamp_column="created_ts",
field_mapping: Dict[str, str] = None,
) -> DataSource:
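        """Uploads `df` into a new Snowflake table and returns a SnowflakeSource over it."""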
snowflake_conn = get_snowflake_conn(self.offline_store_config)
destination_name = self.get_prefixed_table_name(destination_name)
write_pandas(snowflake_conn, df, destination_name, auto_create_table=True)
self.tables.append(destination_name)
return SnowflakeSource(
table=destination_name,
event_timestamp_column=event_timestamp_column,
created_timestamp_column=created_timestamp_column,
date_partition_column="",
field_mapping=field_mapping or {"ts_1": "ts"},
)
def create_saved_dataset_destination(self) -> SavedDatasetSnowflakeStorage:
table = self.get_prefixed_table_name(
f"persisted_ds_{str(uuid.uuid4()).replace('-', '_')}"
)
self.tables.append(table)
return SavedDatasetSnowflakeStorage(table_ref=table)
def create_offline_store_config(self) -> FeastConfigBaseModel:
return self.offline_store_config
def get_prefixed_table_name(self, suffix: str) -> str:
return f"{self.project_name}_{suffix}"
def teardown(self):
snowflake_conn = get_snowflake_conn(self.offline_store_config)
with snowflake_conn as conn:
cur = conn.cursor()
for table in self.tables:
cur.execute(f'DROP TABLE IF EXISTS "{table}"')
| []
| []
| [
"SNOWFLAKE_CI_DEPLOYMENT",
"SNOWFLAKE_CI_PASSWORD",
"SNOWFLAKE_CI_WAREHOUSE",
"SNOWFLAKE_CI_USER",
"SNOWFLAKE_CI_ROLE"
]
| [] | ["SNOWFLAKE_CI_DEPLOYMENT", "SNOWFLAKE_CI_PASSWORD", "SNOWFLAKE_CI_WAREHOUSE", "SNOWFLAKE_CI_USER", "SNOWFLAKE_CI_ROLE"] | python | 5 | 0 | |
app/job/openplatform/article/http/http.go | package http
import (
"net/http"
"go-common/app/job/openplatform/article/conf"
"go-common/app/job/openplatform/article/service"
"go-common/library/log"
bm "go-common/library/net/http/blademaster"
)
var ajSrv *service.Service
// Init initializes the outer router and starts the HTTP server.
func Init(conf *conf.Config, srv *service.Service) {
ajSrv = srv
// init outer router
engineOuter := bm.DefaultServer(conf.BM)
outerRouter(engineOuter)
if err := engineOuter.Start(); err != nil {
log.Error("xhttp.Serve error(%v)", err)
panic(err)
}
}
// outerRouter init outer router
func outerRouter(r *bm.Engine) {
r.Ping(ping)
r.Register(register)
cr := r.Group("/sitemap")
{
cr.GET("/read/detail.xml", sitemap)
}
}
func ping(c *bm.Context) {
if err := ajSrv.Ping(c); err != nil {
log.Error("ping error(%v)", err)
c.AbortWithStatus(http.StatusServiceUnavailable)
}
}
func register(c *bm.Context) {
c.JSON(map[string]interface{}{}, nil)
}
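// sitemap writes the read sitemap XML with explicit Content-Type and
// Content-Length headers.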
func sitemap(c *bm.Context) {
res, l := ajSrv.SitemapXML(c)
c.Writer.Header().Set("Content-Type", "text/xml")
c.Writer.Header().Set("Content-Length", l)
c.Status(http.StatusOK)
c.Writer.Write([]byte(res))
}
| []
| []
| []
| [] | [] | go | null | null | null |
cmd/operate.go | // Copyright 2016 Red Hat, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package cmd
import (
"fmt"
"os"
"os/signal"
"sync"
"syscall"
"github.com/funktionio/funktion/pkg/funktion"
"github.com/go-kit/kit/log"
"github.com/spf13/cobra"
flag "github.com/spf13/pflag"
"k8s.io/client-go/1.5/pkg/api"
"k8s.io/client-go/1.5/tools/clientcmd"
)
func init() {
RootCmd.AddCommand(newOperateCmd())
}
type operateCmd struct {
namespace string
allNamespaces bool
}
func newOperateCmd() *cobra.Command {
p := &operateCmd{}
cmd := &cobra.Command{
Use: "operate",
Short: "Runs the funktion operator",
Long: `This command will startup the operator for funktion`,
Run: func(cmd *cobra.Command, args []string) {
handleError(p.operate(cmd, args))
},
}
f := cmd.Flags()
f.StringVarP(&p.namespace, "namespace", "n", "", "the name of the namespace to watch for resources")
f.BoolVarP(&p.allNamespaces, "all", "a", false, "if enabled all namespaces will be watched. This option typically requires a cluster administrator role")
return cmd
}
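// operate builds the Kubernetes client config, resolves the namespace to
// watch (flag, $KUBERNETES_NAMESPACE, or the kubeconfig default) and runs the
// operator until an error or SIGTERM.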
func (p *operateCmd) operate(cmd *cobra.Command, args []string) error {
logger := log.NewContext(log.NewLogfmtLogger(os.Stdout)).
With("ts", log.DefaultTimestampUTC, "caller", log.DefaultCaller).
With("operator", "funktion")
flagset := flag.NewFlagSet(os.Args[0], flag.ExitOnError)
loadingRules := clientcmd.NewDefaultClientConfigLoadingRules()
flagset.StringVar(&loadingRules.ExplicitPath, "kubeconfig", "", "Path to the config file to use for CLI requests.")
overrides := &clientcmd.ConfigOverrides{}
overrideFlags := clientcmd.RecommendedConfigOverrideFlags("")
clientcmd.BindOverrideFlags(overrides, flagset, overrideFlags)
kubeConfig := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(loadingRules, overrides)
flagset.Parse(os.Args[1:])
cfg, err := kubeConfig.ClientConfig()
if err != nil {
logger.Log("msg", "failed to create Kubernetes client config", "error", err)
return err
}
namespace := p.namespace
if p.allNamespaces {
namespace = api.NamespaceAll
fmt.Printf("Funktion operator is starting watching namespace: %s\n", namespace)
} else {
if len(namespace) == 0 {
namespace = os.Getenv("KUBERNETES_NAMESPACE")
if len(namespace) <= 0 {
				// plain assignment (not ":=") so the detected namespace and any
				// error propagate out of this block instead of being shadowed
				namespace, _, err = kubeConfig.Namespace()
if err != nil {
return fmt.Errorf("Could not detect namespace %v", err)
}
if len(namespace) <= 0 {
return fmt.Errorf("No namespace argument or $KUBERNETES_NAMESPACE environment variable specified and we could not detect the current namespace!")
}
}
}
fmt.Printf("Funktion operator is starting watching namespace: '%s'\n", namespace)
}
ko, err := funktion.New(cfg, logger, namespace)
if err != nil {
logger.Log("error", err)
return err
}
stopc := make(chan struct{})
errc := make(chan error)
var wg sync.WaitGroup
wg.Add(1)
go func() {
if err := ko.Run(stopc); err != nil {
errc <- err
}
wg.Done()
}()
	// buffered so a signal sent before we block in select isn't dropped
	term := make(chan os.Signal, 1)
signal.Notify(term, os.Interrupt, syscall.SIGTERM)
select {
case <-term:
fmt.Fprintln(os.Stderr)
logger.Log("msg", "Received SIGTERM, exiting gracefully...")
close(stopc)
wg.Wait()
case err := <-errc:
logger.Log("msg", "Unexpected error received", "error", err)
close(stopc)
wg.Wait()
return err
}
return nil
}
| [
"\"KUBERNETES_NAMESPACE\""
]
| []
| [
"KUBERNETES_NAMESPACE"
]
| [] | ["KUBERNETES_NAMESPACE"] | go | 1 | 0 | |
application/authorizationMiddleware.go | package application
import (
"bytes"
"context"
"crypto/md5"
"encoding/json"
"fmt"
"io/ioutil"
"log"
"net/http"
"os"
"github.com/jinzhu/gorm"
newrelic "github.com/newrelic/go-agent"
"github.com/server-may-cry/bubble-go/models"
"github.com/server-may-cry/bubble-go/mynewrelic"
"github.com/server-may-cry/bubble-go/platforms"
)
// Middleware for http router to authorize user
type Middleware func(next http.Handler) http.Handler
// AuthorizationMiddleware checks the request signature and loads the user.
func AuthorizationMiddleware(db *gorm.DB) Middleware {
return func(next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
if r.Method != http.MethodPost {
				return // all requests from the client are sent via POST
}
buf, _ := ioutil.ReadAll(r.Body)
requestBodyCopy := ioutil.NopCloser(bytes.NewBuffer(buf))
r.Body = requestBodyCopy
request := AuthRequestPart{}
if err := json.Unmarshal(buf, &request); err != nil {
http.Error(w, err.Error(), http.StatusBadRequest)
return
}
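			// Each platform signs requests with its own concatenation scheme; the
			// md5 of the string built below must match the client-supplied AuthKey.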
var stringToHash string
platformID, exist := platforms.GetByName(request.SysID)
if !exist {
log.Panicf("not exist platform %s", request.SysID)
}
switch request.SysID {
case "VK":
stringToHash = fmt.Sprintf(
"%s_%d_%s",
os.Getenv("VK_APP_ID"),
request.ExtID,
os.Getenv("VK_SECRET"),
)
case "OK":
stringToHash = fmt.Sprintf(
"%d%s%s",
request.ExtID,
request.SessionKey,
os.Getenv("OK_SECRET"),
)
default:
http.Error(w, fmt.Sprintf("Unknown platform %s", request.SysID), http.StatusBadRequest)
return
}
data := []byte(stringToHash)
expectedAuthKey := fmt.Sprintf("%x", md5.Sum(data))
if expectedAuthKey != request.AuthKey {
log.Printf(
"authorization failure %s %s %s",
stringToHash,
expectedAuthKey,
request.AuthKey,
)
http.Error(w, fmt.Sprintf("Bad auth key %s", request.AuthKey), http.StatusForbidden)
return
}
var user models.User
s := newrelic.DatastoreSegment{
StartTime: newrelic.StartSegmentNow(r.Context().Value(mynewrelic.Ctx).(newrelic.Transaction)),
Product: newrelic.DatastorePostgres,
Collection: "user",
Operation: "SELECT",
}
db.Where("sys_id = ? AND ext_id = ?", platformID, request.ExtID).First(&user)
_ = s.End()
if user.ID != 0 { // check user exists
ctx := context.WithValue(r.Context(), userCtxID, user)
r = r.WithContext(ctx)
}
next.ServeHTTP(w, r)
})
}
}
| [
"\"VK_APP_ID\"",
"\"VK_SECRET\"",
"\"OK_SECRET\""
]
| []
| [
"OK_SECRET",
"VK_SECRET",
"VK_APP_ID"
]
| [] | ["OK_SECRET", "VK_SECRET", "VK_APP_ID"] | go | 3 | 0 | |
SampleClients/filecheck/vtFileCheck.go | // vtFileCheck.go - checks if VirusTotal knows a given file.
package main
import (
"flag"
"fmt"
"os"
"github.com/williballenthin/govt"
)
var apikey string
var apiurl string
var rsrc string
func init() {
flag.StringVar(&apikey, "apikey", os.Getenv("VT_API_KEY"), "Set environment variable VT_API_KEY to your VT API Key or specify on prompt")
flag.StringVar(&apiurl, "apiurl", "https://www.virustotal.com/vtapi/v2/", "URL of the VirusTotal API to be used.")
flag.StringVar(&rsrc, "rsrc", "8ac31b7350a95b0b492434f9ae2f1cde", "resource of file to check VT for. Resource can be md5, sha-1 or sha-2 sum of a file.")
}
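// Example invocation (values are placeholders):
//   VT_API_KEY=<your key> vtFileCheck -rsrc=<md5|sha1|sha256 of a file>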
func check(e error) {
if e != nil {
panic(e)
}
}
func main() {
flag.Parse()
c, err := govt.New(govt.SetApikey(apikey), govt.SetUrl(apiurl))
check(err)
r, err := c.GetFileReport(rsrc)
check(err)
if r.ResponseCode == 0 {
//fmt.Println( r.VerboseMsg )
fmt.Println(rsrc + " NOT KNOWN by VirusTotal")
} else {
//fmt.Println(rsrc + "["+r.Positives+"/"+r.Total+"] IS KNOWN by VirusTotal")
fmt.Printf("%s [%d/%d] IS KNOWN by VirusTotal\n", rsrc, r.Positives, r.Total)
//j, err := json.MarshalIndent(r, "", " ")
//fmt.Printf("FileReport: ")
//os.Stdout.Write(j)
}
}
| [
"\"VT_API_KEY\""
]
| []
| [
"VT_API_KEY"
]
| [] | ["VT_API_KEY"] | go | 1 | 0 | |
tidb-server/fuzz/fuzz.go | // +build gofuzz
package fuzz
import (
"database/sql"
"fmt"
"io/ioutil"
"log"
"os"
"os/exec"
"path"
"strconv"
"strings"
"time"
_ "github.com/go-sql-driver/mysql"
"github.com/pragmatwice/go-squirrel/comparer"
// to pin dep in go.mod
_ "github.com/oraluben/go-fuzz/go-fuzz-dep"
"github.com/pingcap/tidb/config"
"github.com/pingcap/tidb/tidb-server/internal"
)
var tidbConn *sql.DB = nil
var mysqlConn *sql.DB = nil
var err error
var fuzzLogger *log.Logger
var verboseLevel int = 0
func init() {
os.Args = []string{os.Args[0]}
if verboseStr := os.Getenv("TIFUZZ_VERBOSE"); verboseStr != "" {
verboseLevel, err = strconv.Atoi(verboseStr)
if err != nil {
panic(err)
}
}
instanceDir, err := ioutil.TempDir("", "tidb-fuzz.*")
if err != nil {
panic(err)
}
sockName := path.Join(instanceDir, "tidb.sock")
storeDir := path.Join(instanceDir, "store")
tmpDir := path.Join(instanceDir, "tmp")
logFile := path.Join(instanceDir, "tidb.log")
fuzzLogFile := path.Join(instanceDir, "fuzz.log")
slowQueryFile := path.Join(instanceDir, "tidb-slow-query.log")
fuzzLog, err := os.OpenFile(fuzzLogFile, os.O_CREATE|os.O_WRONLY, 0666)
if err != nil {
panic(err)
}
fuzzLogger = log.New(fuzzLog, "", log.Lshortfile|log.LstdFlags)
go internal.MainWithConfig(func(c *config.Config) {
c.Host = ""
c.Port = 0
c.Socket = sockName
c.Store = "unistore"
c.Path = storeDir
c.Status.ReportStatus = false
c.TempStoragePath = tmpDir
c.Log.File.Filename = logFile
c.Log.SlowQueryFile = slowQueryFile
})
mysqlInstanceDir := strings.ReplaceAll(instanceDir, "tidb-fuzz", "mysql-fuzz")
err = os.Mkdir(mysqlInstanceDir, os.ModePerm)
if err != nil {
fuzzLogger.Panic("failed to create mysql instance datadir:", err)
}
mysqlSockName := path.Join(mysqlInstanceDir, "mysql.sock")
mysqlPidFile := path.Join(mysqlInstanceDir, "mysql.pid")
mysqlDataDir := path.Join(mysqlInstanceDir, "data")
mysqlLogFile := path.Join(mysqlInstanceDir, "mysql.log")
mysqlSlowLogFile := path.Join(mysqlInstanceDir, "mysql-slow-query.log")
// ref to https://dev.mysql.com/doc/refman/8.0/en/multiple-servers.html
mysqldInit := exec.Command("mysqld", "--initialize-insecure", fmt.Sprintf("--datadir=%s", mysqlDataDir))
if verboseLevel > 0 {
fuzzLogger.Printf("mysqld init command: %v", mysqldInit.String())
}
err = mysqldInit.Run()
if err != nil {
fuzzLogger.Panic("failed to initialize mysqld:", err)
}
mysqld := exec.Command("mysqld",
fmt.Sprintf("--datadir=%s", mysqlDataDir),
fmt.Sprintf("--pid-file=%s", mysqlPidFile),
fmt.Sprintf("--socket=%s", mysqlSockName),
fmt.Sprintf("--log-error=%s", mysqlLogFile),
fmt.Sprintf("--slow-query-log-file=%s", mysqlSlowLogFile),
"--skip-networking",
"--mysqlx=0")
if verboseLevel > 0 {
fuzzLogger.Printf("mysqld start command: %v", mysqld.String())
}
err = mysqld.Start()
if err != nil {
fuzzLogger.Panic("failed to start mysqld:", err)
}
tc, mc := make(chan *sql.DB), make(chan *sql.DB)
go sqlConnect(sockName, tc)
go sqlConnect(mysqlSockName, mc)
tidbConn, mysqlConn = <-tc, <-mc
syncSqlMode()
fuzzLogger.Printf("succeed to start tidb (in %v) and mysql (in %v) for fuzz", instanceDir, mysqlInstanceDir)
fmt.Println(instanceDir) // to notify go fuzz
}
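// syncSqlMode copies tidb's sql_mode over to mysql (dropping
// NO_AUTO_CREATE_USER, which mysql does not support) so both engines
// evaluate statements under identical modes.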
func syncSqlMode() {
var sqlMode string
err = tidbConn.QueryRow("select @@sql_mode").Scan(&sqlMode)
if err != nil {
fuzzLogger.Panic("failed to get sql_mode from tidb:", err)
}
// mysql does not support NO_AUTO_CREATE_USER
sqlMode = strings.ReplaceAll(sqlMode, "NO_AUTO_CREATE_USER", "")
_, err = mysqlConn.Exec(fmt.Sprintf("set sql_mode = '%s'", sqlMode))
if err != nil {
fuzzLogger.Panic("failed to set sql_mode for mysql:", err)
}
}
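// sqlConnect waits up to five seconds for the unix socket to appear, opens a
// connection, switches to the `test` database and sends the handle on cc.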
func sqlConnect(sockName string, cc chan *sql.DB) {
var conn *sql.DB
for i := 0; i < 5; i++ {
if _, err := os.Stat(sockName); os.IsNotExist(err) {
time.Sleep(time.Second)
continue
}
conn, err = sql.Open("mysql", fmt.Sprintf("root@unix(%s)/", sockName))
if err != nil {
time.Sleep(time.Second)
} else {
break
}
}
if err != nil {
fuzzLogger.Panicf("%s not up after 5 seconds", sockName)
}
_, _ = conn.Exec("create database test") // useful in mysql
_, err = conn.Exec("use test")
if err != nil {
fuzzLogger.Panicf("%s failed to use database `test`: %v", sockName, err)
}
cc <- conn
}
func isSelect(sql string) bool {
sql = strings.TrimLeft(sql, " (\n")
sql = strings.ToLower(sql)
return strings.HasPrefix(sql, "select") || strings.HasPrefix(sql, "with")
}
func isCreate(sql string) bool {
sql = strings.TrimLeft(sql, " (\n")
sql = strings.ToLower(sql)
return strings.HasPrefix(sql, "create")
}
// Fuzz is the required name by go-fuzz
func Fuzz(raw []byte) int {
query := string(raw)
// fmt.Println(query)
tidbErr, mysqlErr := make(chan error), make(chan error)
if isSelect(query) {
if verboseLevel > 0 {
fuzzLogger.Printf("[query] %v", query)
}
exec := func(conn *sql.DB, rows **sql.Rows, ec chan error) {
var err error
*rows, err = conn.Query(query)
ec <- err
}
var tidbRows, mysqlRows *sql.Rows
go exec(tidbConn, &tidbRows, tidbErr)
go exec(mysqlConn, &mysqlRows, mysqlErr)
te := <-tidbErr
me := <-mysqlErr
if te != nil || me != nil {
if te != nil && me != nil {
if te.Error() != me.Error() {
fuzzLogger.Panic(fmt.Sprintf("[both err] tidb: %v; mysql: %v", te, me))
} else {
return 0
}
} else {
fuzzLogger.Panic(fmt.Sprintf("[one side err] tidb: %v; mysql: %v", te, me))
}
}
tidbSR, err := comparer.NewSortedRows(tidbRows)
if err != nil {
fuzzLogger.Panic("failed to create sorted rows for tidb:", err)
}
mysqlSR, err := comparer.NewSortedRows(mysqlRows)
if err != nil {
fuzzLogger.Panic("failed to create sorted rows for mysql:", err)
}
k, l, r := comparer.DiffMetaInfo(tidbSR, mysqlSR)
if k != comparer.NoDiff {
fuzzLogger.Panic(fmt.Sprintf("[metainfo diff (%v)] tidb: %v, mysql: %v\n", k, l, r))
}
dr := comparer.DiffData(tidbSR, mysqlSR)
if dr != nil {
fuzzLogger.Panic(fmt.Sprintf("[data diff] %v", dr))
}
} else {
if verboseLevel > 0 {
fuzzLogger.Printf("[ddl/dml] %v", query)
}
exec := func(conn *sql.DB, ec chan error) {
_, err := conn.Exec(query)
ec <- err
}
go exec(tidbConn, tidbErr)
go exec(mysqlConn, mysqlErr)
te := <-tidbErr
me := <-mysqlErr
// assume that ddls are correct
if te != nil || me != nil {
if isCreate(query) {
fuzzLogger.Panicf("[ddl fatal error] stmt: %v; tidb: %v; mysql: %v\n", query, te, me)
} else {
if te != nil && me != nil && te.Error() == me.Error() {
fuzzLogger.Printf("[dml error] stmt: %v; tidb: %v; mysql: %v\n", query, te, me)
} else {
fuzzLogger.Panicf("[dml fatal error] stmt: %v; tidb: %v; mysql: %v\n", query, te, me)
}
}
}
}
return 1
}
| [
"\"TIFUZZ_VERBOSE\""
]
| []
| [
"TIFUZZ_VERBOSE"
]
| [] | ["TIFUZZ_VERBOSE"] | go | 1 | 0 | |
cmd/lncli/main.go | // Copyright (c) 2013-2017 The btcsuite developers
// Copyright (c) 2015-2016 The Decred developers
// Copyright (C) 2015-2017 The Lightning Network Developers
package main
import (
"fmt"
"io/ioutil"
"os"
"os/user"
"path/filepath"
"strings"
macaroon "gopkg.in/macaroon.v2"
"github.com/btcsuite/btcutil"
"github.com/lightningnetwork/lnd/build"
"github.com/lightningnetwork/lnd/lncfg"
"github.com/lightningnetwork/lnd/lnrpc"
"github.com/lightningnetwork/lnd/macaroons"
"github.com/urfave/cli"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials"
)
const (
defaultDataDir = "data"
defaultChainSubDir = "chain"
defaultTLSCertFilename = "tls.cert"
defaultMacaroonFilename = "admin.macaroon"
defaultRPCPort = "10009"
defaultRPCHostPort = "localhost:" + defaultRPCPort
)
var (
defaultLndDir = btcutil.AppDataDir("lnd", false)
defaultTLSCertPath = filepath.Join(defaultLndDir, defaultTLSCertFilename)
// maxMsgRecvSize is the largest message our client will receive. We
// set this to ~50Mb atm.
maxMsgRecvSize = grpc.MaxCallRecvMsgSize(1 * 1024 * 1024 * 50)
)
func fatal(err error) {
fmt.Fprintf(os.Stderr, "[lncli] %v\n", err)
os.Exit(1)
}
func getWalletUnlockerClient(ctx *cli.Context) (lnrpc.WalletUnlockerClient, func()) {
conn := getClientConn(ctx, true)
cleanUp := func() {
conn.Close()
}
return lnrpc.NewWalletUnlockerClient(conn), cleanUp
}
func getClient(ctx *cli.Context) (lnrpc.LightningClient, func()) {
conn := getClientConn(ctx, false)
cleanUp := func() {
conn.Close()
}
return lnrpc.NewLightningClient(conn), cleanUp
}
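// getClientConn dials lnd over TLS and, unless --no-macaroons is set or
// macaroon processing is skipped, attaches timeout- and IP-constrained
// macaroon credentials.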
func getClientConn(ctx *cli.Context, skipMacaroons bool) *grpc.ClientConn {
// First, we'll parse the args from the command.
tlsCertPath, macPath, err := extractPathArgs(ctx)
if err != nil {
fatal(err)
}
// Load the specified TLS certificate and build transport credentials
// with it.
creds, err := credentials.NewClientTLSFromFile(tlsCertPath, "")
if err != nil {
fatal(err)
}
// Create a dial options array.
opts := []grpc.DialOption{
grpc.WithTransportCredentials(creds),
}
// Only process macaroon credentials if --no-macaroons isn't set and
// if we're not skipping macaroon processing.
if !ctx.GlobalBool("no-macaroons") && !skipMacaroons {
// Load the specified macaroon file.
macBytes, err := ioutil.ReadFile(macPath)
if err != nil {
fatal(fmt.Errorf("unable to read macaroon path (check "+
"the network setting!): %v", err))
}
mac := &macaroon.Macaroon{}
if err = mac.UnmarshalBinary(macBytes); err != nil {
fatal(fmt.Errorf("unable to decode macaroon: %v", err))
}
macConstraints := []macaroons.Constraint{
// We add a time-based constraint to prevent replay of the
// macaroon. It's good for 60 seconds by default to make up for
// any discrepancy between client and server clocks, but leaking
// the macaroon before it becomes invalid makes it possible for
// an attacker to reuse the macaroon. In addition, the validity
// time of the macaroon is extended by the time the server clock
// is behind the client clock, or shortened by the time the
// server clock is ahead of the client clock (or invalid
// altogether if, in the latter case, this time is more than 60
// seconds).
// TODO(aakselrod): add better anti-replay protection.
macaroons.TimeoutConstraint(ctx.GlobalInt64("macaroontimeout")),
// Lock macaroon down to a specific IP address.
macaroons.IPLockConstraint(ctx.GlobalString("macaroonip")),
// ... Add more constraints if needed.
}
// Apply constraints to the macaroon.
constrainedMac, err := macaroons.AddConstraints(mac, macConstraints...)
if err != nil {
fatal(err)
}
// Now we append the macaroon credentials to the dial options.
cred := macaroons.NewMacaroonCredential(constrainedMac)
opts = append(opts, grpc.WithPerRPCCredentials(cred))
}
// We need to use a custom dialer so we can also connect to unix sockets
// and not just TCP addresses.
genericDialer := lncfg.ClientAddressDialer(defaultRPCPort)
opts = append(opts, grpc.WithDialer(genericDialer))
opts = append(opts, grpc.WithDefaultCallOptions(maxMsgRecvSize))
conn, err := grpc.Dial(ctx.GlobalString("rpcserver"), opts...)
if err != nil {
fatal(fmt.Errorf("unable to connect to RPC server: %v", err))
}
return conn
}
// extractPathArgs parses the TLS certificate and macaroon paths from the
// command.
func extractPathArgs(ctx *cli.Context) (string, string, error) {
// We'll start off by parsing the active chain and network. These are
// needed to determine the correct path to the macaroon when not
// specified.
chain := strings.ToLower(ctx.GlobalString("chain"))
switch chain {
case "bitcoin", "litecoin":
default:
return "", "", fmt.Errorf("unknown chain: %v", chain)
}
network := strings.ToLower(ctx.GlobalString("network"))
switch network {
case "mainnet", "testnet", "regtest", "simnet":
default:
return "", "", fmt.Errorf("unknown network: %v", network)
}
// We'll now fetch the lnddir so we can make a decision on how to
// properly read the macaroons (if needed) and also the cert. This will
// either be the default, or will have been overwritten by the end
// user.
lndDir := cleanAndExpandPath(ctx.GlobalString("lnddir"))
// If the macaroon path as been manually provided, then we'll only
// target the specified file.
var macPath string
if ctx.GlobalString("macaroonpath") != "" {
macPath = cleanAndExpandPath(ctx.GlobalString("macaroonpath"))
} else {
// Otherwise, we'll go into the path:
// lnddir/data/chain/<chain>/<network> in order to fetch the
// macaroon that we need.
macPath = filepath.Join(
lndDir, defaultDataDir, defaultChainSubDir, chain,
network, defaultMacaroonFilename,
)
}
tlsCertPath := cleanAndExpandPath(ctx.GlobalString("tlscertpath"))
// If a custom lnd directory was set, we'll also check if custom paths
// for the TLS cert and macaroon file were set as well. If not, we'll
// override their paths so they can be found within the custom lnd
// directory set. This allows us to set a custom lnd directory, along
// with custom paths to the TLS cert and macaroon file.
if lndDir != defaultLndDir {
tlsCertPath = filepath.Join(lndDir, defaultTLSCertFilename)
}
return tlsCertPath, macPath, nil
}
func main() {
app := cli.NewApp()
app.Name = "lncli"
app.Version = build.Version()
app.Usage = "control plane for your Lightning Network Daemon (lnd)"
app.Flags = []cli.Flag{
cli.StringFlag{
Name: "rpcserver",
Value: defaultRPCHostPort,
Usage: "host:port of ln daemon",
},
cli.StringFlag{
Name: "lnddir",
Value: defaultLndDir,
Usage: "path to lnd's base directory",
},
cli.StringFlag{
Name: "tlscertpath",
Value: defaultTLSCertPath,
Usage: "path to TLS certificate",
},
cli.StringFlag{
Name: "chain, c",
Usage: "the chain lnd is running on e.g. bitcoin",
Value: "bitcoin",
},
cli.StringFlag{
Name: "network, n",
Usage: "the network lnd is running on e.g. mainnet, " +
"testnet, etc.",
Value: "mainnet",
},
cli.BoolFlag{
Name: "no-macaroons",
Usage: "disable macaroon authentication",
},
cli.StringFlag{
Name: "macaroonpath",
Usage: "path to macaroon file",
},
cli.Int64Flag{
Name: "macaroontimeout",
Value: 60,
Usage: "anti-replay macaroon validity time in seconds",
},
cli.StringFlag{
Name: "macaroonip",
Usage: "if set, lock macaroon to specific IP address",
},
}
app.Commands = []cli.Command{
createCommand,
unlockCommand,
changePasswordCommand,
newAddressCommand,
estimateFeeCommand,
sendManyCommand,
sendCoinsCommand,
listUnspentCommand,
connectCommand,
disconnectCommand,
openChannelCommand,
closeChannelCommand,
closeAllChannelsCommand,
abandonChannelCommand,
listPeersCommand,
walletBalanceCommand,
channelBalanceCommand,
getInfoCommand,
pendingChannelsCommand,
sendPaymentCommand,
payInvoiceCommand,
sendToRouteCommand,
addInvoiceCommand,
lookupInvoiceCommand,
listInvoicesCommand,
listChannelsCommand,
closedChannelsCommand,
listPaymentsCommand,
describeGraphCommand,
getChanInfoCommand,
getNodeInfoCommand,
queryRoutesCommand,
getNetworkInfoCommand,
debugLevelCommand,
decodePayReqCommand,
listChainTxnsCommand,
stopCommand,
signMessageCommand,
verifyMessageCommand,
feeReportCommand,
updateChannelPolicyCommand,
forwardingHistoryCommand,
exportChanBackupCommand,
verifyChanBackupCommand,
restoreChanBackupCommand,
}
// Add any extra commands determined by build flags.
app.Commands = append(app.Commands, autopilotCommands()...)
app.Commands = append(app.Commands, invoicesCommands()...)
app.Commands = append(app.Commands, routerCommands()...)
app.Commands = append(app.Commands, walletCommands()...)
app.Commands = append(app.Commands, watchtowerCommands()...)
app.Commands = append(app.Commands, wtclientCommands()...)
if err := app.Run(os.Args); err != nil {
fatal(err)
}
}
// cleanAndExpandPath expands environment variables and leading ~ in the
// passed path, cleans the result, and returns it.
// This function is taken from https://github.com/btcsuite/btcd
func cleanAndExpandPath(path string) string {
if path == "" {
return ""
}
// Expand initial ~ to OS specific home directory.
if strings.HasPrefix(path, "~") {
var homeDir string
user, err := user.Current()
if err == nil {
homeDir = user.HomeDir
} else {
homeDir = os.Getenv("HOME")
}
path = strings.Replace(path, "~", homeDir, 1)
}
// NOTE: The os.ExpandEnv doesn't work with Windows-style %VARIABLE%,
// but the variables can still be expanded via POSIX-style $VARIABLE.
return filepath.Clean(os.ExpandEnv(path))
}
| [
"\"HOME\""
]
| []
| [
"HOME"
]
| [] | ["HOME"] | go | 1 | 0 | |
frappe/app.py | # -*- coding: utf-8 -*-
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import os
import MySQLdb
from werkzeug.wrappers import Request
from werkzeug.local import LocalManager
from werkzeug.exceptions import HTTPException, NotFound
from werkzeug.contrib.profiler import ProfilerMiddleware
from werkzeug.wsgi import SharedDataMiddleware
from werkzeug.serving import run_with_reloader
import frappe
import frappe.handler
import frappe.auth
import frappe.api
import frappe.async
import frappe.utils.response
import frappe.website.render
from frappe.utils import get_site_name, get_site_path
from frappe.middlewares import StaticDataMiddleware
from frappe.utils.error import make_error_snapshot
from frappe.core.doctype.communication.comment import update_comments_in_parent_after_request
local_manager = LocalManager([frappe.local])
_site = None
_sites_path = os.environ.get("SITES_PATH", ".")
class RequestContext(object):
def __init__(self, environ):
self.request = Request(environ)
def __enter__(self):
init_request(self.request)
def __exit__(self, type, value, traceback):
frappe.destroy()
@Request.application
def application(request):
response = None
try:
rollback = True
init_request(request)
if frappe.local.form_dict.cmd:
response = frappe.handler.handle()
elif frappe.request.path.startswith("/api/"):
if frappe.local.form_dict.data is None:
frappe.local.form_dict.data = request.get_data()
response = frappe.api.handle()
elif frappe.request.path.startswith('/backups'):
response = frappe.utils.response.download_backup(request.path)
elif frappe.request.path.startswith('/private/files/'):
response = frappe.utils.response.download_private_file(request.path)
elif frappe.local.request.method in ('GET', 'HEAD'):
response = frappe.website.render.render()
else:
raise NotFound
except HTTPException, e:
frappe.logger().error('Request Error', exc_info=True)
return e
except frappe.SessionStopped, e:
response = frappe.utils.response.handle_session_stopped()
except Exception, e:
response = handle_exception(e)
else:
rollback = after_request(rollback)
finally:
if frappe.local.request.method in ("POST", "PUT") and frappe.db and rollback:
frappe.db.rollback()
# set cookies
if response and hasattr(frappe.local, 'cookie_manager'):
frappe.local.cookie_manager.flush_cookies(response=response)
frappe.destroy()
return response
def init_request(request):
frappe.local.request = request
frappe.local.is_ajax = frappe.get_request_header("X-Requested-With")=="XMLHttpRequest"
site = _site or request.headers.get('X-Frappe-Site-Name') or get_site_name(request.host)
frappe.init(site=site, sites_path=_sites_path)
if not (frappe.local.conf and frappe.local.conf.db_name):
# site does not exist
raise NotFound
if frappe.local.conf.get('maintenance_mode'):
raise frappe.SessionStopped
make_form_dict(request)
frappe.local.http_request = frappe.auth.HTTPRequest()
def make_form_dict(request):
frappe.local.form_dict = frappe._dict({ k:v[0] if isinstance(v, (list, tuple)) else v \
for k, v in (request.form or request.args).iteritems() })
if "_" in frappe.local.form_dict:
# _ is passed by $.ajax so that the request is not cached by the browser. So, remove _ from form_dict
frappe.local.form_dict.pop("_")
def handle_exception(e):
http_status_code = getattr(e, "http_status_code", 500)
if (http_status_code==500
and isinstance(e, MySQLdb.OperationalError)
and e.args[0] in (1205, 1213)):
# 1205 = lock wait timeout
# 1213 = deadlock
		# map lock contention to HTTP 508 so callers can treat it as retryable
http_status_code = 508
if frappe.local.is_ajax or 'application/json' in frappe.local.request.headers.get('Accept', ''):
response = frappe.utils.response.report_error(http_status_code)
else:
traceback = "<pre>"+frappe.get_traceback()+"</pre>"
if frappe.local.flags.disable_traceback:
traceback = ""
frappe.respond_as_web_page("Server Error",
traceback,
http_status_code=http_status_code)
response = frappe.website.render.render("message", http_status_code=http_status_code)
if e.__class__ == frappe.AuthenticationError:
if hasattr(frappe.local, "login_manager"):
frappe.local.login_manager.clear_cookies()
if http_status_code >= 500:
frappe.logger().error('Request Error', exc_info=True)
make_error_snapshot(e)
return response
def after_request(rollback):
if (frappe.local.request.method in ("POST", "PUT") or frappe.local.flags.commit) and frappe.db:
if frappe.db.transaction_writes:
frappe.db.commit()
rollback = False
# update session
if getattr(frappe.local, "session_obj", None):
updated_in_db = frappe.local.session_obj.update()
if updated_in_db:
frappe.db.commit()
rollback = False
update_comments_in_parent_after_request()
return rollback
application = local_manager.make_middleware(application)
def serve(port=8000, profile=False, site=None, sites_path='.'):
global application, _site, _sites_path
_site = site
_sites_path = sites_path
from werkzeug.serving import run_simple
if profile:
application = ProfilerMiddleware(application, sort_by=('cumtime', 'calls'))
if not os.environ.get('NO_STATICS'):
application = SharedDataMiddleware(application, {
b'/assets': os.path.join(sites_path, 'assets').encode("utf-8"),
})
application = StaticDataMiddleware(application, {
b'/files': os.path.abspath(sites_path).encode("utf-8")
})
application.debug = True
application.config = {
'SERVER_NAME': 'localhost:8000'
}
run_simple('0.0.0.0', int(port), application, use_reloader=True,
use_debugger=True, use_evalex=True, threaded=True)
| []
| []
| [
"NO_STATICS",
"SITES_PATH"
]
| [] | ["NO_STATICS", "SITES_PATH"] | python | 2 | 0 | |
main.go | package main
import (
"log"
"os"
"strconv"
"time"
_ "compgen-api-docs/routers"
"github.com/astaxie/beego"
"github.com/astaxie/beego/orm"
"github.com/astaxie/beego/plugins/cors"
_ "github.com/go-sql-driver/mysql"
)
func init() {
orm.RegisterDriver("mysql", orm.DRMySQL)
dbURL := beego.AppConfig.String("mysqluser") + ":" + beego.AppConfig.String("mysqlpass") + "@tcp(" + beego.AppConfig.String("mysqlurls") + ":3306)/" + beego.AppConfig.String("mysqldb")
orm.RegisterDataBase("default", "mysql", dbURL)
orm.DefaultTimeLoc, _ = time.LoadLocation("America/Sao_Paulo")
orm.Debug = true
}
func main() {
if os.Getenv("PORT") != "" {
log.Println("Env $PORT :", os.Getenv("PORT"))
port, err := strconv.Atoi(os.Getenv("PORT"))
if err != nil {
log.Fatal(err)
log.Fatal("$PORT must be set")
}
log.Println("port : ", port)
beego.BConfig.Listen.HTTPSPort = port
}
if os.Getenv("BEEGO_ENV") != "" {
log.Println("Env $BEEGO_ENV :", os.Getenv("BEEGO_ENV"))
beego.BConfig.RunMode = os.Getenv("BEEGO_ENV")
}
if beego.BConfig.RunMode == "dev" {
beego.BConfig.WebConfig.DirectoryIndex = true
beego.BConfig.WebConfig.StaticDir["/swagger"] = "swagger"
}
beego.InsertFilter("*", beego.BeforeRouter, cors.Allow(&cors.Options{
AllowOrigins: []string{"localhost:8080", "localhost:3000", "*"},
AllowMethods: []string{"PUT", "PATCH", "GET", "POST", "OPTIONS", "DELETE"},
AllowHeaders: []string{"Origin", "Access-Control-Allow-Origin", "Access-Control-Allow-Headers", "X-Requested-With", "Content-Type", "Accept", "Connection", "Upgrade", "Token", "Authorization", "Websocket", "Set-Cookie", "withCredentials"},
ExposeHeaders: []string{"Content-Length", "Access-Control-Allow-Origin", "Origin", "Connection", "Upgrade", "Token", "Authorization", "Websocket", "Set-Cookie", "withCredentials"},
AllowCredentials: true,
}))
beego.Run()
}
| [
"\"PORT\"",
"\"PORT\"",
"\"PORT\"",
"\"BEEGO_ENV\"",
"\"BEEGO_ENV\"",
"\"BEEGO_ENV\""
]
| []
| [
"PORT",
"BEEGO_ENV"
]
| [] | ["PORT", "BEEGO_ENV"] | go | 2 | 0 | |
pkg/objstore/oss/oss.go | package oss
import (
"context"
"fmt"
"io"
"io/ioutil"
"math"
"math/rand"
"net/http"
"os"
"strconv"
"strings"
"testing"
"time"
alioss "github.com/aliyun/aliyun-oss-go-sdk/oss"
"github.com/go-kit/kit/log"
"github.com/pkg/errors"
"github.com/thanos-io/thanos/pkg/objstore"
"gopkg.in/yaml.v2"
)
// Part size for multi part upload.
const PartSize = 1024 * 1024 * 128
// Config stores the configuration for oss bucket.
type Config struct {
Endpoint string `yaml:"endpoint"`
Bucket string `yaml:"bucket"`
AccessKeyID string `yaml:"access_key_id"`
AccessKeySecret string `yaml:"access_key_secret"`
}
// Bucket implements the store.Bucket interface.
type Bucket struct {
name string
logger log.Logger
client *alioss.Client
config Config
bucket *alioss.Bucket
}
func NewTestBucket(t testing.TB) (objstore.Bucket, func(), error) {
c := Config{
Endpoint: os.Getenv("ALIYUNOSS_ENDPOINT"),
Bucket: os.Getenv("ALIYUNOSS_BUCKET"),
AccessKeyID: os.Getenv("ALIYUNOSS_ACCESS_KEY_ID"),
AccessKeySecret: os.Getenv("ALIYUNOSS_ACCESS_KEY_SECRET"),
}
if c.Endpoint == "" || c.AccessKeyID == "" || c.AccessKeySecret == "" {
return nil, nil, errors.New("aliyun oss endpoint or access_key_id or access_key_secret " +
"is not present in config file")
}
if c.Bucket != "" && os.Getenv("THANOS_ALLOW_EXISTING_BUCKET_USE") == "true" {
t.Log("ALIYUNOSS_BUCKET is defined. Normally this tests will create temporary bucket " +
"and delete it after test. Unset ALIYUNOSS_BUCKET env variable to use default logic. If you really want to run " +
"tests against provided (NOT USED!) bucket, set THANOS_ALLOW_EXISTING_BUCKET_USE=true.")
return NewTestBucketFromConfig(t, c, true)
}
return NewTestBucketFromConfig(t, c, false)
}
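// calculateChunks returns how many full PartSize chunks r contains and the
// size of the trailing partial chunk; only *os.File and *strings.Reader are
// supported.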
func calculateChunks(name string, r io.Reader) (int, int64, error) {
switch f := r.(type) {
case *os.File:
if fileInfo, err := f.Stat(); err == nil {
s := fileInfo.Size()
return int(math.Floor(float64(s) / PartSize)), s % PartSize, nil
}
case *strings.Reader:
return int(math.Floor(float64(f.Size()) / PartSize)), f.Size() % PartSize, nil
}
return -1, 0, errors.New("unsupported implement of io.Reader")
}
// Upload the contents of the reader as an object into the bucket.
func (b *Bucket) Upload(ctx context.Context, name string, r io.Reader) error {
chunksnum, lastslice, err := calculateChunks(name, r)
if err != nil {
return err
}
ncloser := ioutil.NopCloser(r)
switch chunksnum {
case 0:
if err := b.bucket.PutObject(name, ncloser); err != nil {
return errors.Wrap(err, "failed to upload oss object")
}
default:
{
init, err := b.bucket.InitiateMultipartUpload(name)
if err != nil {
return errors.Wrap(err, "failed to initiate multi-part upload")
}
chunk := 0
uploadEveryPart := func(everypartsize int64, cnk int) (alioss.UploadPart, error) {
prt, err := b.bucket.UploadPart(init, ncloser, everypartsize, cnk)
if err != nil {
if err := b.bucket.AbortMultipartUpload(init); err != nil {
return prt, errors.Wrap(err, "failed to abort multi-part upload")
}
return prt, errors.Wrap(err, "failed to upload multi-part chunk")
}
return prt, nil
}
var parts []alioss.UploadPart
for ; chunk < chunksnum; chunk++ {
part, err := uploadEveryPart(PartSize, chunk+1)
if err != nil {
return errors.Wrap(err, "failed to upload every part")
}
parts = append(parts, part)
}
if lastslice != 0 {
part, err := uploadEveryPart(lastslice, chunksnum+1)
if err != nil {
return errors.Wrap(err, "failed to upload the last chunk")
}
parts = append(parts, part)
}
if _, err := b.bucket.CompleteMultipartUpload(init, parts); err != nil {
return errors.Wrap(err, "failed to set multi-part upload completive")
}
}
}
return nil
}
// Delete removes the object with the given name.
func (b *Bucket) Delete(ctx context.Context, name string) error {
if err := b.bucket.DeleteObject(name); err != nil {
return errors.Wrap(err, "delete oss object")
}
return nil
}
// ObjectSize returns the size of the specified object.
func (b *Bucket) ObjectSize(ctx context.Context, name string) (uint64, error) {
// refer to https://github.com/aliyun/aliyun-oss-go-sdk/blob/cee409f5b4d75d7ad077cacb7e6f4590a7f2e172/oss/bucket.go#L668.
m, err := b.bucket.GetObjectMeta(name)
if err != nil {
return 0, err
}
if v, ok := m["Content-Length"]; ok {
if len(v) == 0 {
return 0, errors.New("content-length header has no values")
}
ret, err := strconv.ParseUint(v[0], 10, 64)
if err != nil {
return 0, errors.Wrap(err, "convert content-length")
}
return ret, nil
}
return 0, errors.New("content-length header not found")
}
// NewBucket returns a new Bucket using the provided oss config values.
func NewBucket(logger log.Logger, conf []byte, component string) (*Bucket, error) {
var config Config
if err := yaml.Unmarshal(conf, &config); err != nil {
return nil, errors.Wrap(err, "parse aliyun oss config file failed")
}
if config.Endpoint == "" || config.Bucket == "" || config.AccessKeyID == "" || config.AccessKeySecret == "" {
return nil, errors.New("aliyun oss endpoint or bucket or access_key_id or access_key_secret " +
"is not present in config file")
}
client, err := alioss.New(config.Endpoint, config.AccessKeyID, config.AccessKeySecret)
if err != nil {
return nil, errors.Wrap(err, "create aliyun oss client failed")
}
bk, err := client.Bucket(config.Bucket)
if err != nil {
return nil, errors.Wrapf(err, "use aliyun oss bucket %s failed", config.Bucket)
}
bkt := &Bucket{
logger: logger,
client: client,
name: config.Bucket,
config: config,
bucket: bk,
}
return bkt, nil
}
// Iter calls f for each entry in the given directory (not recursive). The argument to f is the full
// object name including the prefix of the inspected directory.
func (b *Bucket) Iter(ctx context.Context, dir string, f func(string) error) error {
if dir != "" {
dir = strings.TrimSuffix(dir, objstore.DirDelim) + objstore.DirDelim
}
marker := alioss.Marker("")
for {
if err := ctx.Err(); err != nil {
return errors.Wrap(err, "context closed while iterating bucket")
}
objects, err := b.bucket.ListObjects(alioss.Prefix(dir), alioss.Delimiter(objstore.DirDelim), marker)
if err != nil {
return errors.Wrap(err, "listing aliyun oss bucket failed")
}
marker = alioss.Marker(objects.NextMarker)
for _, object := range objects.Objects {
if err := f(object.Key); err != nil {
return errors.Wrapf(err, "callback func invoke for object %s failed ", object.Key)
}
}
for _, object := range objects.CommonPrefixes {
if err := f(object); err != nil {
return errors.Wrapf(err, "callback func invoke for directory %s failed", object)
}
}
if !objects.IsTruncated {
break
}
}
return nil
}
func (b *Bucket) Name() string {
return b.name
}
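// NewTestBucketFromConfig creates a randomly named bucket when none is
// configured and returns a cleanup func; with reuseBucket it only verifies
// the existing bucket is empty and skips deletion.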
func NewTestBucketFromConfig(t testing.TB, c Config, reuseBucket bool) (objstore.Bucket, func(), error) {
if c.Bucket == "" {
src := rand.NewSource(time.Now().UnixNano())
bktToCreate := strings.Replace(fmt.Sprintf("test_%s_%x", strings.ToLower(t.Name()), src.Int63()), "_", "-", -1)
if len(bktToCreate) >= 63 {
bktToCreate = bktToCreate[:63]
}
testclient, err := alioss.New(c.Endpoint, c.AccessKeyID, c.AccessKeySecret)
if err != nil {
return nil, nil, errors.Wrap(err, "create aliyun oss client failed")
}
if err := testclient.CreateBucket(bktToCreate); err != nil {
return nil, nil, errors.Wrapf(err, "create aliyun oss bucket %s failed", bktToCreate)
}
c.Bucket = bktToCreate
}
bc, err := yaml.Marshal(c)
if err != nil {
return nil, nil, err
}
b, err := NewBucket(log.NewNopLogger(), bc, "thanos-aliyun-oss-test")
if err != nil {
return nil, nil, err
}
if reuseBucket {
if err := b.Iter(context.Background(), "", func(f string) error {
return errors.Errorf("bucket %s is not empty", c.Bucket)
}); err != nil {
return nil, nil, errors.Wrapf(err, "oss check bucket %s", c.Bucket)
}
t.Log("WARNING. Reusing", c.Bucket, "Aliyun OSS bucket for OSS tests. Manual cleanup afterwards is required")
return b, func() {}, nil
}
return b, func() {
objstore.EmptyBucket(t, context.Background(), b)
if err := b.client.DeleteBucket(c.Bucket); err != nil {
t.Logf("deleting bucket %s failed: %s", c.Bucket, err)
}
}, nil
}
func (b *Bucket) Close() error { return nil }
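// setRange builds an alioss.Range option covering [start, end], clamping end
// to the object's Content-Length.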
func (b *Bucket) setRange(start, end int64, name string) (alioss.Option, error) {
var opt alioss.Option
if 0 <= start && start <= end {
header, err := b.bucket.GetObjectMeta(name)
if err != nil {
return nil, err
}
size, err := strconv.ParseInt(header["Content-Length"][0], 10, 64)
if err != nil {
return nil, err
}
if end > size {
end = size - 1
}
opt = alioss.Range(start, end)
} else {
return nil, errors.Errorf("Invalid range specified: start=%d end=%d", start, end)
}
return opt, nil
}
func (b *Bucket) getRange(ctx context.Context, name string, off, length int64) (io.ReadCloser, error) {
if len(name) == 0 {
return nil, errors.New("given object name should not empty")
}
var opts []alioss.Option
if length != -1 {
opt, err := b.setRange(off, off+length-1, name)
if err != nil {
return nil, err
}
opts = append(opts, opt)
}
resp, err := b.bucket.GetObject(name, opts...)
if err != nil {
return nil, err
}
return resp, nil
}
// Get returns a reader for the given object name.
func (b *Bucket) Get(ctx context.Context, name string) (io.ReadCloser, error) {
return b.getRange(ctx, name, 0, -1)
}
func (b *Bucket) GetRange(ctx context.Context, name string, off, length int64) (io.ReadCloser, error) {
return b.getRange(ctx, name, off, length)
}
// Exists checks if the given object exists in the bucket.
func (b *Bucket) Exists(ctx context.Context, name string) (bool, error) {
exists, err := b.bucket.IsObjectExist(name)
if err != nil {
if b.IsObjNotFoundErr(err) {
return false, nil
}
return false, errors.Wrap(err, "cloud not check if object exists")
}
return exists, nil
}
// IsObjNotFoundErr returns true if error means that object is not found. Relevant to Get operations.
func (b *Bucket) IsObjNotFoundErr(err error) bool {
switch aliErr := err.(type) {
case alioss.ServiceError:
if aliErr.StatusCode == http.StatusNotFound {
return true
}
}
return false
}
| [
"\"ALIYUNOSS_ENDPOINT\"",
"\"ALIYUNOSS_BUCKET\"",
"\"ALIYUNOSS_ACCESS_KEY_ID\"",
"\"ALIYUNOSS_ACCESS_KEY_SECRET\"",
"\"THANOS_ALLOW_EXISTING_BUCKET_USE\""
]
| []
| [
"ALIYUNOSS_ACCESS_KEY_SECRET",
"ALIYUNOSS_ENDPOINT",
"ALIYUNOSS_ACCESS_KEY_ID",
"THANOS_ALLOW_EXISTING_BUCKET_USE",
"ALIYUNOSS_BUCKET"
]
| [] | ["ALIYUNOSS_ACCESS_KEY_SECRET", "ALIYUNOSS_ENDPOINT", "ALIYUNOSS_ACCESS_KEY_ID", "THANOS_ALLOW_EXISTING_BUCKET_USE", "ALIYUNOSS_BUCKET"] | go | 5 | 0 | |
keep/settings.py | """
Django settings for keep project.
Generated by 'django-admin startproject' using Django 3.1.3.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
import dj_database_url
import os
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'i^eq^8t&41lduyy%(r)z3ku904f0@obcl*g_j@f4u=t$o7$po0'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = False
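# Derive this machine's outbound IP by "connecting" a UDP socket to a public
# resolver; a datagram connect() sends no packets, it only selects a route.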
import socket
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect(("8.8.8.8", 80))
ALLOWED_HOSTS = [s.getsockname()[0], '127.0.0.1', 'google-keeps-clone.herokuapp.com']
s.close()
# Application definition
INSTALLED_APPS = [
'index',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'whitenoise.middleware.WhiteNoiseMiddleware',
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'keep.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'keep.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
AUTH_USER_MODEL = "index.User"
'''
SECURE_SSL_REDIRECT = True
SESSION_COOKIE_SECURE = True
CSRF_COOKIE_SECURE = True
SECURE_HSTS_SECONDS = 31536000
SECURE_HSTS_INCLUDE_SUBDOMAINS = True
SECURE_HSTS_PRELOAD = True
'''
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
MEDIA_URL = '/media/'
PROJECT_ROOT = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
STATIC_ROOT = os.path.join(PROJECT_ROOT, 'staticfiles')
COMPRESS_ENABLED = os.environ.get('COMPRESS_ENABLED', False)
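# Note: environment values are strings, so any non-empty value (even "False")
# is truthy here; unset the variable to disable compression.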
# Extra lookup directories for collectstatic to find static files
STATICFILES_DIRS = (
os.path.join(PROJECT_ROOT, 'static'),
)
# Add configuration for static files storage using whitenoise
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
prod_db = dj_database_url.config(conn_max_age=500)
DATABASES['default'].update(prod_db)
| []
| []
| [
"COMPRESS_ENABLED"
]
| [] | ["COMPRESS_ENABLED"] | python | 1 | 0 | |
strongbox-web-core/src/main/java/org/carlspring/strongbox/controllers/environment/EnvironmentInfoController.java | package org.carlspring.strongbox.controllers.environment;
import org.carlspring.strongbox.controllers.BaseController;
import java.lang.management.ManagementFactory;
import java.lang.management.RuntimeMXBean;
import java.util.*;
import java.util.stream.Collectors;
import com.fasterxml.jackson.core.JsonProcessingException;
import com.fasterxml.jackson.databind.ObjectMapper;
import io.swagger.annotations.Api;
import io.swagger.annotations.ApiOperation;
import io.swagger.annotations.ApiResponse;
import io.swagger.annotations.ApiResponses;
import org.springframework.http.HttpStatus;
import org.springframework.http.MediaType;
import org.springframework.http.ResponseEntity;
import org.springframework.security.access.prepost.PreAuthorize;
import org.springframework.web.bind.annotation.GetMapping;
import org.springframework.web.bind.annotation.RequestMapping;
import org.springframework.web.bind.annotation.RestController;
/**
* @author Pablo Tirado
*/
@RestController
@PreAuthorize("hasAuthority('ADMIN')")
@RequestMapping("/api/configuration/environment/info")
@Api("/api/configuration/environment/info")
public class EnvironmentInfoController
extends BaseController
{
private ObjectMapper objectMapper;
public EnvironmentInfoController(ObjectMapper objectMapper)
{
this.objectMapper = objectMapper;
}
@ApiOperation(value = "List all the environment variables, system properties and JVM arguments.")
@ApiResponses(value = { @ApiResponse(code = 200, message = "The list was returned."),
@ApiResponse(code = 500, message = "An error occurred.") })
@GetMapping(produces = MediaType.APPLICATION_JSON_VALUE)
public ResponseEntity getEnvironmentInfo()
{
logger.debug("Listing of all environment variables, system properties and JVM arguments");
Map<String, List<?>> propertiesMap = new LinkedHashMap<>();
propertiesMap.put("environment", getEnvironmentVariables());
propertiesMap.put("system", getSystemProperties());
propertiesMap.put("jvm", getJvmArguments());
try
{
return ResponseEntity.ok(objectMapper.writeValueAsString(propertiesMap));
}
catch (JsonProcessingException e)
{
logger.error(e.getMessage(), e);
return ResponseEntity.status(HttpStatus.INTERNAL_SERVER_ERROR)
.body(String.format("{ 'error': '%s' }", e.getMessage()));
}
}
private List<EnvironmentInfo> getEnvironmentVariables()
{
Map<String, String> environmentMap = System.getenv();
return environmentMap.entrySet().stream()
.sorted(Map.Entry.comparingByKey(String::compareToIgnoreCase))
.map(e -> new EnvironmentInfo(e.getKey(), e.getValue()))
.collect(Collectors.toList());
}
private List<EnvironmentInfo> getSystemProperties()
{
Properties systemProperties = System.getProperties();
return systemProperties.entrySet().stream()
.sorted(Comparator.comparing(e -> ((String) e.getKey()).toLowerCase()))
.map(e -> new EnvironmentInfo((String) e.getKey(), (String) e.getValue()))
.collect(Collectors.toList());
}
private List<String> getJvmArguments()
{
RuntimeMXBean runtimeMxBean = ManagementFactory.getRuntimeMXBean();
List<String> arguments = runtimeMxBean.getInputArguments();
return arguments.stream()
.sorted(String::compareToIgnoreCase)
.collect(Collectors.toList());
}
}
| []
| []
| []
| [] | [] | java | 0 | 0 | |
src/nninst/backend/tensorflow/attack/foolbox_attack_resnet_50_v2.py | import itertools
import os
from functools import partial
from foolbox.attacks import (
FGSM,
DeepFoolAttack,
IterativeGradientSignAttack,
SaliencyMapAttack,
)
from nninst import mode
from nninst.backend.tensorflow.attack.calc_density import trace_density
from nninst.backend.tensorflow.attack.calc_per_layer_metrics import (
get_per_layer_metrics,
)
from nninst.backend.tensorflow.attack.common import (
resnet_50_imagenet_real_metrics_per_layer,
resnet_50_imagenet_real_metrics_per_layer_targeted,
)
from nninst.backend.tensorflow.attack.cw_attack import cw_generate_adversarial_example
from nninst.backend.tensorflow.attack.cw_attacks import CarliniL2
from nninst.backend.tensorflow.attack.foolbox_attack import (
foolbox_generate_adversarial_example,
)
from nninst.backend.tensorflow.attack.random_attack import RandomAttack
from nninst.backend.tensorflow.dataset import imagenet
from nninst.backend.tensorflow.dataset.imagenet_preprocessing import _CHANNEL_MEANS
from nninst.backend.tensorflow.model.config import RESNET_50
from nninst.backend.tensorflow.trace.resnet_50_imagenet_class_trace_v3 import (
resnet_50_imagenet_class_trace_compact,
)
from nninst.backend.tensorflow.trace.utils import get_variant
from nninst.statistics import calc_trace_side_overlap_both_compact
from nninst.trace import (
TraceKey,
density_name,
early_stop_hook,
get_trace,
get_type2_trace,
get_type4_trace,
)
from nninst.utils.alternative import alt, alts
from nninst.utils.numpy import arg_approx
from nninst.utils.ray import ray_init
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "1"
if __name__ == "__main__":
# mode.debug()
# mode.distributed()
mode.local()
# ray_init("dell")
ray_init()
label = None
# label = "best_in_10"
# label = "worst_in_10"
# label = "import"
# label = "norm"
variant = None
# variant = "intersect"
use_weight = False
# use_weight = True
print(f"attack model with label {label} using Foolbox")
attack_name = alt(
"normal",
"DeepFool",
# "Adaptive_layer1",
# "Adaptive_layer2",
# "Adaptive_layer3",
# "Adaptive_layer4",
# "Adaptive_layer5",
# "Adaptive_layer6",
# "Adaptive_layer7",
# "Adaptive_layer8",
# "Adaptive_layer9",
# "Adaptive_cos_layer9",
# "Adaptive_layer4",
# "Adaptive_return_late",
# "Adaptive_random_start",
# "Adaptive_iterations_400",
# "Adaptive_layer4_iterations_400",
"FGSM",
# "FGSM_targeted",
# "FGSM_iterative_targeted",
"BIM",
"JSMA",
"CWL2",
# "CWL2_confidence=3.5",
# "CWL2_confidence=14",
# "CWL2_confidence=28",
# "CWL2_target=500",
# "CWL2_confidence=28_target=500",
# "CWL2_confidence=28_target=500",
# "patch",
# "patch_scale=0.1",
# "patch_scale=0.2",
# "patch_scale=0.3",
# "patch_scale=0.4",
# "patch_scale=0.5",
# "new_patch_scale=0.1",
# "new_patch_scale=0.2",
# "new_patch_scale=0.3",
# "new_patch_scale=0.4",
# "new_patch_scale=0.5",
# "negative_example",
# "negative_example_top5",
# "negative_example_out_of_top5",
# "Random",
)
topk_share_range = alt(
# 2,
# 3,
# 5,
# 6,
# 7,
# 8,
9,
# 10,
# 20,
)
example_num = alt(
# 100,
# 400,
# 700,
# 1000,
0
)
rank = alt(
# None,
1,
2,
# 3,
# 4,
# 5,
# 6,
# 7,
# 8,
# 9,
# 10,
)
early_stop_layer_num = alt(
# None,
# 10,
12,
)
use_point = alt(
# True,
False
)
compare_with_full = alt(
False,
# True,
)
for threshold, absolute_threshold in itertools.product(
[
# 1.0,
# 0.9,
# 0.7,
0.5,
# 0.3,
# 0.1,
],
[
None,
# 0.05,
# 0.1,
# 0.2,
# 0.3,
# 0.4,
],
):
per_layer_metrics = lambda: get_per_layer_metrics(
RESNET_50, threshold=threshold, absolute_threshold=absolute_threshold
)
trace_fn, trace_label, trace_type, trace_parameter = alts(
[get_trace, None, None, None],
# [get_channel_trace, "per_channel", "class_channel_trace", None],
# [
# partial(get_type2_trace, output_threshold=per_layer_metrics()),
# f"type2_density_from_{threshold:.1f}",
# "type2_trace",
# f"density_from_{threshold:.1f}",
# ],
# [
# partial(get_type3_trace, input_threshold=per_layer_metrics()),
# f"type3_density_from_{threshold:.1f}",
# "type3_trace",
# f"density_from_{threshold:.1f}",
# ],
[
partial(
get_type4_trace,
output_threshold=per_layer_metrics(),
input_threshold=per_layer_metrics(),
),
f"type4_density_from_{threshold:.1f}",
"type4_trace",
f"density_from_{threshold:.1f}",
],
# [
# partial(
# get_type4_trace,
# output_threshold=per_layer_metrics(),
# input_threshold=per_layer_metrics(),
# ),
# f"type4_density_from_{threshold:.1f}_absolute_{absolute_threshold:.2f}",
# "type4_trace",
# f"density_from_{threshold:.1f}_absolute_{absolute_threshold:.2f}",
# ],
# [
# partial(get_unstructured_trace, density=per_layer_metrics()),
# f"unstructured_density_from_{threshold:.1f}",
# "unstructured_class_trace",
# f"density_from_{threshold:.1f}",
# ],
# [
# partial(
# get_per_receptive_field_unstructured_trace,
# output_threshold=per_layer_metrics(),
# ),
# f"per_receptive_field_unstructured_density_from_{threshold:.1f}",
# "per_receptive_field_unstructured_class_trace",
# f"density_from_{threshold:.1f}",
# ],
# [
# partial(
# get_type7_trace,
# density=per_layer_metrics(),
# input_threshold=per_layer_metrics(),
# ),
# f"type7_density_from_{threshold:.1f}",
# "type7_trace",
# f"density_from_{threshold:.1f}",
# ],
# [
# partial(
# get_per_input_unstructured_trace,
# output_threshold=per_layer_metrics(),
# input_threshold=per_layer_metrics(),
# ),
# f"per_input_unstructured_density_from_{threshold:.1f}",
# "per_input_unstructured_class_trace",
# f"density_from_{threshold:.1f}",
# ],
# *hybrid_backward_traces
)
for config in (
attack_name
* (trace_fn | trace_label | trace_type | trace_parameter)
* topk_share_range
* example_num
* rank
* use_point
* compare_with_full
* early_stop_layer_num
):
with config:
print(f"config: {list(config.values())}")
topk_calc_range = 2
variant = get_variant(
example_num=example_num.value,
early_stop_layer_num=early_stop_layer_num.value,
)
if variant is not None:
label_name = f"{label}_{variant}"
else:
label_name = label
if use_weight:
label_name = f"{label_name}_weight"
elif use_point.value:
label_name = f"{label_name}_point"
if compare_with_full.value:
label_name = f"{label_name}_vs_full"
if trace_label.value is not None:
label_name = f"{label_name}_{trace_label.value}"
if rank.value is not None:
label_name = f"{label_name}_rank{rank.value}"
# alexnet_overlap_ratio = alexnet_imagenet_overlap_ratio(
# attack_fn=attacks[attack_name][0],
# generate_adversarial_fn=generate_adversarial_example,
# class_trace_fn=lambda class_id: alexnet_imagenet_class_trace_compact(class_id, threshold, label=label),
# # class_trace_fn=lambda class_id: lenet_mnist_class_trace(class_id, threshold),
# select_fn=lambda input: arg_approx(input, threshold),
# overlap_fn=calc_trace_side_overlap_compact,
# # overlap_fn=calc_iou,
# # overlap_fn=calc_class_trace_side_overlap,
# # overlap_fn=calc_class_trace_side_overlap_norm,
# # overlap_fn=calc_weighted_iou,
# path='alexnet_imagenet_class_overlap_ratio_{0:.1f}_{1}_{2}.foolbox.csv'.format(
# threshold, attack_name, label),
# # path='lenet_class_overlap_ratio_{0:.1f}_{1}_{2}.foolbox.iou.csv'.format(threshold, attack_name, label),
# # path='lenet_class_overlap_ratio_{0:.1f}_{1}_{2}.foolbox.class_side.csv'.format(
# # path='lenet_class_overlap_ratio_{0:.1f}_{1}_{2}.foolbox.wo_pool.csv'.format(
# # path='lenet_class_overlap_ratio_{0:.1f}_{1}_{2}.foolbox.class_side_norm.csv'.format(
# # path='lenet_class_overlap_ratio_{0:.1f}_{1}_{2}.foolbox.weighted_iou.csv'.format(
# # threshold, attack_name, label),
# preprocessing=(_CHANNEL_MEANS, 1),
# **(attacks[attack_name][1] if len(attacks[attack_name]) == 2 else {}),
# )
# alexnet_overlap_ratio.save()
# print("edge:")
# summary = get_overlay_summary(lenet_overlap_ratio.load(), TraceKey.EDGE)
# summary_file = "lenet_class_overlap_ratio_summary_{threshold:.1f}_{label}.csv".format(
# threshold=threshold, label=label)
# file_exists = os.path.exists(summary_file)
# with open(summary_file, "a") as csv_file:
# headers = ["attack"] + list(summary.keys())
# writer = csv.DictWriter(csv_file, delimiter=',', lineterminator='\n', fieldnames=headers)
# if not file_exists:
# writer.writeheader()
# writer.writerow({"attack": attack_name, **summary})
# print(summary)
# print("weight:")
# print(get_overlay_summary(lenet_overlap_ratio.load(), TraceKey.WEIGHT))
# print("point:")
# print(get_overlay_summary(lenet_overlap_ratio.load(), TraceKey.POINT))
# for overlay_threshold in np.arange(0, 1.01, 0.01):
# # summary = get_overlay_summary(alexnet_overlap_ratio.load(), TraceKey.EDGE, overlay_threshold)
# summary = get_overlay_summary(alexnet_overlap_ratio.load(), TraceKey.WEIGHT, overlay_threshold)
# summary_file = "alexnet_imagenet_class_overlap_ratio_summary_{threshold:.1f}_{attack}_{label}.csv".format(
# # summary_file = "lenet_class_overlap_ratio_summary_{threshold:.1f}_{label}.iou.csv".format(
# # summary_file = "lenet_class_overlap_ratio_summary_{threshold:.1f}_{label}.class_side.csv".format(
# # summary_file = "lenet_class_overlap_ratio_summary_{threshold:.1f}_{label}.wo_pool.csv".format(
# # summary_file = "lenet_class_overlap_ratio_summary_{threshold:.1f}_{label}.class_side_norm.csv".format(
# # summary_file = "lenet_class_overlap_ratio_summary_{threshold:.1f}_{label}.weighted_iou.csv".format(
# threshold=threshold, attack=attack_name, label=label)
# file_exists = os.path.exists(summary_file)
# with open(summary_file, "a") as csv_file:
# headers = ["attack"] + list(summary.keys())
# writer = csv.DictWriter(csv_file, delimiter=',', lineterminator='\n', fieldnames=headers)
# if not file_exists:
# writer.writeheader()
# writer.writerow({"attack": attack_name, **summary})
# overlap_fn = calc_trace_side_overlap_compact
overlap_fn = calc_trace_side_overlap_both_compact
# overlap_fn = calc_weighted_iou
# overlap_fn = calc_class_trace_side_overlap_compact
# path_template = "alexnet_imagenet_class_overlap_ratio_{0:.1f}_{1}_{2}.foolbox.csv"
# path_template = "alexnet_imagenet_class_channel_overlap_ratio_{0:.1f}_{1}_{2}.foolbox.csv"
# path_template = "alexnet_imagenet_class_channel_overlap_ratio_{0:.1f}_{1}_{2}_top5_diff.foolbox.csv"
# path_template = "alexnet_imagenet_class_channel_overlap_ratio_{0:.1f}_{1}_{2}_top5_diff_same_class_trace.foolbox.csv"
# path_template = "alexnet_imagenet_class_channel_overlap_ratio_{0:.1f}_{1}_{2}_top5_diff_all.foolbox.csv"
# path_template = "alexnet_imagenet_class_overlap_ratio_{0:.1f}_{1}_{2}_top5_diff_all.foolbox.csv"
# path_template = ("alexnet_imagenet_class_overlap_ratio_{0:.1f}_{1}_{2}_top"
# + str(topk_share_range)
# + "_diff_all_uint8.foolbox.csv")
# path_template = ("alexnet_imagenet_class_overlap_ratio_{0:.1f}_{1}_{2}_top"
# + str(topk_share_range)
# + "_diff_all.foolbox.csv")
# path_template = ("alexnet_imagenet_class_overlap_ratio_{0:.1f}_{1}_{2}_top"
# + str(topk_share_range)
# + "_logit_diff.foolbox.csv")
# path_template = "alexnet_imagenet_ideal_metrics_{0:.1f}_{1}_{2}.csv"
# path_template = "alexnet_imagenet_fc_layer_path_ideal_metrics_{0:.1f}_{1}_{2}.csv"
# path_template = "alexnet_imagenet_ideal_metrics_per_layer_{0:.1f}_{1}_{2}.csv"
path_template = (
"resnet_50_imagenet_real_metrics_per_layer_{0:.1f}_{1}_{2}.csv"
)
# path_template = "resnet_50_imagenet_real_metrics_per_layer_targeted0_{0:.1f}_{1}_{2}.csv"
# path_template = "resnet_50_imagenet_real_metrics_per_layer_targeted500_{0:.1f}_{1}_{2}.csv"
# path_template = "resnet_50_imagenet_real_metrics_per_layer_targeted800_{0:.1f}_{1}_{2}.csv"
# path_template = "alexnet_imagenet_class_overlap_ratio_{0:.1f}_{1}_{2}_top2_diff_all.foolbox.csv"
# path_template = "alexnet_imagenet_class_overlap_ratio_{0:.1f}_{1}_{2}_top5_unique.foolbox.csv"
# path_template = "alexnet_imagenet_train_class_overlap_ratio_{0:.1f}_{1}_{2}_top5_unique.weight.foolbox.csv"
# path_template = "alexnet_imagenet_train_class_overlap_ratio_{0:.1f}_{1}_{2}_top5_unique.foolbox.csv"
# path_template = "alexnet_imagenet_class_channel_overlap_ratio_{0:.1f}_{1}_{2}_top5_diff_all_online.foolbox.csv"
# path_template = "alexnet_imagenet_class_channel_overlap_ratio_per_node_{0:.1f}_{1}_{2}_top5_diff.foolbox.csv"
# path_template = "alexnet_imagenet_class_channel_overlap_ratio_{0:.1f}_{1}_{2}_top5_diff_train.foolbox.csv"
# path_template = "alexnet_imagenet_class_channel_overlap_ratio_{0:.1f}_{1}_{2}_weighted_iou.foolbox.csv"
# path_template = "alexnet_imagenet_class_channel_overlap_ratio_{0:.1f}_{1}_{2}_weighted_iou_class_0.foolbox.csv"
# path_template = "alexnet_imagenet_class_channel_overlap_ratio_per_node_{0:.1f}_{1}_{2}_weighted_iou_class_0.foolbox.csv"
# path_template = "alexnet_imagenet_class_overlap_ratio_per_node_{0:.1f}_{1}_{2}_weighted_iou_class_0.foolbox.csv"
# path_template = "alexnet_imagenet_class_overlap_ratio_per_node_{0:.1f}_{1}_{2}_weighted_iou.foolbox.csv"
# path_template = "alexnet_imagenet_class_channel_overlap_ratio_per_node_{0:.1f}_{1}_{2}_weighted_iou.foolbox.csv"
# path_template = "alexnet_imagenet_class_channel_overlap_ratio_per_node_{0:.1f}_{1}_{2}.foolbox.csv"
# path_template = "alexnet_imagenet_class_channel_overlap_ratio_{0:.1f}_{1}_{2}_class_0.foolbox.csv"
# path_template = "alexnet_imagenet_class_overlap_ratio_{0:.1f}_{1}_{2}_full.foolbox.csv"
# path_template = "alexnet_imagenet_class_overlap_ratio_{0:.1f}_{1}_{2}_train_in_trace.foolbox.csv"
# path_template = "alexnet_imagenet_class_overlap_ratio_{0:.1f}_{1}_{2}_train_not_merged.foolbox.csv"
# path_template = "alexnet_imagenet_class_overlap_ratio_{0:.1f}_{1}_{2}_top5.foolbox.csv"
# path_template = "alexnet_imagenet_class_overlap_ratio_{0:.1f}_{1}_{2}_top5_all.foolbox.csv"
# path_template = "alexnet_imagenet_class_overlap_ratio_{0:.1f}_{1}_{2}_error.foolbox.csv"
# path_template = "alexnet_imagenet_class_overlap_ratio_{0:.1f}_{1}_{2}_rand.foolbox.csv"
# path_template = "alexnet_imagenet_class_overlap_ratio_{0:.1f}_{1}_{2}_top5_rand.foolbox.csv"
per_node = False
# per_node = True
# per_channel = True
per_channel = False
# alexnet_overlap_ratio = alexnet_imagenet_overlap_ratio(
# alexnet_overlap_ratio = alexnet_imagenet_overlap_ratio_top5_diff_uint8(
# alexnet_overlap_ratio = alexnet_imagenet_overlap_ratio_top5_diff(
# alexnet_overlap_ratio = alexnet_imagenet_overlap_ratio_logit_diff(
# alexnet_overlap_ratio = alexnet_imagenet_ideal_metrics(
# alexnet_overlap_ratio = alexnet_imagenet_fc_layer_path_ideal_metrics(
# alexnet_overlap_ratio = alexnet_imagenet_negative_example_ideal_metrics_per_layer(
# alexnet_overlap_ratio = alexnet_imagenet_ideal_metrics_per_layer(
resnet_50_overlap_ratio = resnet_50_imagenet_real_metrics_per_layer(
# resnet_50_overlap_ratio = resnet_50_imagenet_real_metrics_per_layer_targeted(target_class=0)(
# resnet_50_overlap_ratio = resnet_50_imagenet_real_metrics_per_layer_targeted(target_class=500)(
# resnet_50_overlap_ratio = resnet_50_imagenet_real_metrics_per_layer_targeted(target_class=800)(
# alexnet_overlap_ratio = alexnet_imagenet_overlap_ratio_top5_unique(
attack_name=attack_name.value,
# alexnet_overlap_ratio = alexnet_imagenet_overlap_ratio_top5(
trace_fn=partial(
trace_fn.value,
select_fn=lambda input: arg_approx(input, threshold),
stop_hook=early_stop_hook(early_stop_layer_num.value)
if early_stop_layer_num.value is not None
else None,
),
# class_trace_fn=lambda class_id: alexnet_imagenet_class_trace(class_id, threshold, label=label),
# class_trace_fn=lambda class_id: alexnet_imagenet_class_trace_compact(class_id, threshold, label=label),
# class_trace_fn=lambda class_id: alexnet_imagenet_class_channel_trace_compact(
class_trace_fn=lambda class_id: resnet_50_imagenet_class_trace_compact(
# class_trace_fn=lambda class_id: alexnet_imagenet_class_unique_trace_compact(
# class_trace_fn=lambda class_id: alexnet_imagenet_class_channel_trace(
# class_trace_fn=lambda class_id: alexnet_imagenet_class_trace(
class_id,
threshold,
label=label,
variant=variant,
trace_type=None
if compare_with_full.value
else trace_type.value,
trace_parameter=None
if compare_with_full.value
else trace_parameter.value,
),
# class_trace_fn=lambda class_id: lenet_mnist_class_trace(class_id, threshold),
# overlap_fn=calc_trace_side_overlap,
overlap_fn=overlap_fn,
# overlap_fn=calc_iou,
# overlap_fn=calc_class_trace_side_overlap,
# overlap_fn=calc_class_trace_side_overlap_norm,
# overlap_fn=calc_weighted_iou,
path="metrics/"
+ path_template.format(
# path='alexnet_imagenet_class_overlap_ratio_per_node_{0:.1f}_{1}_{2}.foolbox.csv'.format(
threshold,
attack_name.value,
label_name,
),
# path='lenet_class_overlap_ratio_{0:.1f}_{1}_{2}.foolbox.iou.csv'.format(threshold, attack_name, label),
# path='lenet_class_overlap_ratio_{0:.1f}_{1}_{2}.foolbox.class_side.csv'.format(
# path='lenet_class_overlap_ratio_{0:.1f}_{1}_{2}.foolbox.wo_pool.csv'.format(
# path='lenet_class_overlap_ratio_{0:.1f}_{1}_{2}.foolbox.class_side_norm.csv'.format(
# path='lenet_class_overlap_ratio_{0:.1f}_{1}_{2}.foolbox.weighted_iou.csv'.format(
# threshold, attack_name, label),
preprocessing=(_CHANNEL_MEANS, 1),
bounds=(0, 255),
channel_axis=3,
image_size=224,
class_num=1001,
norm_fn=imagenet.normalize,
data_format="channels_last",
per_node=per_node,
per_channel=per_channel,
topk_share_range=topk_share_range.value,
topk_calc_range=topk_calc_range,
use_weight=use_weight,
threshold=threshold,
rank=rank.value,
use_point=use_point.value,
label=label,
)
# alexnet_overlap_ratio = alexnet_imagenet_overlap_ratio_error(
# alexnet_overlap_ratio = alexnet_imagenet_overlap_ratio_rand(
# alexnet_overlap_ratio = alexnet_imagenet_overlap_ratio_top5_rand(
# class_trace_fn=lambda class_id: alexnet_imagenet_class_trace_compact(class_id, threshold, label=label),
# select_fn=lambda input: arg_approx(input, threshold),
# overlap_fn=overlap_fn,
# path=path_template.format(threshold, attack_name, label),
# )
resnet_50_overlap_ratio.save()
# summary_path_template = "alexnet_imagenet_class_overlap_ratio_summary_{threshold:.1f}_{attack}_{label}.{key}.csv"
# summary_path_template = "alexnet_imagenet_class_channel_overlap_ratio_summary_{threshold:.1f}_{attack}_{label}.{key}.csv"
# summary_path_template = "alexnet_imagenet_class_channel_overlap_ratio_summary_{threshold:.1f}_{attack}_{label}_top5_diff.{key}.csv"
# summary_path_template = "alexnet_imagenet_class_channel_overlap_ratio_summary_{threshold:.1f}_{attack}_{label}_top5_diff_same_class_trace.{key}.csv"
# summary_path_template = "alexnet_imagenet_class_channel_overlap_ratio_summary_{threshold:.1f}_{attack}_{label}_top5_diff_all.{key}.csv"
# summary_path_template = "alexnet_imagenet_class_channel_overlap_ratio_summary_{threshold:.1f}_{attack}_{label}_top5_diff_all_compare.{key}.csv"
# summary_path_template = "alexnet_imagenet_class_overlap_ratio_summary_{threshold:.1f}_{attack}_{label}_top5_diff_all_compare.{key}.csv"
# summary_path_template = "alexnet_imagenet_class_overlap_ratio_summary_{threshold:.1f}_{attack}_{label}_top2_diff_all_compare.{key}.csv"
summary_path_template = "alexnet_imagenet_class_overlap_ratio_summary_{threshold:.1f}_{attack}_{label}_top5_unique_compare.{key}.csv"
# summary_path_template = "alexnet_imagenet_class_channel_overlap_ratio_summary_{threshold:.1f}_{attack}_{label}_top5_diff_all_compare_online.{key}.csv"
# summary_path_template = "alexnet_imagenet_class_channel_overlap_ratio_summary_{threshold:.1f}_{attack}_{label}_top5_diff_all_compare_filter.{key}.csv"
# summary_path_template = "alexnet_imagenet_class_channel_overlap_ratio_summary_{threshold:.1f}_{attack}_{label}_top5_diff_train.{key}.csv"
# summary_path_template = "alexnet_imagenet_class_channel_overlap_ratio_summary_{threshold:.1f}_{attack}_{label}_weighted_iou.{key}.csv"
# summary_path_template = "alexnet_imagenet_class_channel_overlap_ratio_summary_{threshold:.1f}_{attack}_{label}_weighted_iou_class_0.{key}.csv"
# summary_path_template = "alexnet_imagenet_class_channel_overlap_ratio_summary_{threshold:.1f}_{attack}_{label}_class_0.{key}.csv"
# summary_path_template = "alexnet_imagenet_class_overlap_ratio_summary_{threshold:.1f}_{attack}_{label}_full.{key}.csv"
# summary_path_template = "alexnet_imagenet_class_overlap_ratio_summary_{threshold:.1f}_{attack}_{label}_train_in_trace.{key}.csv"
# summary_path_template = "alexnet_imagenet_class_overlap_ratio_summary_{threshold:.1f}_{attack}_{label}_train_not_merged.{key}.csv"
# summary_path_template = "alexnet_imagenet_class_overlap_ratio_summary_{threshold:.1f}_{attack}_{label}_top5.{key}.csv"
# summary_path_template = "alexnet_imagenet_class_overlap_ratio_summary_{threshold:.1f}_{attack}_{label}_top5_all.{key}.csv"
# summary_path_template = "alexnet_imagenet_class_overlap_ratio_summary_{threshold:.1f}_{attack}_{label}_error.{key}.csv"
# summary_path_template = "alexnet_imagenet_class_overlap_ratio_summary_{threshold:.1f}_{attack}_{label}_rand.{key}.csv"
# summary_path_template = "alexnet_imagenet_class_overlap_ratio_summary_{threshold:.1f}_{attack}_{label}_top5_rand.{key}.csv"
# key = TraceKey.EDGE
# # summary_file = "alexnet_imagenet_class_overlap_ratio_summary_{threshold:.1f}_{attack}_{label}.{key}.csv".format(
# summary_file = summary_path_template.format(
# # summary_file = "lenet_class_overlap_ratio_summary_{threshold:.1f}_{label}.iou.csv".format(
# # summary_file = "lenet_class_overlap_ratio_summary_{threshold:.1f}_{label}.class_side.csv".format(
# # summary_file = "lenet_class_overlap_ratio_summary_{threshold:.1f}_{label}.wo_pool.csv".format(
# # summary_file = "lenet_class_overlap_ratio_summary_{threshold:.1f}_{label}.class_side_norm.csv".format(
# # summary_file = "lenet_class_overlap_ratio_summary_{threshold:.1f}_{label}.weighted_iou.csv".format(
# threshold=threshold, attack=attack_name, label=label_name, key=key)
# with open(summary_file, "w") as csv_file:
# has_header = False
# for overlay_threshold in np.linspace(-1, 1, 201):
# # summary = get_overlay_summary(alexnet_overlap_ratio.load(), key, overlay_threshold)
# # summary = get_overlay_summary_top1(alexnet_overlap_ratio.load(), key, overlay_threshold)
# summary = get_overlay_summary_compare(alexnet_overlap_ratio.load(), key, float(overlay_threshold))
# # summary = get_overlay_summary_compare_filter(alexnet_overlap_ratio.load(), key, float(overlay_threshold))
# # summary = get_overlay_summary_one_side(alexnet_overlap_ratio.load(), key, overlay_threshold)
# if not has_header:
# headers = ["attack"] + list(summary.keys())
# writer = csv.DictWriter(csv_file, delimiter=',', lineterminator='\n', fieldnames=headers)
# writer.writeheader()
# has_header = True
# writer.writerow({"attack": attack_name, **summary})
#
# summary_file = summary_path_template.format(
# threshold=threshold, attack=attack_name, label=label_name, key="detail")
# get_overlay_summary_compare_detail(summary_file, alexnet_overlap_ratio.load(), from_zero=True).save()
| []
| []
| [
"TF_CPP_MIN_LOG_LEVEL"
]
| [] | ["TF_CPP_MIN_LOG_LEVEL"] | python | 1 | 0 | |
internal/editor/editor.go | // Package editor encapsulates working with external text editor.
package editor
import (
"bufio"
"context"
"fmt"
"io/ioutil"
"os"
"os/exec"
"path/filepath"
"runtime"
"strings"
"github.com/pkg/errors"
"github.com/kopia/kopia/repo/logging"
)
var log = logging.GetContextLoggerFunc("editor")
// EditLoop launches an OS-specific editor (vi, notepad.exe, or another editor configured through environment variables).
// It creates a temporary file with 'initial' contents and repeatedly invokes the editor until the provided 'parse' function
// returns a nil result indicating success. The 'parse' function is passed the contents of the edited file without # line comments.
func EditLoop(ctx context.Context, fname, initial string, parse func(updated string) error) error {
tmpDir, err := ioutil.TempDir("", "kopia")
if err != nil {
return errors.Wrap(err, "unable to create temp directory")
}
tmpFile := filepath.Join(tmpDir, fname)
defer os.RemoveAll(tmpDir) //nolint:errcheck
if err := ioutil.WriteFile(tmpFile, []byte(initial), 0o600); err != nil {
return errors.Wrap(err, "unable to write file to edit")
}
for {
if err := editFile(ctx, tmpFile); err != nil {
return errors.Wrap(err, "error launching editor")
}
txt, err := readAndStripComments(tmpFile)
if err != nil {
return errors.Wrap(err, "error parsing edited file")
}
err = parse(txt)
if err == nil {
return nil
}
log(ctx).Errorf("%v", err)
fmt.Print("Reopen editor to fix? (Y/n) ") //nolint:forbidigo
var shouldReopen string
_, _ = fmt.Scanf("%s", &shouldReopen)
if strings.HasPrefix(strings.ToLower(shouldReopen), "n") {
return errors.New("aborted")
}
}
}
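// A minimal usage sketch (the file name and parse callback are hypothetical):
//
//	err := EditLoop(ctx, "policy.json", "{}", func(updated string) error {
//		var v map[string]interface{}
//		return json.Unmarshal([]byte(updated), &v) // assumes encoding/json is imported
//	})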
func readAndStripComments(fname string) (string, error) {
f, err := os.Open(fname) //nolint:gosec
if err != nil {
return "", errors.Wrap(err, "error opening edited file")
}
defer f.Close() //nolint:errcheck,gosec
var result []string
s := bufio.NewScanner(f)
for s.Scan() {
l := s.Text()
l = strings.TrimSpace(strings.Split(l, "#")[0])
if l != "" {
result = append(result, l)
}
}
return strings.Join(result, "\n"), nil
}
func editFile(ctx context.Context, file string) error {
editor, editorArgs := getEditorCommand()
var args []string
args = append(args, editorArgs...)
args = append(args, file)
cmd := exec.Command(editor, args...) //nolint:gosec
cmd.Stderr = os.Stderr
cmd.Stdin = os.Stdin
cmd.Stdout = os.Stdout
log(ctx).Debugf("launching editor %q on file %q", editor, file)
err := cmd.Run()
if err != nil {
return errors.Wrap(err, "error running editor command")
}
return nil
}
func getEditorCommand() (cmd string, args []string) {
editor := os.Getenv("VISUAL")
if editor == "" {
editor = os.Getenv("EDITOR")
}
if editor != "" {
return parseEditor(editor)
}
if runtime.GOOS == "windows" {
return "notepad.exe", nil
}
return "vi", nil
}
func parseEditor(s string) (cmd string, args []string) {
// quoted editor path
if s[0] == '"' {
p := strings.Index(s[1:], "\"")
if p == -1 {
// invalid
return s, nil
}
return s[1 : p+1], strings.Split(strings.TrimSpace(s[p+2:]), " ") // skip past the closing quote
}
parts := strings.Split(s, " ")
return parts[0], parts[1:]
}
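// For illustration, a value like `"C:\Program Files\edit.exe" --wait` parses to
// cmd `C:\Program Files\edit.exe` with args [--wait], while an unquoted value
// such as `code -w` parses to cmd `code` with args [-w].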
| [
"\"VISUAL\"",
"\"EDITOR\""
]
| []
| [
"VISUAL",
"EDITOR"
]
| [] | ["VISUAL", "EDITOR"] | go | 2 | 0 | |
docs/conf.py | # Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
import os
import sys
# -- Project information -----------------------------------------------------
project = 'Alpa'
#copyright = '2022, <Author>'
#author = '<Author>'
# The full version, including alpha/beta/rc tags
release = '0.0.1'
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx_gallery.gen_gallery',
'sphinx.ext.napoleon',
'sphinx.ext.intersphinx',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# Explicitly define the order within a subsection.
# The listed files are sorted according to the list.
# The unlisted files are sorted by filenames.
# The unlisted files always appear after listed files.
# Note: we need to execute files that use distributed runtime before
# files that uses local runtime. Because all tutorials run on a single
# process, using local runtime will allocate all GPU memory on the driver
# script and leave no GPU memory for workers.
within_subsection_order = {
"tutorials": [
"pipeshard_parallelism.py",
"quickstart.py",
"alpa_vs_pmap.py",
],
}
class WithinSubsectionOrder:
def __init__(self, src_dir):
self.src_dir = src_dir.split("/")[-1]
def __call__(self, filename):
# If the order is provided, use the provided order
if (
self.src_dir in within_subsection_order
and filename in within_subsection_order[self.src_dir]
):
index = within_subsection_order[self.src_dir].index(filename)
assert index < 1e10
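# The "\0" prefix sorts listed files ahead of any ordinary filename.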
return "\0%010d" % index
# Otherwise, sort by filename
return filename
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# sphinx-gallery configuration
sphinx_gallery_conf = {
'examples_dirs': ['gallery/tutorials'],
'gallery_dirs': ['tutorials'],
'within_subsection_order': WithinSubsectionOrder,
'backreferences_dir': 'gen_modules/backreferences',
"filename_pattern": os.environ.get("ALPA_TUTORIAL_EXEC_PATTERN", r".py"),
}
# configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {
'python': ('https://docs.python.org/{.major}'.format(sys.version_info), None),
'matplotlib': ('https://matplotlib.org/', None),
'pandas': ('https://pandas.pydata.org/', None),
}
# -- Monkey patch -------------------------------------------------
# Fix bugs in sphinx_gallery
import io
from sphinx_gallery import gen_rst
setattr(gen_rst._LoggingTee, "close", lambda x: x.restore_std())
def raise_io_error(*args):
raise io.UnsupportedOperation()
setattr(gen_rst._LoggingTee, "fileno", raise_io_error)
| []
| []
| [
"ALPA_TUTORIAL_EXEC_PATTERN"
]
| [] | ["ALPA_TUTORIAL_EXEC_PATTERN"] | python | 1 | 0 | |
platform-showcase/vault-integrated-service/cmd/service/main.go | package main
import (
"fmt"
"os"
"github.com/hashicorp/vault/api"
"github.com/joho/godotenv"
)
var (
token string
vaultAddr string
)
func init() {
_ = godotenv.Load(".platform/env/.env")
token = os.Getenv("COMPONENT_VAULT_VAULT_VAULT_DEV_ROOT_TOKEN_ID")
vaultAddr = os.Getenv("COMPONENT_VAULT_VAULT_VAULT_DEV_LISTEN_ADDRESS")
}
func main() {
config := &api.Config{
Address: "http://" + vaultAddr,
}
client, err := api.NewClient(config)
if err != nil {
fmt.Println(err)
return
}
client.SetToken(token)
input := map[string]interface{}{
"data": map[string]interface{}{
"foo": "foo",
},
}
_, err = client.Logical().Write("secret/data/foo", input)
if err != nil {
fmt.Println(err)
return
}
secret, err := client.Logical().Read("secret/data/foo")
if err != nil {
fmt.Println(err)
return
}
if secret == nil {
fmt.Println("secret not found")
return
}
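// With the KV v2 secrets engine the secret payload is nested under a
// top-level "data" key, hence the extra lookup below.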
m, ok := secret.Data["data"]
if !ok {
fmt.Printf("%T %#v\n", secret.Data["data"], secret.Data["data"])
return
}
fmt.Printf("foo: %v\n", m.(map[string]interface{})["foo"])
}
| [
"\"COMPONENT_VAULT_VAULT_VAULT_DEV_ROOT_TOKEN_ID\"",
"\"COMPONENT_VAULT_VAULT_VAULT_DEV_LISTEN_ADDRESS\""
]
| []
| [
"COMPONENT_VAULT_VAULT_VAULT_DEV_LISTEN_ADDRESS",
"COMPONENT_VAULT_VAULT_VAULT_DEV_ROOT_TOKEN_ID"
]
| [] | ["COMPONENT_VAULT_VAULT_VAULT_DEV_LISTEN_ADDRESS", "COMPONENT_VAULT_VAULT_VAULT_DEV_ROOT_TOKEN_ID"] | go | 2 | 0 | |
conanfile_base.py | # -*- coding: utf-8 -*-
from conans import ConanFile, AutoToolsBuildEnvironment, tools
from conans.errors import ConanException
import os
import shutil
import glob
class ConanFileBase(ConanFile):
version = "9.1.0"
description = "The GNU Compiler Collection includes front ends for C, C++, Objective-C, Fortran, Ada, Go, " \
"and D, as well as libraries for these languages (libstdc++,...)"
topics = ("conan", "gcc", "logging")
url = "https://github.com/bincrafters/conan-gcc"
homepage = "https://gcc.gnu.org/"
author = "Bincrafters <[email protected]>"
license = "GPL-3.0-or-later"
exports = ["LICENSE.md"]
exports_sources = ["patches/*.patch"]
settings = "os", "arch", "compiler", "build_type"
options = {"shared": [True, False], "fPIC": [True, False], "languages": "ANY", "target": "ANY"}
default_options = {"shared": False, "fPIC": True, "languages": "c,c++", "target": None}
_source_subfolder = "source_subfolder"
_build_subfolder = "build_subfolder"
no_copy_source = True
requires = ("gmp/6.1.2@bincrafters/stable",
"mpfr/4.0.2@bincrafters/stable",
"mpc/1.1.0@bincrafters/stable",
"zlib/1.2.11@conan/stable")
def config_options(self):
if self.settings.os == 'Windows':
del self.options.fPIC
def _apply_patches(self):
for filename in glob.glob("patches/*.patch"):
self.output.info('applying patch "%s"' % filename)
tools.patch(base_path=os.path.join(self.source_folder, self._source_subfolder), patch_file=filename)
def source(self):
# https://gcc.gnu.org/mirrors.html
mirrors = ["ftp://ftp.gnu.org/gnu/gcc",
"ftp://ftp.lip6.fr/pub/gcc/releases",
"ftp://ftp.irisa.fr/pub/mirrors/gcc.gnu.org/gcc/releases/",
"ftp://ftp.uvsq.fr/pub/gcc/releases/",
"http://mirrors-usa.go-parts.com/gcc/releases/",
"http://mirrors.concertpass.com/gcc/releases/"]
for mirror in mirrors:
try:
source_url = mirror + "/{n}-{v}/{n}-{v}.tar.gz".format(n="gcc", v=self.version)
tools.get(source_url, sha256="be303f7a8292982a35381489f5a9178603cbe9a4715ee4fa4a815d6bcd2b658d")
break
except ConanException:
pass
extracted_dir = "gcc-" + self.version
os.rename(extracted_dir, self._source_subfolder)
self._apply_patches()
@property
def _bash(self):
return os.environ.get("CONAN_BASH_PATH", tools.which("bash"))
@property
def _extra_configure_flags(self):
return []
def build(self):
libdir = "%s/lib/gcc/%s" % (self.package_folder, self.version)
tools.replace_in_file(os.path.join(self.source_folder,
self._source_subfolder, "gcc", "config", "i386", "t-linux64"),
"m64=../lib64", "m64=../lib", strict=False)
tools.replace_in_file(os.path.join(self.source_folder,
self._source_subfolder, "libgcc", "config", "t-slibgcc-darwin"),
"@shlib_slibdir@", libdir, strict=False)
pkgversion = "Conan GCC %s" % self.version
tools.rmdir(self._build_subfolder)
tools.mkdir(self._build_subfolder)
configure_dir = os.path.abspath(os.path.join(self.source_folder, self._source_subfolder))
with tools.chdir(self._build_subfolder):
# http://www.linuxfromscratch.org/lfs/view/development/chapter06/gcc.html
args = ["--enable-languages=%s" % self.options.languages,
"--disable-nls",
"--disable-bootstrap",
"--disable-multilib", # this means building two architectures at once, too hard for now
"--with-system-zlib",
"--program-suffix=-%s" % self.version,
"--with-bugurl=https://github.com/bincrafters/community/issues",
"--with-pkgversion=%s" % pkgversion,
"--libdir=%s" % libdir,
"--with-gmp=%s" % self.deps_cpp_info["gmp"].rootpath,
"--with-mpfr=%s" % self.deps_cpp_info["mpfr"].rootpath,
"--with-mpc=%s" % self.deps_cpp_info["mpc"].rootpath]
args.extend(self._extra_configure_flags)
if self.settings.os == "Macos":
# https://github.com/Homebrew/homebrew-core/blob/master/Formula/gcc.rb
args.extend(["--with-native-system-header-dir=/usr/include",
"--with-sysroot=/Library/Developer/CommandLineTools/SDKs/MacOSX.sdk"])
# FIXME : unwind-dw2-fde-dip.c:36:10: fatal error: elf.h: No such file or directory
elf_h = os.path.join(self.source_folder, self._source_subfolder, "include", "elf.h")
if not os.path.isfile(elf_h):
tools.download("https://sourceware.org/git/?p=glibc.git;a=blob_plain;f=elf/elf.h", elf_h)
tools.replace_in_file(elf_h, "#include <features.h>", "")
env_build = AutoToolsBuildEnvironment(self)
env_build_vars = env_build.vars
env_build_vars["SHELL"] = self._bash
env_build.libs = [] # otherwise causes config.log to fail finding -lmpc
if self.settings.compiler in ["clang", "apple-clang"]: # GCC doesn't like Clang-specific flags
if self.settings.compiler.libcxx == "libc++":
env_build.cxx_flags.remove("-stdlib=libc++")
elif self.settings.compiler.libcxx in ["libstdc++", "libstdc++11"]:
env_build.cxx_flags.remove("-stdlib=libstdc++")
env_build.configure(vars=env_build_vars, args=args, configure_dir=configure_dir, target=self.options.target)
make_args = self._make_args
make_install_args = self._make_install_args
if self.settings.os == "Macos":
# Ensure correct install names when linking against libgcc_s;
# see discussion in https://github.com/Homebrew/legacy-homebrew/pull/34303
make_args.append("BOOT_LDFLAGS=-Wl,-headerpad_max_install_names")
make_install_args.append("BOOT_LDFLAGS=-Wl,-headerpad_max_install_names")
env_build.make(vars=env_build_vars, args=make_args)
env_build.make(vars=env_build_vars, args=make_install_args)
def package(self):
self.copy(pattern="LICENSE", dst="licenses", src=self._source_subfolder)
| []
| []
| [
"CONAN_BASH_PATH"
]
| [] | ["CONAN_BASH_PATH"] | python | 1 | 0 | |
cmd/minikube/cmd/start.go | /*
Copyright 2016 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cmd
import (
"encoding/json"
"fmt"
"io/ioutil"
"net"
"os"
"os/exec"
"path/filepath"
"strconv"
"strings"
"time"
"github.com/blang/semver"
"github.com/docker/machine/libmachine/host"
"github.com/golang/glog"
"github.com/spf13/cobra"
"github.com/spf13/viper"
"golang.org/x/sync/errgroup"
cmdcfg "k8s.io/minikube/cmd/minikube/cmd/config"
cmdutil "k8s.io/minikube/cmd/util"
"k8s.io/minikube/pkg/minikube/cluster"
cfg "k8s.io/minikube/pkg/minikube/config"
"k8s.io/minikube/pkg/minikube/constants"
"k8s.io/minikube/pkg/minikube/machine"
pkgutil "k8s.io/minikube/pkg/util"
"k8s.io/minikube/pkg/util/kubeconfig"
"k8s.io/minikube/pkg/version"
)
const (
isoURL = "iso-url"
memory = "memory"
cpus = "cpus"
humanReadableDiskSize = "disk-size"
vmDriver = "vm-driver"
xhyveDiskDriver = "xhyve-disk-driver"
NFSSharesRoot = "nfs-shares-root"
NFSShare = "nfs-share"
kubernetesVersion = "kubernetes-version"
hostOnlyCIDR = "host-only-cidr"
containerRuntime = "container-runtime"
criSocket = "cri-socket"
networkPlugin = "network-plugin"
hypervVirtualSwitch = "hyperv-virtual-switch"
kvmNetwork = "kvm-network"
keepContext = "keep-context"
createMount = "mount"
featureGates = "feature-gates"
apiServerName = "apiserver-name"
dnsDomain = "dns-domain"
mountString = "mount-string"
disableDriverMounts = "disable-driver-mounts"
cacheImages = "cache-images"
uuid = "uuid"
vpnkitSock = "hyperkit-vpnkit-sock"
vsockPorts = "hyperkit-vsock-ports"
gpu = "gpu"
embedCerts = "embed-certs"
)
var (
registryMirror []string
dockerEnv []string
dockerOpt []string
insecureRegistry []string
apiServerNames []string
apiServerIPs []net.IP
extraOptions pkgutil.ExtraOptionSlice
)
// startCmd represents the start command
var startCmd = &cobra.Command{
Use: "start",
Short: "Starts a local kubernetes cluster",
Long: `Starts a local kubernetes cluster using a VM. This command
assumes you have already installed one of the VM drivers: virtualbox/vmwarefusion/kvm/xhyve/hyperv.`,
Run: runStart,
}
func runStart(cmd *cobra.Command, args []string) {
if glog.V(8) {
glog.Infoln("Viper configuration:")
viper.Debug()
}
shouldCacheImages := viper.GetBool(cacheImages)
k8sVersion := viper.GetString(kubernetesVersion)
clusterBootstrapper := viper.GetString(cmdcfg.Bootstrapper)
var groupCacheImages errgroup.Group
if shouldCacheImages {
groupCacheImages.Go(func() error {
return machine.CacheImagesForBootstrapper(k8sVersion, clusterBootstrapper)
})
}
api, err := machine.NewAPIClient()
if err != nil {
fmt.Fprintf(os.Stderr, "Error getting client: %v\n", err)
os.Exit(1)
}
defer api.Close()
exists, err := api.Exists(cfg.GetMachineName())
if err != nil {
glog.Exitf("checking if machine exists: %v", err)
}
diskSize := viper.GetString(humanReadableDiskSize)
diskSizeMB := pkgutil.CalculateDiskSizeInMB(diskSize)
if diskSizeMB < constants.MinimumDiskSizeMB {
err := fmt.Errorf("Disk Size %dMB (%s) is too small, the minimum disk size is %dMB", diskSizeMB, diskSize, constants.MinimumDiskSizeMB)
glog.Errorln("Error parsing disk size:", err)
os.Exit(1)
}
if viper.GetBool(gpu) && viper.GetString(vmDriver) != "kvm2" {
glog.Exitf("--gpu is only supported with --vm-driver=kvm2")
}
config := cfg.MachineConfig{
MinikubeISO: viper.GetString(isoURL),
Memory: viper.GetInt(memory),
CPUs: viper.GetInt(cpus),
DiskSize: diskSizeMB,
VMDriver: viper.GetString(vmDriver),
ContainerRuntime: viper.GetString(containerRuntime),
HyperkitVpnKitSock: viper.GetString(vpnkitSock),
HyperkitVSockPorts: viper.GetStringSlice(vsockPorts),
XhyveDiskDriver: viper.GetString(xhyveDiskDriver),
NFSShare: viper.GetStringSlice(NFSShare),
NFSSharesRoot: viper.GetString(NFSSharesRoot),
DockerEnv: dockerEnv,
DockerOpt: dockerOpt,
InsecureRegistry: insecureRegistry,
RegistryMirror: registryMirror,
HostOnlyCIDR: viper.GetString(hostOnlyCIDR),
HypervVirtualSwitch: viper.GetString(hypervVirtualSwitch),
KvmNetwork: viper.GetString(kvmNetwork),
Downloader: pkgutil.DefaultDownloader{},
DisableDriverMounts: viper.GetBool(disableDriverMounts),
UUID: viper.GetString(uuid),
GPU: viper.GetBool(gpu),
}
fmt.Printf("Starting local Kubernetes %s cluster...\n", viper.GetString(kubernetesVersion))
fmt.Println("Starting VM...")
var host *host.Host
start := func() (err error) {
host, err = cluster.StartHost(api, config)
if err != nil {
glog.Errorf("Error starting host: %v.\n\n Retrying.\n", err)
}
return err
}
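// Retry host creation/start up to 5 times, two seconds apart.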
err = pkgutil.RetryAfter(5, start, 2*time.Second)
if err != nil {
glog.Errorln("Error starting host: ", err)
cmdutil.MaybeReportErrorAndExit(err)
}
fmt.Println("Getting VM IP address...")
ip, err := host.Driver.GetIP()
if err != nil {
glog.Errorln("Error getting VM IP address: ", err)
cmdutil.MaybeReportErrorAndExit(err)
}
selectedKubernetesVersion := viper.GetString(kubernetesVersion)
if selectedKubernetesVersion == "" {
selectedKubernetesVersion = constants.DefaultKubernetesVersion
}
// Load profile cluster config from file
cc, err := cfg.Load()
if err != nil && !os.IsNotExist(err) {
glog.Errorln("Error loading profile config: ", err)
}
if err == nil {
oldKubernetesVersion, err := semver.Make(strings.TrimPrefix(cc.KubernetesConfig.KubernetesVersion, version.VersionPrefix))
if err != nil {
glog.Errorln("Error parsing version semver: ", err)
}
newKubernetesVersion, err := semver.Make(strings.TrimPrefix(viper.GetString(kubernetesVersion), version.VersionPrefix))
if err != nil {
glog.Errorln("Error parsing version semver: ", err)
}
// Check if it's an attempt to downgrade the version. Avoid version downgrade.
if newKubernetesVersion.LT(oldKubernetesVersion) {
selectedKubernetesVersion = version.VersionPrefix + oldKubernetesVersion.String()
fmt.Println("Kubernetes version downgrade is not supported. Using version:", selectedKubernetesVersion)
}
}
kubernetesConfig := cfg.KubernetesConfig{
KubernetesVersion: selectedKubernetesVersion,
NodeIP: ip,
NodeName: constants.DefaultNodeName,
APIServerName: viper.GetString(apiServerName),
APIServerNames: apiServerNames,
APIServerIPs: apiServerIPs,
DNSDomain: viper.GetString(dnsDomain),
FeatureGates: viper.GetString(featureGates),
ContainerRuntime: viper.GetString(containerRuntime),
CRISocket: viper.GetString(criSocket),
NetworkPlugin: viper.GetString(networkPlugin),
ServiceCIDR: pkgutil.DefaultServiceCIDR,
ExtraOptions: extraOptions,
ShouldLoadCachedImages: shouldCacheImages,
}
k8sBootstrapper, err := GetClusterBootstrapper(api, clusterBootstrapper)
if err != nil {
glog.Exitf("Error getting cluster bootstrapper: %v", err)
}
// Write profile cluster configuration to file
clusterConfig := cfg.Config{
MachineConfig: config,
KubernetesConfig: kubernetesConfig,
}
if err := saveConfig(clusterConfig); err != nil {
glog.Errorln("Error saving profile cluster configuration: ", err)
}
if shouldCacheImages {
fmt.Println("Waiting for image caching to complete...")
if err := groupCacheImages.Wait(); err != nil {
glog.Errorln("Error caching images: ", err)
}
}
fmt.Println("Moving files into cluster...")
if err := k8sBootstrapper.UpdateCluster(kubernetesConfig); err != nil {
glog.Errorln("Error updating cluster: ", err)
cmdutil.MaybeReportErrorAndExit(err)
}
fmt.Println("Setting up certs...")
if err := k8sBootstrapper.SetupCerts(kubernetesConfig); err != nil {
glog.Errorln("Error configuring authentication: ", err)
cmdutil.MaybeReportErrorAndExit(err)
}
fmt.Println("Connecting to cluster...")
kubeHost, err := host.Driver.GetURL()
if err != nil {
glog.Errorln("Error connecting to cluster: ", err)
}
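// The driver reports a Docker endpoint (tcp://<ip>:2376); rewrite it into the
// HTTPS URL of the Kubernetes apiserver port.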
kubeHost = strings.Replace(kubeHost, "tcp://", "https://", -1)
kubeHost = strings.Replace(kubeHost, ":2376", ":"+strconv.Itoa(pkgutil.APIServerPort), -1)
fmt.Println("Setting up kubeconfig...")
// setup kubeconfig
kubeConfigFile := cmdutil.GetKubeConfigPath()
kubeCfgSetup := &kubeconfig.KubeConfigSetup{
ClusterName: cfg.GetMachineName(),
ClusterServerAddress: kubeHost,
ClientCertificate: constants.MakeMiniPath("client.crt"),
ClientKey: constants.MakeMiniPath("client.key"),
CertificateAuthority: constants.MakeMiniPath("ca.crt"),
KeepContext: viper.GetBool(keepContext),
EmbedCerts: viper.GetBool(embedCerts),
}
kubeCfgSetup.SetKubeConfigFile(kubeConfigFile)
if err := kubeconfig.SetupKubeConfig(kubeCfgSetup); err != nil {
glog.Errorln("Error setting up kubeconfig: ", err)
cmdutil.MaybeReportErrorAndExit(err)
}
fmt.Println("Stopping extra container runtimes...")
containerRuntime := viper.GetString(containerRuntime)
if config.VMDriver != constants.DriverNone && containerRuntime != "" {
if _, err := host.RunSSHCommand("sudo systemctl stop docker"); err == nil {
_, err = host.RunSSHCommand("sudo systemctl stop docker.socket")
}
if err != nil {
glog.Errorf("Error stopping docker: %v", err)
}
}
if config.VMDriver != constants.DriverNone && (containerRuntime != constants.CrioRuntime && containerRuntime != constants.Cri_oRuntime) {
if _, err := host.RunSSHCommand("sudo systemctl stop crio"); err != nil {
glog.Errorf("Error stopping crio: %v", err)
}
}
if config.VMDriver != constants.DriverNone && containerRuntime != constants.RktRuntime {
if _, err := host.RunSSHCommand("sudo systemctl stop rkt-api"); err == nil {
_, err = host.RunSSHCommand("sudo systemctl stop rkt-metadata")
}
if err != nil {
glog.Errorf("Error stopping rkt: %v", err)
}
}
if config.VMDriver != constants.DriverNone && containerRuntime == constants.ContainerdRuntime {
fmt.Println("Restarting containerd runtime...")
// restart containerd so that it can install all plugins
if _, err := host.RunSSHCommand("sudo systemctl restart containerd"); err != nil {
glog.Errorf("Error restarting containerd: %v", err)
}
}
if !exists || config.VMDriver == constants.DriverNone {
fmt.Println("Starting cluster components...")
if err := k8sBootstrapper.StartCluster(kubernetesConfig); err != nil {
glog.Errorln("Error starting cluster: ", err)
cmdutil.MaybeReportErrorAndExit(err)
}
} else {
fmt.Println("Machine exists, restarting cluster components...")
if err := k8sBootstrapper.RestartCluster(kubernetesConfig); err != nil {
glog.Errorln("Error restarting cluster: ", err)
cmdutil.MaybeReportErrorAndExit(err)
}
}
// start 9p server mount
if viper.GetBool(createMount) {
fmt.Printf("Setting up hostmount on %s...\n", viper.GetString(mountString))
path := os.Args[0]
mountDebugVal := 0
if glog.V(8) {
mountDebugVal = 1
}
mountCmd := exec.Command(path, "mount", fmt.Sprintf("--v=%d", mountDebugVal), viper.GetString(mountString))
mountCmd.Env = append(os.Environ(), constants.IsMinikubeChildProcess+"=true")
if glog.V(8) {
mountCmd.Stdout = os.Stdout
mountCmd.Stderr = os.Stderr
}
err = mountCmd.Start()
if err != nil {
glog.Errorf("Error running command minikube mount %v", err)
cmdutil.MaybeReportErrorAndExit(err)
}
err = ioutil.WriteFile(filepath.Join(constants.GetMinipath(), constants.MountProcessFileName), []byte(strconv.Itoa(mountCmd.Process.Pid)), 0644)
if err != nil {
glog.Errorf("Error writing mount process pid to file: %v", err)
cmdutil.MaybeReportErrorAndExit(err)
}
}
if kubeCfgSetup.KeepContext {
fmt.Printf("The local Kubernetes cluster has started. The kubectl context has not been altered, kubectl will require \"--context=%s\" to use the local Kubernetes cluster.\n",
kubeCfgSetup.ClusterName)
} else {
fmt.Println("Kubectl is now configured to use the cluster.")
}
if config.VMDriver == "none" {
if viper.GetBool(cfg.WantNoneDriverWarning) {
fmt.Println(`===================
WARNING: IT IS RECOMMENDED NOT TO RUN THE NONE DRIVER ON PERSONAL WORKSTATIONS
The 'none' driver will run an insecure kubernetes apiserver as root that may leave the host vulnerable to CSRF attacks` + "\n")
}
if os.Getenv("CHANGE_MINIKUBE_NONE_USER") == "" {
fmt.Println(`When using the none driver, the kubectl config and credentials generated will be root owned and will appear in the root home directory.
You will need to move the files to the appropriate location and then set the correct permissions. An example of this is below:
sudo mv /root/.kube $HOME/.kube # this will write over any previous configuration
sudo chown -R $USER $HOME/.kube
sudo chgrp -R $USER $HOME/.kube
sudo mv /root/.minikube $HOME/.minikube # this will write over any previous configuration
sudo chown -R $USER $HOME/.minikube
sudo chgrp -R $USER $HOME/.minikube
This can also be done automatically by setting the env var CHANGE_MINIKUBE_NONE_USER=true`)
}
if err := pkgutil.MaybeChownDirRecursiveToMinikubeUser(constants.GetMinipath()); err != nil {
glog.Errorf("Error recursively changing ownership of directory %s: %s",
constants.GetMinipath(), err)
cmdutil.MaybeReportErrorAndExit(err)
}
}
fmt.Println("Loading cached images from config file.")
err = LoadCachedImagesInConfigFile()
if err != nil {
fmt.Println("Unable to load cached images from config file.")
}
}
func init() {
startCmd.Flags().Bool(keepContext, constants.DefaultKeepContext, "This will keep the existing kubectl context and will create a minikube context.")
startCmd.Flags().Bool(createMount, false, "This will start the mount daemon and automatically mount files into minikube")
startCmd.Flags().String(mountString, constants.DefaultMountDir+":"+constants.DefaultMountEndpoint, "The argument to pass to the minikube mount command on start")
startCmd.Flags().Bool(disableDriverMounts, false, "Disables the filesystem mounts provided by the hypervisors (vboxfs, xhyve-9p)")
startCmd.Flags().String(isoURL, constants.DefaultIsoUrl, "Location of the minikube iso")
startCmd.Flags().String(vmDriver, constants.DefaultVMDriver, fmt.Sprintf("VM driver is one of: %v", constants.SupportedVMDrivers))
startCmd.Flags().Int(memory, constants.DefaultMemory, "Amount of RAM allocated to the minikube VM in MB")
startCmd.Flags().Int(cpus, constants.DefaultCPUS, "Number of CPUs allocated to the minikube VM")
startCmd.Flags().String(humanReadableDiskSize, constants.DefaultDiskSize, "Disk size allocated to the minikube VM (format: <number>[<unit>], where unit = b, k, m or g)")
startCmd.Flags().String(hostOnlyCIDR, "192.168.99.1/24", "The CIDR to be used for the minikube VM (only supported with Virtualbox driver)")
startCmd.Flags().String(hypervVirtualSwitch, "", "The hyperv virtual switch name. Defaults to first found. (only supported with HyperV driver)")
startCmd.Flags().String(kvmNetwork, "default", "The KVM network name. (only supported with KVM driver)")
startCmd.Flags().String(xhyveDiskDriver, "ahci-hd", "The disk driver to use [ahci-hd|virtio-blk] (only supported with xhyve driver)")
startCmd.Flags().StringSlice(NFSShare, []string{}, "Local folders to share with Guest via NFS mounts (only supported with hyperkit now)")
startCmd.Flags().String(NFSSharesRoot, "/nfsshares", "Where to root the NFS Shares (defaults to /nfsshares, only supported with hyperkit now)")
startCmd.Flags().StringArrayVar(&dockerEnv, "docker-env", nil, "Environment variables to pass to the Docker daemon. (format: key=value)")
startCmd.Flags().StringArrayVar(&dockerOpt, "docker-opt", nil, "Specify arbitrary flags to pass to the Docker daemon. (format: key=value)")
startCmd.Flags().String(apiServerName, constants.APIServerName, "The apiserver name which is used in the generated certificate for kubernetes. This can be used if you want to make the apiserver available from outside the machine")
startCmd.Flags().StringArrayVar(&apiServerNames, "apiserver-names", nil, "A set of apiserver names which are used in the generated certificate for kubernetes. This can be used if you want to make the apiserver available from outside the machine")
startCmd.Flags().IPSliceVar(&apiServerIPs, "apiserver-ips", nil, "A set of apiserver IP Addresses which are used in the generated certificate for kubernetes. This can be used if you want to make the apiserver available from outside the machine")
startCmd.Flags().String(dnsDomain, constants.ClusterDNSDomain, "The cluster dns domain name used in the kubernetes cluster")
startCmd.Flags().StringSliceVar(&insecureRegistry, "insecure-registry", nil, "Insecure Docker registries to pass to the Docker daemon. The default service CIDR range will automatically be added.")
startCmd.Flags().StringSliceVar(®istryMirror, "registry-mirror", nil, "Registry mirrors to pass to the Docker daemon")
startCmd.Flags().String(containerRuntime, "", "The container runtime to be used")
startCmd.Flags().String(criSocket, "", "The cri socket path to be used")
startCmd.Flags().String(kubernetesVersion, constants.DefaultKubernetesVersion, "The kubernetes version that the minikube VM will use (ex: v1.2.3)")
startCmd.Flags().String(networkPlugin, "", "The name of the network plugin")
startCmd.Flags().String(featureGates, "", "A set of key=value pairs that describe feature gates for alpha/experimental features.")
startCmd.Flags().Bool(cacheImages, false, "If true, cache docker images for the current bootstrapper and load them into the machine.")
startCmd.Flags().Var(&extraOptions, "extra-config",
`A set of key=value pairs that describe configuration that may be passed to different components.
The key should be '.' separated, and the first part before the dot is the component to apply the configuration to.
Valid components are: kubelet, apiserver, controller-manager, etcd, proxy, scheduler.`)
startCmd.Flags().String(uuid, "", "Provide VM UUID to restore MAC address (only supported with Hyperkit driver).")
startCmd.Flags().String(vpnkitSock, "", "Location of the VPNKit socket used for networking. If empty, disables Hyperkit VPNKitSock, if 'auto' uses Docker for Mac VPNKit connection, otherwise uses the specified VSock.")
startCmd.Flags().StringSlice(vsockPorts, []string{}, "List of guest VSock ports that should be exposed as sockets on the host (only supported with hyperkit now).")
startCmd.Flags().Bool(gpu, false, "Enable experimental NVIDIA GPU support in minikube (works only with kvm2 driver on Linux)")
viper.BindPFlags(startCmd.Flags())
RootCmd.AddCommand(startCmd)
}
// saveConfig saves profile cluster configuration in
// $MINIKUBE_HOME/profiles/<profilename>/config.json
func saveConfig(clusterConfig cfg.Config) error {
data, err := json.MarshalIndent(clusterConfig, "", " ")
if err != nil {
return err
}
profileConfigFile := constants.GetProfileFile(viper.GetString(cfg.MachineProfile))
if err := os.MkdirAll(filepath.Dir(profileConfigFile), 0700); err != nil {
return err
}
if err := saveConfigToFile(data, profileConfigFile); err != nil {
return err
}
return nil
}
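// saveConfigToFile writes data to file. If the file already exists, the data
// is first written to a temp file in the same directory and then renamed into
// place, so an interrupted write cannot leave a truncated config behind.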
func saveConfigToFile(data []byte, file string) error {
if _, err := os.Stat(file); os.IsNotExist(err) {
return ioutil.WriteFile(file, data, 0600)
}
tmpfi, err := ioutil.TempFile(filepath.Dir(file), "config.json.tmp")
if err != nil {
return err
}
defer os.Remove(tmpfi.Name())
if err = ioutil.WriteFile(tmpfi.Name(), data, 0600); err != nil {
return err
}
if err = tmpfi.Close(); err != nil {
return err
}
if err = os.Remove(file); err != nil {
return err
}
if err = os.Rename(tmpfi.Name(), file); err != nil {
return err
}
return nil
}
| ["\"CHANGE_MINIKUBE_NONE_USER\""] | [] | ["CHANGE_MINIKUBE_NONE_USER"] | [] | ["CHANGE_MINIKUBE_NONE_USER"] | go | 1 | 0 |
docs/conf.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import os
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.coverage',
'sphinx.ext.doctest',
'sphinx.ext.extlinks',
'sphinx.ext.ifconfig',
'sphinx.ext.napoleon',
'sphinx.ext.todo',
'sphinx.ext.viewcode',
]
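# Enable the spelling checker only when the SPELLCHECK environment variable is set.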
if os.getenv('SPELLCHECK'):
extensions += 'sphinxcontrib.spelling',
spelling_show_suggestions = True
spelling_lang = 'en_US'
source_suffix = '.rst'
master_doc = 'index'
project = 'py-multihash'
year = '2018'
author = 'Carson Farmer'
copyright = '{0}, {1}'.format(year, author)
version = release = '0.1.0'
pygments_style = 'trac'
templates_path = ['.']
extlinks = {
'issue': ('https://github.com/carsonfarmer/py-multihash/issues/%s', '#'),
'pr': ('https://github.com/carsonfarmer/py-multihash/pull/%s', 'PR #'),
}
# on_rtd is whether we are on readthedocs.org
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd: # only set the theme if we're building docs locally
html_theme = 'sphinx_rtd_theme'
html_use_smartypants = True
html_last_updated_fmt = '%b %d, %Y'
html_split_index = False
html_sidebars = {
'**': ['searchbox.html', 'globaltoc.html', 'sourcelink.html'],
}
html_short_title = '%s-%s' % (project, version)
napoleon_use_ivar = True
napoleon_use_rtype = False
napoleon_use_param = False
| [] | [] | ["SPELLCHECK", "READTHEDOCS"] | [] | ["SPELLCHECK", "READTHEDOCS"] | python | 2 | 0 |
common-methods.go | /*
* Minio Client (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package main
import (
"io"
"os"
"regexp"
"runtime"
"strings"
"github.com/minio/mc/pkg/console"
"github.com/minio/minio/pkg/probe"
)
// Check if the target URL represents folder. It may or may not exist yet.
func isTargetURLDir(targetURL string) bool {
targetURLParse := newClientURL(targetURL)
_, targetContent, err := url2Stat(targetURL)
if err != nil {
_, aliasedTargetURL, _ := mustExpandAlias(targetURL)
if aliasedTargetURL == targetURL {
return false
}
if targetURLParse.Path == string(targetURLParse.Separator) && targetURLParse.Scheme != "" {
return false
}
if strings.HasSuffix(targetURLParse.Path, string(targetURLParse.Separator)) {
return true
}
return false
}
if !targetContent.Type.IsDir() { // Target exists but is not a directory.
return false
}
return true
}
// getSource gets a reader from URL.
func getSourceStream(urlStr string) (reader io.Reader, err *probe.Error) {
alias, urlStrFull, _, err := expandAlias(urlStr)
if err != nil {
return nil, err.Trace(urlStr)
}
return getSourceStreamFromAlias(alias, urlStrFull)
}
// getSourceStreamFromAlias gets a reader from URL.
func getSourceStreamFromAlias(alias string, urlStr string) (reader io.Reader, err *probe.Error) {
sourceClnt, err := newClientFromAlias(alias, urlStr)
if err != nil {
return nil, err.Trace(alias, urlStr)
}
reader, err = sourceClnt.Get()
if err != nil {
return nil, err.Trace(alias, urlStr)
}
return reader, nil
}
// putTargetStreamFromAlias writes to URL from Reader.
func putTargetStreamFromAlias(alias string, urlStr string, reader io.Reader, size int64, progress io.Reader) (int64, *probe.Error) {
targetClnt, err := newClientFromAlias(alias, urlStr)
if err != nil {
return 0, err.Trace(alias, urlStr)
}
contentType := guessURLContentType(urlStr)
var n int64
n, err = targetClnt.Put(reader, size, contentType, progress)
if err != nil {
return n, err.Trace(alias, urlStr)
}
return n, nil
}
// putTargetStream writes to URL from reader. If length=-1, read until EOF.
func putTargetStream(urlStr string, reader io.Reader, size int64) (int64, *probe.Error) {
alias, urlStrFull, _, err := expandAlias(urlStr)
if err != nil {
return 0, err.Trace(alias, urlStr)
}
return putTargetStreamFromAlias(alias, urlStrFull, reader, size, nil)
}
// copyTargetStreamFromAlias copies to URL from source.
func copySourceStreamFromAlias(alias string, urlStr string, source string, size int64, progress io.Reader) *probe.Error {
targetClnt, err := newClientFromAlias(alias, urlStr)
if err != nil {
return err.Trace(alias, urlStr)
}
err = targetClnt.Copy(source, size, progress)
if err != nil {
return err.Trace(alias, urlStr)
}
return nil
}
// newClientFromAlias gives a new client interface for matching
// alias entry in the mc config file. If no matching host config entry
// is found, fs client is returned.
func newClientFromAlias(alias string, urlStr string) (Client, *probe.Error) {
hostCfg := mustGetHostConfig(alias)
if hostCfg == nil {
// No matching host config. So we treat it like a
// filesystem.
fsClient, err := fsNew(urlStr)
if err != nil {
return nil, err.Trace(alias, urlStr)
}
return fsClient, nil
}
// We have a valid alias and hostConfig. We populate the
// credentials from the match found in the config file.
s3Config := new(Config)
// Access/secret keys retrieved from the environment override the
// ones present in the config file.
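// The expected format is MC_SECRET_<alias>="<access_key>:<secret_key>".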
keysPairEnv := os.Getenv("MC_SECRET_" + alias)
keysPairArray := strings.Split(keysPairEnv, ":")
var accessKeyEnv, secretKeyEnv string
if len(keysPairArray) >= 1 {
accessKeyEnv = keysPairArray[0]
}
if len(keysPairArray) >= 2 {
secretKeyEnv = keysPairArray[1]
}
if len(keysPairEnv) > 0 &&
isValidAccessKey(accessKeyEnv) && isValidSecretKey(secretKeyEnv) {
s3Config.AccessKey = accessKeyEnv
s3Config.SecretKey = secretKeyEnv
} else {
if len(keysPairEnv) > 0 {
console.Errorln("Access/Secret keys associated to `" + alias + "' " +
"are found in your environment but not suitable for use. " +
"Falling back to the standard config.")
}
s3Config.AccessKey = hostCfg.AccessKey
s3Config.SecretKey = hostCfg.SecretKey
}
s3Config.Signature = hostCfg.API
s3Config.AppName = "mc"
s3Config.AppVersion = mcVersion
s3Config.AppComments = []string{os.Args[0], runtime.GOOS, runtime.GOARCH}
s3Config.HostURL = urlStr
s3Config.Debug = globalDebug
s3Client, err := s3New(s3Config)
if err != nil {
return nil, err.Trace(alias, urlStr)
}
return s3Client, nil
}
// urlRgx - verify if aliased url is real URL.
var urlRgx = regexp.MustCompile("^https?://")
// newClient gives a new client interface
func newClient(aliasedURL string) (Client, *probe.Error) {
alias, urlStrFull, hostCfg, err := expandAlias(aliasedURL)
if err != nil {
return nil, err.Trace(aliasedURL)
}
// Verify if the aliasedURL is a real URL, fail in those cases
// indicating the user to add alias.
if hostCfg == nil && urlRgx.MatchString(aliasedURL) {
return nil, errInvalidAliasedURL(aliasedURL).Trace(aliasedURL)
}
return newClientFromAlias(alias, urlStrFull)
}
| ["\"MC_SECRET_\" + alias"] | [] | ["MC_SECRET_\" + alia"] | [] | ["MC_SECRET_\" + alia"] | go | 1 | 0 |
src/main.go | package main
import (
"flag"
"fmt"
"os"
server "github.com/sprinteins/web-scientist/server"
)
func main() {
var host = flag.String("host", "localhost", "The Host")
var port = flag.String("port", "7654", "The port")
var reference = flag.String("reference", os.Getenv("REFURL"), "The reference service")
var experiment = flag.String("experiment", os.Getenv("EXPURL"), "The experiment service")
flag.Parse()
fmt.Printf("listening on http://%s:%s\n", *host, *port)
// server(*host, *port)
fmt.Printf("server started")
// server.Start(*host, *port)
var scientist = server.New(*host, *port)
scientist.SetReference(*reference)
scientist.SetExperiment(*experiment)
scientist.Start()
}
| ["\"REFURL\"", "\"EXPURL\""] | [] | ["REFURL", "EXPURL"] | [] | ["REFURL", "EXPURL"] | go | 2 | 0 |
main.go | package main
import (
"log"
"net/http"
"os"
"github.com/dimfeld/httptreemux"
"github.com/globalsign/mgo"
"github.com/richardkevin/go-beers/api"
"github.com/richardkevin/go-beers/beers"
)
func main() {
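// Build the listen address from the PORT environment variable.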
PORT := os.Getenv("PORT")
addr := "127.0.0.1:" + PORT
router := httptreemux.NewContextMux()
session, err := mgo.Dial("localhost:27017/go-beers")
if err != nil {
log.Fatal(err)
}
repository := beers.NewBeerRepository(session)
router.Handler("GET", "/", &api.DefaultHandler{repository})
router.Handler(http.MethodGet, "/beer/:id", &api.GetBeerHandler{repository})
router.Handler(http.MethodGet, "/create/:name", &api.UpsertBeerHandler{repository})
log.Printf("Running web server on: http://%s\n", addr)
log.Fatal(http.ListenAndServe(addr, router))
// example usage:
// curl http://localhost:8081/beer/<id>
// curl http://localhost:8081/create/heineken
}
| ["\"PORT\""] | [] | ["PORT"] | [] | ["PORT"] | go | 1 | 0 |
mailpile/ui.py | #
# This file contains the UserInteraction and Session classes.
#
# The Session encapsulates settings and command results, allowing commands
# to be chained in an interactive environment.
#
# The UserInteraction classes log the progress and performance of individual
# operations and assist with rendering the results in various formats (text,
# HTML, JSON, etc.).
#
###############################################################################
import datetime
import getpass
import os
import random
import re
import sys
import tempfile
import traceback
import json
import urllib
from collections import defaultdict
from json import JSONEncoder
from jinja2 import TemplateError, TemplateSyntaxError, TemplateNotFound
from jinja2 import TemplatesNotFound, TemplateAssertionError, UndefinedError
import mailpile.commands
import mailpile.util
from mailpile.i18n import gettext as _
from mailpile.i18n import ngettext as _n
from mailpile.search import MailIndex
from mailpile.util import *
class SuppressHtmlOutput(Exception):
pass
def default_dict(*args):
d = defaultdict(str)
for arg in args:
d.update(arg)
return d
class NoColors:
"""Dummy color constants"""
C_SAVE = ''
C_RESTORE = ''
NORMAL = ''
BOLD = ''
NONE = ''
BLACK = ''
RED = ''
YELLOW = ''
BLUE = ''
MAGENTA = ''
CYAN = ''
FORMAT = "%s%s"
FORMAT_READLINE = "%s%s"
RESET = ''
LINE_BELOW = ''
def __init__(self):
self.lock = UiRLock()
def __enter__(self, *args, **kwargs):
return self.lock.__enter__()
def __exit__(self, *args, **kwargs):
return self.lock.__exit__(*args, **kwargs)
def max_width(self):
return 79
def color(self, text, color='', weight='', readline=False):
return '%s%s%s' % ((self.FORMAT_READLINE if readline else self.FORMAT)
% (color, weight), text, self.RESET)
def replace_line(self, text, chars=None):
pad = ' ' * max(0, min(self.max_width(),
self.max_width()-(chars or len(unicode(text)))))
return '%s%s\r' % (text, pad)
def add_line_below(self):
pass
def print_below(self):
pass
def write(self, data):
with self:
sys.stderr.write(data)
def check_max_width(self):
pass
class ANSIColors(NoColors):
"""ANSI color constants"""
NORMAL = ''
BOLD = ';1'
NONE = '0'
BLACK = "30"
RED = "31"
YELLOW = "33"
BLUE = "34"
MAGENTA = '35'
CYAN = '36'
RESET = "\x1B[0m"
FORMAT = "\x1B[%s%sm"
FORMAT_READLINE = "\001\x1B[%s%sm\002"
CURSOR_UP = "\x1B[1A"
CURSOR_DN = "\x1B[1B"
CURSOR_SAVE = "\x1B[s"
CURSOR_RESTORE = "\x1B[u"
CLEAR_LINE = "\x1B[2K"
def __init__(self):
NoColors.__init__(self)
self.check_max_width()
def replace_line(self, text, chars=None):
return '%s%s%s\r%s' % (self.CURSOR_SAVE,
self.CLEAR_LINE, text,
self.CURSOR_RESTORE)
def max_width(self):
return self.MAX_WIDTH
def check_max_width(self):
try:
import fcntl, termios, struct
fcntl_result = fcntl.ioctl(sys.stdin.fileno(),
termios.TIOCGWINSZ,
struct.pack('HHHH', 0, 0, 0, 0))
h, w, hp, wp = struct.unpack('HHHH', fcntl_result)
self.MAX_WIDTH = (w-1)
except:
self.MAX_WIDTH = 79
class Completer(object):
"""Readline autocompler"""
DELIMS = ' \t\n`~!@#$%^&*()-=+[{]}\\|;:\'",<>?'
def __init__(self, session):
self.session = session
def _available_opts(self, text):
opts = ([s.SYNOPSIS[1] for s in mailpile.commands.COMMANDS] +
[s.SYNOPSIS[2] for s in mailpile.commands.COMMANDS] +
[t.name.lower() for t in self.session.config.tags.values()])
return sorted([o for o in opts if o and o.startswith(text)])
def _autocomplete(self, text, state):
try:
return self._available_opts(text)[state] + ' '
except IndexError:
return None
def get_completer(self):
return lambda t, s: self._autocomplete(t, s)
class UserInteraction:
"""Log the progress and performance of individual operations"""
MAX_BUFFER_LEN = 150
LOG_URGENT = 0
LOG_RESULT = 5
LOG_ERROR = 10
LOG_NOTIFY = 20
LOG_WARNING = 30
LOG_PROGRESS = 40
LOG_DEBUG = 50
LOG_ALL = 99
LOG_PREFIX = ''
def __init__(self, config, log_parent=None, log_prefix=None):
self.log_parent = log_parent
self.log_buffer = []
self.log_buffering = 0
self.log_level = self.LOG_ALL
self.log_prefix = log_prefix or self.LOG_PREFIX
self.interactive = False
self.time_tracking = [('Main', [])]
self.time_elapsed = 0.0
self.render_mode = 'text'
self.term = NoColors()
self.config = config
self.html_variables = {
'title': 'Mailpile',
'name': 'Chelsea Manning',
'csrf': '',
'even_odd': 'odd',
'mailpile_size': 0
}
# Logging
def _fmt_log(self, text, level=LOG_URGENT):
c, w, clip = self.term.NONE, self.term.NORMAL, 2048
if level == self.LOG_URGENT:
c, w = self.term.RED, self.term.BOLD
elif level == self.LOG_ERROR:
c = self.term.RED
elif level == self.LOG_WARNING:
c = self.term.YELLOW
elif level == self.LOG_NOTIFY:
c = self.term.CYAN
elif level == self.LOG_DEBUG:
c = self.term.MAGENTA
elif level == self.LOG_PROGRESS:
c, clip = self.term.BLUE, 78
formatted = self.term.replace_line(self.term.color(
unicode(text[:clip]).encode('utf-8'), color=c, weight=w),
chars=len(text[:clip]))
if level != self.LOG_PROGRESS:
formatted += '\n'
return formatted
def _display_log(self, text, level=LOG_URGENT):
if not text.startswith(self.log_prefix):
text = '%slog(%s): %s' % (self.log_prefix, level, text)
if self.log_parent is not None:
self.log_parent.log(level, text)
else:
self.term.write(self._fmt_log(text, level=level))
def _debug_log(self, text, level):
if text and 'log' in self.config.sys.debug:
if not text.startswith(self.log_prefix):
text = '%slog(%s): %s' % (self.log_prefix, level, text)
if self.log_parent is not None:
return self.log_parent.log(level, text)
else:
self.term.write(self._fmt_log(text, level=level))
def clear_log(self):
self.log_buffer = []
def flush_log(self):
try:
while len(self.log_buffer) > 0:
level, message = self.log_buffer.pop(0)
if level <= self.log_level:
self._display_log(message, level)
except IndexError:
pass
def block(self):
with self.term:
self._display_log('')
self.log_buffering += 1
def unblock(self, force=False):
with self.term:
if self.log_buffering <= 1 or force:
self.log_buffering = 0
self.flush_log()
else:
self.log_buffering -= 1
def log(self, level, message):
if self.log_buffering:
self.log_buffer.append((level, message))
while len(self.log_buffer) > self.MAX_BUFFER_LEN:
self.log_buffer[0:(self.MAX_BUFFER_LEN/10)] = []
elif level <= self.log_level:
self._display_log(message, level)
error = lambda self, msg: self.log(self.LOG_ERROR, msg)
notify = lambda self, msg: self.log(self.LOG_NOTIFY, msg)
warning = lambda self, msg: self.log(self.LOG_WARNING, msg)
progress = lambda self, msg: self.log(self.LOG_PROGRESS, msg)
debug = lambda self, msg: self.log(self.LOG_DEBUG, msg)
# Progress indication and performance tracking
times = property(lambda self: self.time_tracking[-1][1])
def mark(self, action=None, percent=None):
"""Note that we are about to perform an action."""
if not action:
try:
action = self.times[-1][1]
except IndexError:
action = 'mark'
self.progress(action)
self.times.append((time.time(), action))
def report_marks(self, quiet=False, details=False):
t = self.times
if t and t[0]:
self.time_elapsed = elapsed = t[-1][0] - t[0][0]
if not quiet:
try:
self.notify(_('Elapsed: %.3fs (%s)') % (elapsed, t[-1][1]))
if details:
for i in range(0, len(self.times)-1):
e = t[i+1][0] - t[i][0]
self.debug(' -> %.3fs (%s)' % (e, t[i][1]))
except IndexError:
self.notify(_('Elapsed: %.3fs') % elapsed)
return elapsed
return 0
def reset_marks(self, mark=True, quiet=False, details=False):
"""This sequence of actions is complete."""
if self.times and mark:
self.mark()
elapsed = self.report_marks(quiet=quiet, details=details)
self.times[:] = []
return elapsed
def push_marks(self, subtask):
"""Start tracking a new sub-task."""
self.time_tracking.append((subtask, []))
def pop_marks(self, name=None, quiet=True):
"""Sub-task ended!"""
elapsed = self.report_marks(quiet=quiet)
if len(self.time_tracking) > 1:
if not name or (self.time_tracking[-1][0] == name):
self.time_tracking.pop(-1)
return elapsed
# Higher level command-related methods
def _display_result(self, result):
with self.term:
sys.stdout.write(unicode(result).encode('utf-8').rstrip())
sys.stdout.write('\n')
def start_command(self, cmd, args, kwargs):
self.flush_log()
self.push_marks(cmd)
self.mark('%s(%s)' % (cmd, ', '.join((args or tuple()) + ('%s' % kwargs,))))
def finish_command(self, cmd):
self.pop_marks(name=cmd)
def display_result(self, result):
"""Render command result objects to the user"""
if self.render_mode == 'json':
return self._display_result(result.as_('json'))
for suffix in ('css', 'html', 'js', 'rss', 'txt', 'xml'):
if self.render_mode.endswith(suffix):
jsuffix = 'j' + suffix
if self.render_mode in (suffix, jsuffix):
template = 'as.' + suffix
else:
template = self.render_mode.replace('.' + jsuffix,
'.' + suffix)
return self._display_result(
result.as_template(suffix, template=template))
return self._display_result(unicode(result))
# Creating output files
DEFAULT_DATA_NAME_FMT = '%(msg_mid)s.%(count)s_%(att_name)s.%(att_ext)s'
DEFAULT_DATA_ATTRS = {
'msg_mid': 'file',
'mimetype': 'application/octet-stream',
'att_name': 'unnamed',
'att_ext': 'dat',
'rand': '0000'
}
DEFAULT_DATA_EXTS = {
# FIXME: Add more!
'text/plain': 'txt',
'text/html': 'html',
'image/gif': 'gif',
'image/jpeg': 'jpg',
'image/png': 'png'
}
def _make_data_filename(self, name_fmt, attributes):
return (name_fmt or self.DEFAULT_DATA_NAME_FMT) % attributes
def _make_data_attributes(self, attributes={}):
attrs = self.DEFAULT_DATA_ATTRS.copy()
attrs.update(attributes)
attrs['rand'] = '%4.4x' % random.randint(0, 0xffff)
if attrs['att_ext'] == self.DEFAULT_DATA_ATTRS['att_ext']:
if attrs['mimetype'] in self.DEFAULT_DATA_EXTS:
attrs['att_ext'] = self.DEFAULT_DATA_EXTS[attrs['mimetype']]
return attrs
def open_for_data(self, name_fmt=None, attributes={}):
filename = self._make_data_filename(
name_fmt, self._make_data_attributes(attributes))
return filename, open(filename, 'w')
# Rendering helpers for templating and such
def render_json(self, data):
"""Render data as JSON"""
class NoFailEncoder(JSONEncoder):
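# Render datetimes as strings and any other non-serializable object as
# the placeholder "COMPLEXBLOB" instead of raising.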
def default(self, obj):
if isinstance(obj, (list, dict, str, unicode,
int, float, bool, type(None))):
return JSONEncoder.default(self, obj)
if isinstance(obj, datetime.datetime):
return str(obj)
return "COMPLEXBLOB"
return json.dumps(data, indent=1, cls=NoFailEncoder,
sort_keys=True, allow_nan=False)
def _web_template(self, config, tpl_names, elems=None):
env = config.jinja_env
env.session = Session(config)
env.session.ui = HttpUserInteraction(None, config, log_parent=self)
for fn in tpl_names:
try:
# FIXME(Security): Here we need to sanitize the file name
# very strictly in case it somehow came
# from user data.
return env.get_template(fn)
except (IOError, OSError, AttributeError), e:
pass
return None
def render_web(self, cfg, tpl_names, data):
"""Render data as HTML"""
alldata = default_dict(self.html_variables)
alldata["config"] = cfg
alldata.update(data)
try:
template = self._web_template(cfg, tpl_names)
if template:
return template.render(alldata)
else:
emsg = _("<h1>Template not found</h1>\n<p>%s</p><p>"
"<b>DATA:</b> %s</p>")
tpl_esc_names = [escape_html(tn) for tn in tpl_names]
return emsg % (' or '.join(tpl_esc_names),
escape_html('%s' % alldata))
except (UndefinedError, ):
emsg = _("<h1>Template error</h1>\n"
"<pre>%s</pre>\n<p>%s</p><p><b>DATA:</b> %s</p>")
return emsg % (escape_html(traceback.format_exc()),
' or '.join([escape_html(tn) for tn in tpl_names]),
escape_html('%.4096s' % alldata))
except (TemplateNotFound, TemplatesNotFound), e:
emsg = _("<h1>Template not found in %s</h1>\n"
"<b>%s</b><br/>"
"<div><hr><p><b>DATA:</b> %s</p></div>")
return emsg % tuple([escape_html(unicode(v))
for v in (e.name, e.message,
'%.4096s' % alldata)])
except (TemplateError, TemplateSyntaxError,
TemplateAssertionError,), e:
emsg = _("<h1>Template error in %s</h1>\n"
"Parsing template %s: <b>%s</b> on line %s<br/>"
"<div><xmp>%s</xmp><hr><p><b>DATA:</b> %s</p></div>")
return emsg % tuple([escape_html(unicode(v))
for v in (e.name, e.filename, e.message,
e.lineno, e.source,
'%.4096s' % alldata)])
def edit_messages(self, session, emails):
if not self.interactive:
return False
for e in emails:
if not e.is_editable():
from mailpile.mailutils import NotEditableError
raise NotEditableError(_('Message %s is not editable')
% e.msg_mid())
sep = '-' * 79 + '\n'
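# Join the editable form of every message into one buffer, separated by a
# divider line, and hand it to the user's $VISUAL editor (vi by default).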
edit_this = ('\n'+sep).join([e.get_editing_string() for e in emails])
tf = tempfile.NamedTemporaryFile()
tf.write(edit_this.encode('utf-8'))
tf.flush()
with self.term:
try:
self.block()
os.system('%s %s' % (os.getenv('VISUAL', default='vi'),
tf.name))
finally:
self.unblock()
tf.seek(0, 0)
edited = tf.read().decode('utf-8')
tf.close()
if edited == edit_this:
return False
updates = [t.strip() for t in edited.split(sep)]
if len(updates) != len(emails):
raise ValueError(_('Number of edit messages does not match!'))
for i in range(0, len(updates)):
emails[i].update_from_string(session, updates[i])
return True
def get_password(self, prompt):
if not self.interactive:
return ''
with self.term:
try:
self.block()
return getpass.getpass(prompt.encode('utf-8')).decode('utf-8')
finally:
self.unblock()
class HttpUserInteraction(UserInteraction):
LOG_PREFIX = 'http/'
def __init__(self, request, *args, **kwargs):
UserInteraction.__init__(self, *args, **kwargs)
self.request = request
self.logged = []
self.results = []
# Just buffer up rendered data
def _display_log(self, text, level=UserInteraction.LOG_URGENT):
self._debug_log(text, level)
self.logged.append((level, text))
def _display_result(self, result):
self.results.append(result)
# Stream raw data to the client on open_for_data
def open_for_data(self, name_fmt=None, attributes={}):
return 'HTTP Client', RawHttpResponder(self.request, attributes)
def _render_text_responses(self, config):
if config.sys.debug:
return '%s\n%s' % (
'\n'.join([l[1] for l in self.logged]),
('\n%s\n' % ('=' * 79)).join(self.results)
)
else:
return ('\n%s\n' % ('=' * 79)).join(self.results)
def _render_single_response(self, config):
if len(self.results) == 1:
return self.results[0]
if len(self.results) > 1:
raise Exception(_('FIXME: Multiple results, OMG WTF'))
return ""
def render_response(self, config):
if (self.render_mode == 'json' or
self.render_mode.split('.')[-1] in ('jcss', 'jhtml', 'jjs',
'jrss', 'jtxt', 'jxml')):
if len(self.results) == 1:
return ('application/json', self.results[0])
else:
return ('application/json', '[%s]' % ','.join(self.results))
elif self.render_mode.endswith('html'):
return ('text/html', self._render_single_response(config))
elif self.render_mode.endswith('js'):
return ('text/javascript', self._render_single_response(config))
elif self.render_mode.endswith('css'):
return ('text/css', self._render_single_response(config))
elif self.render_mode.endswith('txt'):
return ('text/plain', self._render_single_response(config))
elif self.render_mode.endswith('rss'):
return ('application/rss+xml',
self._render_single_response(config))
elif self.render_mode.endswith('xml'):
return ('application/xml', self._render_single_response(config))
else:
return ('text/plain', self._render_text_responses(config))
def edit_messages(self, session, emails):
return False
class BackgroundInteraction(UserInteraction):
LOG_PREFIX = 'bg/'
def _display_log(self, text, level=UserInteraction.LOG_URGENT):
self._debug_log(text, level)
def edit_messages(self, session, emails):
return False
class SilentInteraction(UserInteraction):
LOG_PREFIX = 'silent/'
def _display_log(self, text, level=UserInteraction.LOG_URGENT):
self._debug_log(text, level)
def _display_result(self, result):
return result
def edit_messages(self, session, emails):
return False
class CapturingUserInteraction(UserInteraction):
def __init__(self, config):
mailpile.ui.UserInteraction.__init__(self, config)
self.captured = ''
def _display_result(self, result):
self.captured = unicode(result)
class RawHttpResponder:
def __init__(self, request, attributes={}):
self.raised = False
self.request = request
#
# FIXME: Security risks here, untrusted content may find its way into
# our raw HTTP headers etc.
#
mimetype = attributes.get('mimetype', 'application/octet-stream')
filename = attributes.get('filename', 'attachment.dat').replace('"', '')
disposition = attributes.get('disposition', 'attachment')
length = attributes['length']
request.send_http_response(200, 'OK')
headers = [
('Content-Length', length),
]
if disposition and filename:
encfilename = urllib.quote(filename.encode("utf-8"))
headers.append(('Content-Disposition',
'%s; filename*=UTF-8\'\'%s' % (disposition,
encfilename)))
elif disposition:
headers.append(('Content-Disposition', disposition))
request.send_standard_headers(header_list=headers,
mimetype=mimetype)
def write(self, data):
self.request.wfile.write(data)
def close(self):
if not self.raised:
self.raised = True
raise SuppressHtmlOutput()
class Session(object):
@classmethod
def Snapshot(cls, session, **copy_kwargs):
return cls(session.config).copy(session, **copy_kwargs)
def __init__(self, config):
self.config = config
self.main = False
self.ui = UserInteraction(config)
self.wait_lock = threading.Condition(UiRLock())
self.task_results = []
self.order = None
self.results = []
self.searched = []
self.last_event_id = None
self.displayed = None
self.context = None
def set_interactive(self, val):
self.ui.interactive = val
interactive = property(lambda s: s.ui.interactive,
lambda s, v: s.set_interactive(v))
def copy(self, session, ui=False, search=True):
if ui is True:
self.main = session.main
self.ui = session.ui
if search:
self.order = session.order
self.results = session.results[:]
self.searched = session.searched[:]
self.displayed = session.displayed
self.context = session.context
return self
def get_context(self, update=False):
if update or not self.context:
if self.searched:
sid = self.config.search_history.add(self.searched,
self.results,
self.order)
self.context = 'search:%s' % sid
return self.context
def load_context(self, context):
if self.context and self.context == context:
return context
try:
if context.startswith('search:'):
s, r, o = self.config.search_history.get(self, context[7:])
self.searched, self.results, self.order = s, r, o
self.displayed = None
self.context = context
return context
else:
return False
except (KeyError, ValueError):
return False
def report_task_completed(self, name, result):
with self.wait_lock:
self.task_results.append((name, result))
self.wait_lock.notify_all()
def report_task_failed(self, name):
self.report_task_completed(name, None)
def wait_for_task(self, wait_for, quiet=False):
while not mailpile.util.QUITTING:
with self.wait_lock:
for i in range(0, len(self.task_results)):
if self.task_results[i][0] == wait_for:
tn, rv = self.task_results.pop(i)
self.ui.reset_marks(quiet=quiet)
return rv
self.wait_lock.wait()
def error(self, message):
self.ui.error(message)
if not self.interactive:
sys.exit(1)
| [] | [] | ["VISUAL"] | [] | ["VISUAL"] | python | 1 | 0 |
test/unit/models/test_proposals.py | # -*- coding: utf-8 -*-
import pytest
import sys
import os
import time
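# Point sentinel at its test environment and config before importing modules
# that read them at import time.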
os.environ['SENTINEL_ENV'] = 'test'
os.environ['SENTINEL_CONFIG'] = os.path.normpath(os.path.join(os.path.dirname(__file__), '../../test_sentinel.conf'))
sys.path.append(os.path.normpath(os.path.join(os.path.dirname(__file__), '../../../lib')))
import misc
import config
from models import GovernanceObject, Proposal, Vote
# clear DB tables before each execution
def setup():
# clear tables first
Vote.delete().execute()
Proposal.delete().execute()
GovernanceObject.delete().execute()
def teardown():
pass
# list of proposal govobjs to import for testing
@pytest.fixture
def go_list_proposals():
items = [
{u'AbsoluteYesCount': 1000,
u'AbstainCount': 7,
u'CollateralHash': u'acb67ec3f3566c9b94a26b70b36c1f74a010a37c0950c22d683cc50da324fdca',
u'DataHex': u'5b5b2270726f706f73616c222c207b22656e645f65706f6368223a20323132323532303430302c20226e616d65223a20226465616e2d6d696c6c65722d35343933222c20227061796d656e745f61646472657373223a2022795965384b77796155753559737753596d4233713372797838585455753979375569222c20227061796d656e745f616d6f756e74223a2032352e37352c202273746172745f65706f6368223a20313437343236313038362c202274797065223a20312c202275726c223a2022687474703a2f2f6461736863656e7472616c2e6f72672f6465616e2d6d696c6c65722d35343933227d5d5d',
u'DataString': u'[["proposal", {"end_epoch": 2122520400, "name": "dean-miller-5493", "payment_address": "yYe8KwyaUu5YswSYmB3q3ryx8XTUu9y7Ui", "payment_amount": 25.75, "start_epoch": 1474261086, "type": 1, "url": "http://bluemncentral.org/dean-miller-5493"}]]',
u'Hash': u'dfd7d63979c0b62456b63d5fc5306dbec451180adee85876cbf5b28c69d1a86c',
u'IsValidReason': u'',
u'NoCount': 25,
u'YesCount': 1025,
u'fBlockchainValidity': True,
u'fCachedDelete': False,
u'fCachedEndorsed': False,
u'fCachedFunding': False,
u'fCachedValid': True},
{u'AbsoluteYesCount': 1000,
u'AbstainCount': 29,
u'CollateralHash': u'3efd23283aa98c2c33f80e4d9ed6f277d195b72547b6491f43280380f6aac810',
u'DataHex': u'5b5b2270726f706f73616c222c207b22656e645f65706f6368223a20323132323532303430302c20226e616d65223a20226665726e616e64657a2d37363235222c20227061796d656e745f61646472657373223a2022795965384b77796155753559737753596d4233713372797838585455753979375569222c20227061796d656e745f616d6f756e74223a2032352e37352c202273746172745f65706f6368223a20313437343236313038362c202274797065223a20312c202275726c223a2022687474703a2f2f6461736863656e7472616c2e6f72672f6665726e616e64657a2d37363235227d5d5d',
u'DataString': u'[["proposal", {"end_epoch": 2122520400, "name": "fernandez-7625", "payment_address": "yYe8KwyaUu5YswSYmB3q3ryx8XTUu9y7Ui", "payment_amount": 25.75, "start_epoch": 1474261086, "type": 1, "url": "http://bluemncentral.org/fernandez-7625"}]]',
u'Hash': u'0523445762025b2e01a2cd34f1d10f4816cf26ee1796167e5b029901e5873630',
u'IsValidReason': u'',
u'NoCount': 56,
u'YesCount': 1056,
u'fBlockchainValidity': True,
u'fCachedDelete': False,
u'fCachedEndorsed': False,
u'fCachedFunding': False,
u'fCachedValid': True},
]
return items
# Proposal
@pytest.fixture
def proposal():
# NOTE: no governance_object_id is set
pobj = Proposal(
start_epoch=1483250400, # 2017-01-01
end_epoch=2122520400,
name="wine-n-cheeze-party",
url="https://bluemncentral.com/wine-n-cheeze-party",
payment_address="yYe8KwyaUu5YswSYmB3q3ryx8XTUu9y7Ui",
payment_amount=13
)
# NOTE: this object is (intentionally) not saved yet.
# We want to return an built, but unsaved, object
return pobj
def test_proposal_is_valid(proposal):
from bluemnd import BluemnDaemon
import bluemnlib
bluemnd = BluemnDaemon.from_bluemn_conf(config.bluemn_conf)
orig = Proposal(**proposal.get_dict()) # make a copy
# fixture as-is should be valid
assert proposal.is_valid() is True
# ============================================================
# ensure end_date not greater than start_date
# ============================================================
proposal.end_epoch = proposal.start_epoch
assert proposal.is_valid() is False
proposal.end_epoch = proposal.start_epoch - 1
assert proposal.is_valid() is False
proposal.end_epoch = proposal.start_epoch + 0
assert proposal.is_valid() is False
proposal.end_epoch = proposal.start_epoch + 1
assert proposal.is_valid() is True
# reset
proposal = Proposal(**orig.get_dict())
# ============================================================
# ensure valid proposal name
# ============================================================
proposal.name = ' heya!@209h '
assert proposal.is_valid() is False
proposal.name = "anything' OR 'x'='x"
assert proposal.is_valid() is False
proposal.name = ' '
assert proposal.is_valid() is False
proposal.name = ''
assert proposal.is_valid() is False
proposal.name = '0'
assert proposal.is_valid() is True
proposal.name = 'R66-Y'
assert proposal.is_valid() is True
proposal.name = 'valid-name'
assert proposal.is_valid() is True
proposal.name = ' mostly-valid-name'
assert proposal.is_valid() is False
proposal.name = 'also-mostly-valid-name '
assert proposal.is_valid() is False
proposal.name = ' similarly-kinda-valid-name '
assert proposal.is_valid() is False
proposal.name = 'dean miller 5493'
assert proposal.is_valid() is False
proposal.name = 'dean-millerà-5493'
assert proposal.is_valid() is False
proposal.name = 'dean-миллер-5493'
assert proposal.is_valid() is False
# binary gibberish
proposal.name = bluemnlib.deserialise('22385c7530303933375c75303363375c75303232395c75303138635c75303064335c75303163345c75303264385c75303236615c75303134625c75303163335c75303063335c75303362385c75303266615c75303261355c75303266652f2b5c75303065395c75303164655c75303136655c75303338645c75303062385c75303138635c75303064625c75303064315c75303038325c75303133325c753032333222')
assert proposal.is_valid() is False
# reset
proposal = Proposal(**orig.get_dict())
# ============================================================
# ensure valid payment address
# ============================================================
proposal.payment_address = '7'
assert proposal.is_valid() is False
proposal.payment_address = 'YYE8KWYAUU5YSWSYMB3Q3RYX8XTUU9Y7UI'
assert proposal.is_valid() is False
proposal.payment_address = 'yYe8KwyaUu5YswSYmB3q3ryx8XTUu9y7Uj'
assert proposal.is_valid() is False
proposal.payment_address = '221 B Baker St., London, United Kingdom'
assert proposal.is_valid() is False
# this is actually the Bluemn foundation multisig address...
proposal.payment_address = '7gnwGHt17heGpG9Crfeh4KGpYNFugPhJdh'
assert proposal.is_valid() is False
proposal.payment_address = 'yYe8KwyaUu5YswSYmB3q3ryx8XTUu9y7Ui'
assert proposal.is_valid() is True
proposal.payment_address = ' yYe8KwyaUu5YswSYmB3q3ryx8XTUu9y7Ui'
assert proposal.is_valid() is False
proposal.payment_address = 'yYe8KwyaUu5YswSYmB3q3ryx8XTUu9y7Ui '
assert proposal.is_valid() is False
proposal.payment_address = ' yYe8KwyaUu5YswSYmB3q3ryx8XTUu9y7Ui '
assert proposal.is_valid() is False
# reset
proposal = Proposal(**orig.get_dict())
# validate URL
proposal.url = ' '
assert proposal.is_valid() is False
proposal.url = ' '
assert proposal.is_valid() is False
proposal.url = 'http://bit.ly/1e1EYJv'
assert proposal.is_valid() is True
proposal.url = ' http://bit.ly/1e1EYJv'
assert proposal.is_valid() is False
proposal.url = 'http://bit.ly/1e1EYJv '
assert proposal.is_valid() is False
proposal.url = ' http://bit.ly/1e1EYJv '
assert proposal.is_valid() is False
proposal.url = 'http://::12.34.56.78]/'
assert proposal.is_valid() is False
proposal.url = 'http://[::1/foo/bad]/bad'
assert proposal.is_valid() is False
proposal.url = 'http://bluemncentral.org/dean-miller 5493'
assert proposal.is_valid() is False
proposal.url = 'http://bluemncentralisé.org/dean-miller-5493'
assert proposal.is_valid() is True
proposal.url = 'http://bluemncentralisé.org/dean-миллер-5493'
assert proposal.is_valid() is True
proposal.url = 'https://example.com/resource.ext?param=1&other=2'
assert proposal.is_valid() is True
proposal.url = 'www.com'
assert proposal.is_valid() is True
proposal.url = 'v.ht/'
assert proposal.is_valid() is True
proposal.url = 'ipfs:///ipfs/QmPwwoytFU3gZYk5tSppumxaGbHymMUgHsSvrBdQH69XRx/'
assert proposal.is_valid() is True
proposal.url = '/ipfs/QmPwwoytFU3gZYk5tSppumxaGbHymMUgHsSvrBdQH69XRx/'
assert proposal.is_valid() is True
proposal.url = 's3://bucket/thing/anotherthing/file.pdf'
assert proposal.is_valid() is True
proposal.url = 'http://zqktlwi4fecvo6ri.onion/wiki/index.php/Main_Page'
assert proposal.is_valid() is True
proposal.url = 'ftp://ftp.funet.fi/pub/standards/RFC/rfc959.txt'
assert proposal.is_valid() is True
# gibberish URL
proposal.url = bluemnlib.deserialise('22687474703a2f2f5c75303330385c75303065665c75303362345c75303362315c75303266645c75303331345c625c75303134655c75303031615c75303139655c75303133365c75303264315c75303238655c75303364395c75303230665c75303363355c75303030345c75303336665c75303238355c75303165375c75303063635c75303139305c75303262615c75303239316a5c75303130375c75303362365c7530306562645c75303133335c75303335665c7530326562715c75303038655c75303332645c75303362645c75303064665c75303135654f365c75303237335c75303363645c7530333539275c75303165345c75303339615c75303365385c75303334345c75303130615c75303265662e5c75303231625c75303164356a5c75303232345c75303163645c75303336365c75303064625c75303339665c75303230305c75303337615c75303138395c75303263325c75303038345c75303066615c75303031335c75303233655c75303135345c75303165395c75303139635c75303239375c75303039355c75303038345c75303362305c7530306233435c75303135345c75303063665c75303163345c75303261335c75303362655c75303136305c75303139365c75303263665c75303131305c7530313031475c75303162645c75303338645c75303363325c75303138625c75303235625c75303266325c75303264635c75303139335c75303066665c75303066645c75303133625c75303234305c75303137615c75303062355c75303031645c75303238655c75303166315c75303232315c75303161615c75303265325c75303335625c75303333665c75303239345c75303335315c75303038345c75303339395c75303262385c75303132375c75303330357a5c75303263625c75303066305c75303062355c75303164335c75303338385c75303364385c75303130625c75303266325c75303137305c75303335315c75303030305c75303136385c75303039646d5c75303331315c75303236615c75303330375c75303332635c75303361635c665c75303363335c75303264365c75303238645c75303136395c7530323438635c75303163385c75303261355c75303164615c75303165375c75303337355c75303332645c7530333165755c75303131665c75303338375c75303135325c75303065325c75303135326c5c75303164325c75303164615c75303136645c75303061665c75303333375c75303264375c75303339375c75303139395c75303134635c75303165385c75303234315c75303336635c75303130645c75303230635c75303161615c75303339355c75303133315c75303064615c75303165615c75303336645c75303064325c75303337365c75303363315c75303132645c75303266305c75303064364f255c75303263635c75303162645c75303062385c75303238365c75303136395c75303337335c75303232335c75303336655c75303037665c75303062616b5c75303132365c75303233305c75303330645c75303362385c75303164355c75303166615c75303338395c75303062635c75303135325c75303334365c75303139645c75303135615c75303031395c75303061385c75303133615c75303338635c75303339625c75303261655c75303065395c75303362635c75303166385c75303031665c75303230615c75303263355c75303134335c75303361635c75303334355c75303236645c75303139365c75303362665c75303135615c75303137305c75303165395c75303231395c75303332665c75303232645c75303030365c75303066305c75303134665c75303337375c75303234325d5c75303164325c75303337655c75303265665c75303331395c75303261355c75303265385c75303338395c75303235645c75303334315c75303338395c7530323230585c75303062645c75303166365c75303238645c75303231375c75303066665c75303130385c75303331305c75303330335c75303031395c75303039635c75303363315c75303039615c75303334355c75303331305c75303162335c75303263315c75303132395c75303234335c75303038627c5c75303361335c75303261635c75303165655c75303030305c75303237615c75303038385c75303066355c75303232375c75303236635c75303236355c7530336336205c75303038615c7530333561787c735c75303336305c75303362655c75303235385c75303334345c75303264365c75303262355c75303361315c75303135345c75303131625c75303061625c75303038615c75303332655c75303238325c75303031393d5c75303263335c75303332655c75303163645c75303139305c75303231305c75303131365c75303334305c75303234665c75303162635c75303333645c75303135305c75303132335c753032333645c75303133345c75303062327a5c75303331635c75303136312a5c753032316522')
assert proposal.is_valid() is False
# reset
proposal = Proposal(**orig.get_dict())
# ============================================================
# ensure proposal can't request negative bluemn
# ============================================================
proposal.payment_amount = -1
assert proposal.is_valid() is False
def test_proposal_is_expired(proposal):
cycle = 24 # testnet
now = misc.now()
proposal.start_epoch = now - (86400 * 2) # two days ago
proposal.end_epoch = now - (60 * 60) # expired one hour ago
assert proposal.is_expired(superblockcycle=cycle) is False
# fudge factor + a 24-block cycle == an expiry window of 9086, so...
proposal.end_epoch = now - 9085
assert proposal.is_expired(superblockcycle=cycle) is False
proposal.end_epoch = now - 9087
assert proposal.is_expired(superblockcycle=cycle) is True
# deterministic ordering
def test_approved_and_ranked(go_list_proposals):
from bluemnd import BluemnDaemon
bluemnd = BluemnDaemon.from_bluemn_conf(config.bluemn_conf)
for item in go_list_proposals:
(go, subobj) = GovernanceObject.import_gobject_from_bluemnd(bluemnd, item)
prop_list = Proposal.approved_and_ranked(proposal_quorum=1, next_superblock_max_budget=60)
assert prop_list[0].object_hash == u'dfd7d63979c0b62456b63d5fc5306dbec451180adee85876cbf5b28c69d1a86c'
assert prop_list[1].object_hash == u'0523445762025b2e01a2cd34f1d10f4816cf26ee1796167e5b029901e5873630'
def test_proposal_size(proposal):
orig = Proposal(**proposal.get_dict()) # make a copy
proposal.url = 'https://testurl.com/'
proposal_length_bytes = len(proposal.serialise()) // 2
# how much space is available in the Proposal
extra_bytes = (Proposal.MAX_DATA_SIZE - proposal_length_bytes)
# fill URL field with max remaining space
proposal.url = proposal.url + ('x' * extra_bytes)
# ensure this is the max proposal size and is valid
assert (len(proposal.serialise()) // 2) == Proposal.MAX_DATA_SIZE
assert proposal.is_valid() is True
# add one more character to URL, Proposal should now be invalid
proposal.url = proposal.url + 'x'
assert (len(proposal.serialise()) // 2) == (Proposal.MAX_DATA_SIZE + 1)
assert proposal.is_valid() is False
| [] | [] | ["SENTINEL_ENV", "SENTINEL_CONFIG"] | [] | ["SENTINEL_ENV", "SENTINEL_CONFIG"] | python | 2 | 0 |
example/calculator/client/client.go | package main
import (
"context"
"fmt"
"log"
"os"
"time"
"github.com/edegens/ptype/cluster"
"github.com/edegens/ptype/example/calculator"
)
func main() {
cfg, err := cluster.ConfigFromFile(os.Getenv("CONFIG"))
if err != nil {
log.Fatal(err)
}
c, err := cluster.Join(context.Background(), cfg)
if err != nil {
log.Fatal(err)
}
services, err := c.Registry.Services(context.Background())
if err != nil {
log.Fatal(err)
}
fmt.Printf("client: services %v\n", services)
// let the http server spin up after etcd
time.Sleep(500 * time.Millisecond)
client, err := c.NewClient("calculator", nil)
if err != nil {
log.Fatal(err)
}
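// Invoke the remote Calculator.Multiply method through the cluster RPC client.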
args := &calculator.Args{A: 7, B: 8}
var reply int
err = client.Call("Calculator.Multiply", args, &reply)
if err != nil {
log.Fatal("client error:", err)
}
fmt.Printf("client: %d*%d=%d\n", args.A, args.B, reply)
}
| ["\"CONFIG\""] | [] | ["CONFIG"] | [] | ["CONFIG"] | go | 1 | 0 |
test/e2e/e2e_test.go | /*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package e2e
import (
"flag"
"os"
"testing"
"k8s.io/component-base/logs"
"k8s.io/klog"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/framework/config"
"k8s.io/kubernetes/test/e2e/framework/testfiles"
. "github.com/onsi/ginkgo"
// test sources
_ "github.com/intel/pmem-csi/test/e2e/gotests"
_ "github.com/intel/pmem-csi/test/e2e/storage"
)
func TestMain(m *testing.M) {
klog.SetOutput(GinkgoWriter)
logs.InitLogs()
config.CopyFlags(config.Flags, flag.CommandLine)
framework.RegisterCommonFlags(flag.CommandLine)
framework.RegisterClusterFlags(flag.CommandLine)
// Skip slow or disruptive tests by default.
flag.Set("ginkgo.skip", `\[Slow|Disruptive\]`)
flag.Parse()
// Register framework flags, then handle flags.
framework.AfterReadingAllFlags(&framework.TestContext)
// We need extra files at runtime.
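// REPO_ROOT, when set, points the test framework at the repository root so those files can be resolved.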
repoRoot := os.Getenv("REPO_ROOT")
if repoRoot != "" {
testfiles.AddFileSource(RootFileSource{Root: repoRoot})
}
// Now run the test suite.
os.Exit(m.Run())
}
func TestE2E(t *testing.T) {
RunE2ETests(t)
}
| ["\"REPO_ROOT\""] | [] | ["REPO_ROOT"] | [] | ["REPO_ROOT"] | go | 1 | 0 |
tests/integration/devfile/cmd_devfile_catalog_test.go | package devfile
import (
"encoding/json"
"os"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/redhat-developer/odo/tests/helper"
)
var _ = Describe("odo devfile catalog command tests", func() {
var commonVar helper.CommonVar
// This is run before every Spec (It)
var _ = BeforeEach(func() {
commonVar = helper.CommonBeforeEach()
helper.Chdir(commonVar.Context)
// For some reason on TravisCI, there are flakes with regard to
// registrycachetime when running 'odo catalog list components'.
// TODO: Investigate this more.
helper.Cmd("odo", "preference", "set", "registrycachetime", "0").ShouldPass()
})
// This is run after every Spec (It)
var _ = AfterEach(func() {
helper.CommonAfterEach(commonVar)
})
It("should list components successfully even with an invalid kubeconfig path or path points to existing directory", func() {
originalKC := os.Getenv("KUBECONFIG")
err := os.Setenv("KUBECONFIG", "/idonotexist")
Expect(err).ToNot(HaveOccurred())
helper.Cmd("odo", "catalog", "list", "components").ShouldPass()
err = os.Setenv("KUBECONFIG", commonVar.Context)
Expect(err).ToNot(HaveOccurred())
helper.Cmd("odo", "catalog", "list", "components").ShouldPass()
err = os.Setenv("KUBECONFIG", originalKC)
Expect(err).ToNot(HaveOccurred())
})
When("executing catalog list components", func() {
var output string
BeforeEach(func() {
output = helper.Cmd("odo", "catalog", "list", "components").ShouldPass().Out()
})
It("should list all supported devfile components", func() {
wantOutput := []string{
"Odo Devfile Components",
"NAME",
"java-springboot",
"java-openliberty",
"java-quarkus",
"DESCRIPTION",
"REGISTRY",
"DefaultDevfileRegistry",
}
helper.MatchAllInOutput(output, wantOutput)
})
})
When("executing catalog list components with -o json flag", func() {
var output string
BeforeEach(func() {
output = helper.Cmd("odo", "catalog", "list", "components", "-o", "json").ShouldPass().Out()
})
It("should list devfile components in json format", func() {
var outputData interface{}
unmarshalErr := json.Unmarshal([]byte(output), &outputData)
Expect(unmarshalErr).NotTo(HaveOccurred(), "Output is not a valid JSON")
wantOutput := []string{
"odo.dev/v1alpha1",
"items",
"java-openliberty",
"java-springboot",
"nodejs",
"java-quarkus",
"java-maven",
}
helper.MatchAllInOutput(output, wantOutput)
})
})
When("executing catalog describe component with -o json", func() {
var output string
BeforeEach(func() {
output = helper.Cmd("odo", "catalog", "describe", "component", "nodejs", "-o", "json").ShouldPass().Out()
})
It("should display a valid JSON", func() {
var outputData interface{}
unmarshalErr := json.Unmarshal([]byte(output), &outputData)
Expect(unmarshalErr).NotTo(HaveOccurred(), "Output is not a valid JSON")
})
})
When("adding a registry that is not set up properly", func() {
var output string
BeforeEach(func() {
helper.Cmd("odo", "registry", "add", "fake", "http://fake").ShouldPass()
output = helper.Cmd("odo", "catalog", "list", "components").ShouldPass().Out()
})
AfterEach(func() {
helper.Cmd("odo", "registry", "delete", "fake", "-f").ShouldPass()
})
It("should list components from valid registry", func() {
helper.MatchAllInOutput(output, []string{
"Odo Devfile Components",
"java-springboot",
"java-quarkus",
})
})
})
When("adding multiple registries", func() {
const registryName string = "RegistryName"
// Use staging OCI-based registry for tests to avoid overload
const addRegistryURL string = "https://registry.stage.devfile.io"
var output string
BeforeEach(func() {
helper.Cmd("odo", "registry", "add", registryName, addRegistryURL).ShouldPass()
output = helper.Cmd("odo", "catalog", "describe", "component", "nodejs").ShouldPass().Out()
})
It("should print multiple devfiles from different registries", func() {
helper.MatchAllInOutput(output, []string{"name: nodejs-starter", "Registry: " + registryName})
})
})
})
| ["\"KUBECONFIG\""] | [] | ["KUBECONFIG"] | [] | ["KUBECONFIG"] | go | 1 | 0 |
pkg/ccl/storageccl/export_storage_test.go | // Copyright 2016 The Cockroach Authors.
//
// Licensed as a CockroachDB Enterprise file under the Cockroach Community
// License (the "License"); you may not use this file except in compliance with
// the License. You may obtain a copy of the License at
//
// https://github.com/cockroachdb/cockroach/blob/master/LICENSE
package storageccl
import (
"bytes"
"crypto/rand"
"fmt"
"io"
"io/ioutil"
"net/http"
"net/http/httptest"
"net/url"
"os"
"path"
"path/filepath"
"strings"
"testing"
"github.com/rlmcpherson/s3gof3r"
"golang.org/x/net/context"
"github.com/cockroachdb/cockroach/pkg/testutils"
"github.com/cockroachdb/cockroach/pkg/util/leaktest"
)
func appendPath(t *testing.T, s, add string) string {
u, err := url.Parse(s)
if err != nil {
t.Fatal(err)
}
u.Path = path.Join(u.Path, add)
return u.String()
}
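// storeFromURI parses uri into an ExportStorage config, opens the store, and
// fails the test on any error.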
func storeFromURI(ctx context.Context, t *testing.T, uri string) ExportStorage {
conf, err := ExportStorageConfFromURI(uri)
if err != nil {
t.Fatal(err)
}
// Setup a sink for the given args.
s, err := MakeExportStorage(ctx, conf)
if err != nil {
t.Fatal(err)
}
return s
}
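// testExportStore exercises the common ExportStorage contract against the
// store at storeURI: config round-tripping, small writes, an 8MiB write (to
// cover providers that chunk above 4MB), and single-file access by URI.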
func testExportStore(t *testing.T, storeURI string, skipSingleFile bool) {
ctx := context.TODO()
conf, err := ExportStorageConfFromURI(storeURI)
if err != nil {
t.Fatal(err)
}
// Setup a sink for the given args.
s, err := MakeExportStorage(ctx, conf)
if err != nil {
t.Fatal(err)
}
defer s.Close()
if readConf := s.Conf(); readConf != conf {
t.Fatalf("conf does not roundtrip: started with %+v, got back %+v", conf, readConf)
}
t.Run("simple round trip", func(t *testing.T) {
sampleName := "somebytes"
sampleBytes := "hello world"
for i := 0; i < 10; i++ {
name := fmt.Sprintf("%s-%d", sampleName, i)
payload := []byte(strings.Repeat(sampleBytes, i))
if err := s.WriteFile(ctx, name, bytes.NewReader(payload)); err != nil {
t.Fatal(err)
}
r, err := s.ReadFile(ctx, name)
if err != nil {
t.Fatal(err)
}
defer r.Close()
res, err := ioutil.ReadAll(r)
if err != nil {
t.Fatal(err)
}
if !bytes.Equal(res, payload) {
t.Fatalf("got %v expected %v", res, payload)
}
if err := s.Delete(ctx, name); err != nil {
t.Fatal(err)
}
}
})
// The azure driver makes us chunk files that are greater than 4mb, so make
// sure that files larger than that work on all the providers.
t.Run("8mb-tempfile", func(t *testing.T) {
const size = 1024 * 1024 * 8 // 8MiB
testingContent := make([]byte, size)
if _, err := rand.Read(testingContent); err != nil {
t.Fatal(err)
}
testingFilename := "testing-123"
// Write some random data (random so it doesn't compress).
if err := s.WriteFile(ctx, testingFilename, bytes.NewReader(testingContent)); err != nil {
t.Fatal(err)
}
// Attempt to read (or fetch) it back.
res, err := s.ReadFile(ctx, testingFilename)
if err != nil {
t.Fatalf("Could not get reader for %s: %+v", testingFilename, err)
}
defer res.Close()
content, err := ioutil.ReadAll(res)
if err != nil {
t.Fatal(err)
}
// Verify the result contains what we wrote.
if !bytes.Equal(content, testingContent) {
t.Fatalf("wrong content")
}
if err := s.Delete(ctx, testingFilename); err != nil {
t.Fatal(err)
}
})
if skipSingleFile {
return
}
t.Run("read-single-file-by-uri", func(t *testing.T) {
if err := s.WriteFile(ctx, "A", bytes.NewReader([]byte("aaa"))); err != nil {
t.Fatal(err)
}
singleFile := storeFromURI(ctx, t, appendPath(t, storeURI, "A"))
defer singleFile.Close()
res, err := singleFile.ReadFile(ctx, "")
if err != nil {
t.Fatal(err)
}
defer res.Close()
content, err := ioutil.ReadAll(res)
if err != nil {
t.Fatal(err)
}
// Verify the result contains what we wrote.
if !bytes.Equal(content, []byte("aaa")) {
t.Fatalf("wrong content")
}
})
t.Run("write-single-file-by-uri", func(t *testing.T) {
singleFile := storeFromURI(ctx, t, appendPath(t, storeURI, "B"))
defer singleFile.Close()
if err := singleFile.WriteFile(ctx, "", bytes.NewReader([]byte("bbb"))); err != nil {
t.Fatal(err)
}
res, err := s.ReadFile(ctx, "B")
if err != nil {
t.Fatal(err)
}
defer res.Close()
content, err := ioutil.ReadAll(res)
if err != nil {
t.Fatal(err)
}
// Verify the result contains what we wrote.
if !bytes.Equal(content, []byte("bbb")) {
t.Fatalf("wrong content")
}
})
}
func TestPutLocal(t *testing.T) {
defer leaktest.AfterTest(t)()
p, cleanupFn := testutils.TempDir(t)
defer cleanupFn()
dest, err := MakeLocalStorageURI(p)
if err != nil {
t.Fatal(err)
}
testExportStore(t, dest, false)
}
func TestPutHttp(t *testing.T) {
defer leaktest.AfterTest(t)()
tmp, dirCleanup := testutils.TempDir(t)
defer dirCleanup()
makeServer := func() (*url.URL, func() int, func()) {
var files int
srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
defer r.Body.Close()
localfile := filepath.Join(tmp, filepath.Base(r.URL.Path))
switch r.Method {
case "PUT":
f, err := os.Create(localfile)
if err != nil {
http.Error(w, err.Error(), 500)
return
}
defer f.Close()
defer r.Body.Close()
if _, err := io.Copy(f, r.Body); err != nil {
http.Error(w, err.Error(), 500)
return
}
files++
case "GET":
http.ServeFile(w, r, localfile)
case "DELETE":
if err := os.Remove(localfile); err != nil {
http.Error(w, err.Error(), 500)
return
}
default:
http.Error(w, "unsupported method "+r.Method, 400)
}
}))
t.Logf("Mock HTTP Storage %q", srv.URL)
uri, err := url.Parse(srv.URL)
if err != nil {
srv.Close()
t.Fatal(err)
}
uri.Path = path.Join(uri.Path, "testing")
return uri, func() int { return files }, srv.Close
}
t.Run("singleHost", func(t *testing.T) {
srv, files, cleanup := makeServer()
defer cleanup()
testExportStore(t, srv.String(), false)
if expected, actual := 13, files(); expected != actual {
t.Fatalf("expected %d files to be written to single http store, got %d", expected, actual)
}
})
t.Run("multiHost", func(t *testing.T) {
srv1, files1, cleanup1 := makeServer()
defer cleanup1()
srv2, files2, cleanup2 := makeServer()
defer cleanup2()
srv3, files3, cleanup3 := makeServer()
defer cleanup3()
combined := *srv1
combined.Host = strings.Join([]string{srv1.Host, srv2.Host, srv3.Host}, ",")
testExportStore(t, combined.String(), true)
if expected, actual := 3, files1(); expected != actual {
t.Fatalf("expected %d files written to http host 1, got %d", expected, actual)
}
if expected, actual := 4, files2(); expected != actual {
t.Fatalf("expected %d files written to http host 2, got %d", expected, actual)
}
if expected, actual := 4, files3(); expected != actual {
t.Fatalf("expected %d files written to http host 3, got %d", expected, actual)
}
})
}
func TestPutS3(t *testing.T) {
defer leaktest.AfterTest(t)()
s3Keys, err := s3gof3r.EnvKeys()
if err != nil {
s3Keys, err = s3gof3r.InstanceKeys()
if err != nil {
t.Skip("No AWS keys instance or env keys")
}
}
bucket := os.Getenv("AWS_S3_BUCKET")
if bucket == "" {
t.Skip("AWS_S3_BUCKET env var must be set")
}
// TODO(dt): this prevents leaking an http conn goroutine.
http.DefaultTransport.(*http.Transport).DisableKeepAlives = true
testExportStore(t,
fmt.Sprintf(
"s3://%s/%s?%s=%s&%s=%s",
bucket, "backup-test",
S3AccessKeyParam, url.QueryEscape(s3Keys.AccessKey),
S3SecretParam, url.QueryEscape(s3Keys.SecretKey),
),
false,
)
}
func TestPutGoogleCloud(t *testing.T) {
defer leaktest.AfterTest(t)()
bucket := os.Getenv("GS_BUCKET")
if bucket == "" {
t.Skip("GS_BUCKET env var must be set")
}
// TODO(dt): this prevents leaking an http conn goroutine.
http.DefaultTransport.(*http.Transport).DisableKeepAlives = true
testExportStore(t, fmt.Sprintf("gs://%s/%s", bucket, "backup-test"), false)
}
func TestPutAzure(t *testing.T) {
defer leaktest.AfterTest(t)()
accountName := os.Getenv("AZURE_ACCOUNT_NAME")
accountKey := os.Getenv("AZURE_ACCOUNT_KEY")
if accountName == "" || accountKey == "" {
t.Skip("AZURE_ACCOUNT_NAME and AZURE_ACCOUNT_KEY env vars must be set")
}
bucket := os.Getenv("AZURE_CONTAINER")
if bucket == "" {
t.Skip("AZURE_CONTAINER env var must be set")
}
// TODO(dt): this prevents leaking an http conn goroutine.
http.DefaultTransport.(*http.Transport).DisableKeepAlives = true
testExportStore(t,
fmt.Sprintf("azure://%s/%s?%s=%s&%s=%s",
bucket, "backup-test",
AzureAccountNameParam, url.QueryEscape(accountName),
AzureAccountKeyParam, url.QueryEscape(accountKey),
),
false,
)
}
| [
"\"AWS_S3_BUCKET\"",
"\"GS_BUCKET\"",
"\"AZURE_ACCOUNT_NAME\"",
"\"AZURE_ACCOUNT_KEY\"",
"\"AZURE_CONTAINER\""
]
| []
| [
"AZURE_ACCOUNT_NAME",
"AZURE_ACCOUNT_KEY",
"GS_BUCKET",
"AZURE_CONTAINER",
"AWS_S3_BUCKET"
]
| [] | ["AZURE_ACCOUNT_NAME", "AZURE_ACCOUNT_KEY", "GS_BUCKET", "AZURE_CONTAINER", "AWS_S3_BUCKET"] | go | 5 | 0 | |
config.py | import os
class Config(object):
BASE_DIR = os.path.abspath(os.path.dirname(__file__))
DEBUG = False
SECRET_KEY = os.environ['SECRET_KEY']
DATABASE_URL = os.environ['DATABASE_URL']
SQLALCHEMY_DATABASE_URI = DATABASE_URL
SQLALCHEMY_TRACK_MODIFICATIONS = True
TROPO_API_KEY_TEXT = os.environ.get('TROPO_API_KEY_TEXT', "TEXT TOKEN NOT PROVIDED")
TROPO_API_KEY_VOICE = os.environ.get('TROPO_API_KEY_VOICE', "VOICE TOKEN NOT PROVIDED")
SPARK_TOKEN = os.environ.get('SPARK_TOKEN', "TOKEN-NOT-PROVIDED")
ON_CISCO_NETWORK = os.environ.get('ON_CISCO_NETWORK', False)
NOTIFICATION_SMS_PHONE_NUMBER = os.environ.get('NOTIFICATION_SMS_PHONE_NUMBER', False)
SPARK_DEFAULT_ROOM_ID = os.environ.get('SPARK_DEFAULT_ROOM_ID', False)
SMS_ENABLED = os.environ.get('SMS_ENABLED', False)
SMS_ENABLED = (SMS_ENABLED == 'True' or SMS_ENABLED == 'TRUE')
SHOW_WEB_LINK = os.environ.get('SHOW_WEB_LINK', False)
SHOW_WEB_LINK = (SHOW_WEB_LINK == 'True' or SHOW_WEB_LINK == 'TRUE')
ADMIN_NAME = os.environ.get('ADMIN_NAME', '')
MERAKI_VALIDATOR_TOKEN = os.environ.get('MERAKI_VALIDATOR', "TOKEN-NOT-PROVIDED")
    # Application threads. A common rule of thumb is two per
    # available processor core: one to handle incoming requests
    # and one for background operations.
THREADS_PER_PAGE = 2
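    # Illustrative reading (assumption, not from the original config): under
    # the rule of thumb above, a hypothetical 4-core host would suggest
    # 2 * 4 = 8 threads in total; the fixed value here keeps two threads
    # per page regardless of core count.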
class ProductionConfig(Config):
DEBUG = False
class StagingConfig(Config):
DEVELOPMENT = True
DEBUG = True
class DevelopmentConfig(Config):
DEVELOPMENT = True
DEBUG = True
class TestingConfig(Config):
TESTING = True
| []
| []
| [
"MERAKI_VALIDATOR",
"SPARK_DEFAULT_ROOM_ID",
"SPARK_TOKEN",
"DATABASE_URL",
"ON_CISCO_NETWORK",
"ADMIN_NAME",
"TROPO_API_KEY_TEXT",
"SECRET_KEY",
"NOTIFICATION_SMS_PHONE_NUMBER",
"SHOW_WEB_LINK",
"TROPO_API_KEY_VOICE",
"SMS_ENABLED"
]
| [] | ["MERAKI_VALIDATOR", "SPARK_DEFAULT_ROOM_ID", "SPARK_TOKEN", "DATABASE_URL", "ON_CISCO_NETWORK", "ADMIN_NAME", "TROPO_API_KEY_TEXT", "SECRET_KEY", "NOTIFICATION_SMS_PHONE_NUMBER", "SHOW_WEB_LINK", "TROPO_API_KEY_VOICE", "SMS_ENABLED"] | python | 12 | 0 | |
utils/subpath.go | // Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved.
// See LICENSE.txt for license information.
package utils
import (
"crypto/sha256"
"encoding/base64"
"fmt"
"io/ioutil"
"net/url"
"os"
"path"
"path/filepath"
"regexp"
"strings"
"github.com/pkg/errors"
"github.com/mattermost/mattermost-server/v6/model"
"github.com/mattermost/mattermost-server/v6/shared/mlog"
"github.com/mattermost/mattermost-server/v6/utils/fileutils"
)
// getSubpathScript renders the inline script that defines window.publicPath to change how webpack loads assets.
func getSubpathScript(subpath string) string {
if subpath == "" {
subpath = "/"
}
newPath := path.Join(subpath, "static") + "/"
return fmt.Sprintf("window.publicPath='%s'", newPath)
}
// GetSubpathScriptHash computes the script-src addition required for the subpath script to bypass CSP protections.
func GetSubpathScriptHash(subpath string) string {
// No hash is required for the default subpath.
if subpath == "" || subpath == "/" {
return ""
}
scriptHash := sha256.Sum256([]byte(getSubpathScript(subpath)))
return fmt.Sprintf(" 'sha256-%s'", base64.StdEncoding.EncodeToString(scriptHash[:]))
}
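// Illustrative sketch (assumed values): for subpath "/mattermost" the inline
// script is exactly "window.publicPath='/mattermost/static/'", so the CSP
// addition is the base64-encoded SHA-256 of that string, e.g.:
//
//	hash := GetSubpathScriptHash("/mattermost")
//	// hash looks like " 'sha256-AbC...='" (the digest depends on the script text)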
// UpdateAssetsSubpathInDir rewrites assets in the given directory to assume the application is
// hosted at the given subpath instead of at the root. No changes are written unless necessary.
func UpdateAssetsSubpathInDir(subpath, directory string) error {
if subpath == "" {
subpath = "/"
}
staticDir, found := fileutils.FindDir(directory)
if !found {
return errors.New("failed to find client dir")
}
staticDir, err := filepath.EvalSymlinks(staticDir)
if err != nil {
return errors.Wrapf(err, "failed to resolve symlinks to %s", staticDir)
}
rootHTMLPath := filepath.Join(staticDir, "root.html")
oldRootHTML, err := ioutil.ReadFile(rootHTMLPath)
if err != nil {
return errors.Wrap(err, "failed to open root.html")
}
oldSubpath := "/"
// Determine if a previous subpath had already been rewritten into the assets.
reWebpackPublicPathScript := regexp.MustCompile("window.publicPath='([^']+/)static/'")
alreadyRewritten := false
if matches := reWebpackPublicPathScript.FindStringSubmatch(string(oldRootHTML)); matches != nil {
oldSubpath = matches[1]
alreadyRewritten = true
}
pathToReplace := path.Join(oldSubpath, "static") + "/"
newPath := path.Join(subpath, "static") + "/"
mlog.Debug("Rewriting static assets", mlog.String("from_subpath", oldSubpath), mlog.String("to_subpath", subpath))
newRootHTML := string(oldRootHTML)
reCSP := regexp.MustCompile(`<meta http-equiv="Content-Security-Policy" content="script-src 'self' player.vimeo.com/ cdn.rudderlabs.com/ js.stripe.com/v3([^"]*)">`)
if results := reCSP.FindAllString(newRootHTML, -1); len(results) == 0 {
return fmt.Errorf("failed to find 'Content-Security-Policy' meta tag to rewrite")
}
newRootHTML = reCSP.ReplaceAllLiteralString(newRootHTML, fmt.Sprintf(
`<meta http-equiv="Content-Security-Policy" content="script-src 'self' player.vimeo.com/ cdn.rudderlabs.com/ js.stripe.com/v3%s">`,
GetSubpathScriptHash(subpath),
))
// Rewrite the root.html references to `/static/*` to include the given subpath.
// This potentially includes a previously injected inline script that needs to
// be updated (and isn't covered by the cases above).
newRootHTML = strings.Replace(newRootHTML, pathToReplace, newPath, -1)
if alreadyRewritten && subpath == "/" {
// Remove the injected script since no longer required. Note that the rewrite above
// will have affected the script, so look for the new subpath, not the old one.
oldScript := getSubpathScript(subpath)
newRootHTML = strings.Replace(newRootHTML, fmt.Sprintf("</style><script>%s</script>", oldScript), "</style>", 1)
} else if !alreadyRewritten && subpath != "/" {
// Otherwise, inject the script to define `window.publicPath`.
script := getSubpathScript(subpath)
newRootHTML = strings.Replace(newRootHTML, "</style>", fmt.Sprintf("</style><script>%s</script>", script), 1)
}
// Write out the updated root.html.
if err = ioutil.WriteFile(rootHTMLPath, []byte(newRootHTML), 0); err != nil {
return errors.Wrapf(err, "failed to update root.html with subpath %s", subpath)
}
// Rewrite the manifest.json and *.css references to `/static/*` (or a previously rewritten subpath).
err = filepath.Walk(staticDir, func(walkPath string, info os.FileInfo, err error) error {
if filepath.Base(walkPath) == "manifest.json" || filepath.Ext(walkPath) == ".css" {
old, err := ioutil.ReadFile(walkPath)
if err != nil {
return errors.Wrapf(err, "failed to open %s", walkPath)
}
new := strings.Replace(string(old), pathToReplace, newPath, -1)
if err = ioutil.WriteFile(walkPath, []byte(new), 0); err != nil {
return errors.Wrapf(err, "failed to update %s with subpath %s", walkPath, subpath)
}
}
return nil
})
if err != nil {
return errors.Wrapf(err, "error walking %s", staticDir)
}
return nil
}
// UpdateAssetsSubpath rewrites assets in the /client directory to assume the application is hosted
// at the given subpath instead of at the root. No changes are written unless necessary.
func UpdateAssetsSubpath(subpath string) error {
return UpdateAssetsSubpathInDir(subpath, model.ClientDir)
}
// UpdateAssetsSubpathFromConfig uses UpdateAssetsSubpath and any path defined in the SiteURL.
func UpdateAssetsSubpathFromConfig(config *model.Config) error {
// Don't rewrite in development environments, since webpack in developer mode constantly
// updates the assets and must be configured separately.
if model.BuildNumber == "dev" {
mlog.Debug("Skipping update to assets subpath since dev build")
return nil
}
// Similarly, don't rewrite during a CI build, when the assets may not even be present.
if os.Getenv("IS_CI") == "true" {
mlog.Debug("Skipping update to assets subpath since CI build")
return nil
}
subpath, err := GetSubpathFromConfig(config)
if err != nil {
return err
}
return UpdateAssetsSubpath(subpath)
}
func GetSubpathFromConfig(config *model.Config) (string, error) {
if config == nil {
return "", errors.New("no config provided")
} else if config.ServiceSettings.SiteURL == nil {
return "/", nil
}
u, err := url.Parse(*config.ServiceSettings.SiteURL)
if err != nil {
return "", errors.Wrap(err, "failed to parse SiteURL from config")
}
if u.Path == "" {
return "/", nil
}
return path.Clean(u.Path), nil
}
| [
"\"IS_CI\""
]
| []
| [
"IS_CI"
]
| [] | ["IS_CI"] | go | 1 | 0 | |
openks/distributed/quick-start/openKS_GPU_test.py | # Copyright (c) 2020 Room 525 Research Group, Zhejiang University.
# All Rights Reserved.
import paddle.fluid as fluid
import os
import sys
sys.path.insert(1, os.path.dirname(__file__))
from openKS_distributed import KSDistributedFactory
from openKS_distributed.base import RoleMaker
from openKS_strategy.gpu import GPUStrategy, \
NumThreadsConfig, CollectiveMode, GradAllreduce, LocalSGD
from utils import gen_data
from nets import mlp
input_x = fluid.layers.data(name="x", shape=[32], dtype='float32')
input_y = fluid.layers.data(name="y", shape=[1], dtype='int64')
cost = mlp(input_x, input_y)
optimizer = fluid.optimizer.SGD(learning_rate=0.01)
dist_algorithm = KSDistributedFactory.instantiation(flag = 1)
role = RoleMaker.PaddleCloudRoleMaker(is_collective=True)
dist_algorithm.init(role)
# algorithm + local optimizer
optimizer = GPUStrategy(exec_config = [NumThreadsConfig(32)], dist_config = [CollectiveMode(), GradAllreduce()]).setup_optimizer(dist_algorithm, optimizer)
optimizer.minimize(cost, fluid.default_startup_program())
train_prog = dist_algorithm.main_program
gpu_id = int(os.getenv("FLAGS_selected_gpus", "0"))
place = fluid.CUDAPlace(gpu_id)
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())
step = 1001
for i in range(step):
cost_val = exe.run(
program=train_prog,
feed=gen_data(),
fetch_list=[cost.name])
print("worker_index: %d, step%d cost = %f" %
(dist_algorithm.worker_index(), i, cost_val[0]))
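# Illustrative launch (assumption: the Paddle distributed launcher sets
# FLAGS_selected_gpus and the PaddleCloud role variables for each worker;
# the exact flag name varies by Paddle version):
#
#   python -m paddle.distributed.launch --selected_gpus=0,1 openKS_GPU_test.py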
| []
| []
| [
"FLAGS_selected_gpus"
]
| [] | ["FLAGS_selected_gpus"] | python | 1 | 0 | |
platforms/microbit/temperature_driver_test.go | package microbit
import (
"strings"
"testing"
"time"
"github.com/matipan/gobot"
"github.com/matipan/gobot/gobottest"
)
var _ gobot.Driver = (*TemperatureDriver)(nil)
func initTestTemperatureDriver() *TemperatureDriver {
d := NewTemperatureDriver(NewBleTestAdaptor())
return d
}
func TestTemperatureDriver(t *testing.T) {
d := initTestTemperatureDriver()
gobottest.Assert(t, strings.HasPrefix(d.Name(), "Microbit Temperature"), true)
d.SetName("NewName")
gobottest.Assert(t, d.Name(), "NewName")
}
func TestTemperatureDriverStartAndHalt(t *testing.T) {
d := initTestTemperatureDriver()
gobottest.Assert(t, d.Start(), nil)
gobottest.Assert(t, d.Halt(), nil)
}
func TestTemperatureDriverReadData(t *testing.T) {
	sem := make(chan bool)
a := NewBleTestAdaptor()
d := NewTemperatureDriver(a)
d.Start()
d.On(Temperature, func(data interface{}) {
gobottest.Assert(t, data, int8(0x22))
sem <- true
})
a.TestReceiveNotification([]byte{0x22}, nil)
select {
case <-sem:
case <-time.After(100 * time.Millisecond):
t.Errorf("Microbit Event \"Temperature\" was not published")
}
}
| []
| []
| []
| [] | [] | go | null | null | null |
sharness/dependencies/ipfs-update/migrations.go | package main
import (
"fmt"
"io/ioutil"
"os"
"os/exec"
"path/filepath"
"strconv"
"strings"
util "github.com/ipfs/fs-repo-migrations/Godeps/_workspace/src/github.com/ipfs/ipfs-update/util"
stump "github.com/ipfs/fs-repo-migrations/Godeps/_workspace/src/github.com/whyrusleeping/stump"
)
func CheckMigration() error {
stump.Log("checking if repo migration is needed...")
p := util.IpfsDir()
vfilePath := filepath.Join(p, "version")
_, err := os.Stat(vfilePath)
if os.IsNotExist(err) {
stump.VLog(" - no prexisting repo to migrate")
return nil
}
oldverB, err := ioutil.ReadFile(vfilePath)
if err != nil {
return err
}
oldver := strings.Trim(string(oldverB), "\n \t")
stump.VLog(" - old repo version is", oldver)
nbinver, err := util.RunCmd("", "ipfs", "version", "--repo")
if err != nil {
stump.Log("Failed to check new binary repo version.")
stump.VLog("Reason: ", err)
stump.Log("This is not an error.")
stump.Log("This just means that you may have to manually run the migration")
stump.Log("You will be prompted to do so upon starting the ipfs daemon if necessary")
return nil
}
stump.VLog(" - repo version of new binary is ", nbinver)
if oldver != nbinver {
stump.Log(" - Migration required")
return RunMigration(oldver, nbinver)
}
stump.VLog(" - no migration required")
return nil
}
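// Illustrative sketch (assumed repo layout): CheckMigration compares the
// plain-text version file in the IPFS dir against the binary's repo version.
// For a hypothetical repo at ~/.ipfs:
//
//	$ cat ~/.ipfs/version   # -> "4"
//	$ ipfs version --repo   # -> "5"
//	// oldver != nbinver, so RunMigration("4", "5") is invoked.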
func RunMigration(oldv, newv string) error {
migrateBin := "fs-repo-migrations"
stump.VLog(" - checking for migrations binary...")
_, err := exec.LookPath(migrateBin)
if err != nil {
stump.VLog(" - migrations not found on system, attempting to install")
err := GetMigrations()
if err != nil {
return err
}
}
// check to make sure migrations binary supports our target version
err = verifyMigrationSupportsVersion(newv)
if err != nil {
return err
}
cmd := exec.Command(migrateBin, "-to", newv, "-y")
cmd.Stdout = stump.LogOut
cmd.Stderr = stump.ErrOut
stump.Log("running migration: '%s -to %s -y'", migrateBin, newv)
err = cmd.Run()
if err != nil {
return fmt.Errorf("migration failed: %s", err)
}
stump.Log("migration succeeded!")
return nil
}
func GetMigrations() error {
// first, check if go is installed
_, err := exec.LookPath("go")
if err == nil {
err := getMigrationsGoGet()
if err == nil {
return nil
}
stump.VLog("'go get' migrations failed: %s", err)
}
// TODO: try and fetch from dist.ipfs.io
stump.Log("could not find or install fs-repo-migrations, please manually install it")
stump.Log("before running ipfs-update again.")
return fmt.Errorf("failed to find migrations binary")
}
func getMigrationsGoGet() error {
stump.VLog(" - checking that GOPATH is set")
if os.Getenv("GOPATH") == "" {
return fmt.Errorf("GOPATH not set, cannot install migrations with go tool")
}
stump.VLog(" - fetching migrations using 'go get'")
cmd := exec.Command("go", "get", "-u", "github.com/ipfs/fs-repo-migrations")
out, err := cmd.CombinedOutput()
if err != nil {
return fmt.Errorf("%s %s", string(out), err)
}
stump.VLog(" - success. verifying...")
// verify we can see the binary now
p, err := exec.LookPath("fs-repo-migrations")
if err != nil {
return fmt.Errorf("install succeeded, but failed to find binary afterwards. (%s)", err)
}
stump.VLog(" - fs-repo-migrations now installed at %s", p)
return nil
}
func verifyMigrationSupportsVersion(v string) error {
stump.VLog(" - verifying migration supports version %s", v)
vn, err := strconv.Atoi(v)
if err != nil {
return fmt.Errorf("given migration version was not a number: %q", v)
}
sn, err := migrationsVersion()
if err != nil {
return err
}
if sn >= vn {
return nil
}
stump.VLog(" - migrations doesnt support version %s, attempting to update")
err = GetMigrations()
if err != nil {
return err
}
stump.VLog(" - migrations updated")
sn, err = migrationsVersion()
if err != nil {
return err
}
if sn >= vn {
return nil
}
return fmt.Errorf("no known migration supports version %s", v)
}
func migrationsVersion() (int, error) {
out, err := exec.Command("fs-repo-migrations", "-v").CombinedOutput()
if err != nil {
return 0, fmt.Errorf("failed to check migrations version")
}
vs := strings.Trim(string(out), " \n\t")
vn, err := strconv.Atoi(vs)
if err != nil {
return 0, fmt.Errorf("migrations binary version check did not return a number")
}
return vn, nil
}
| [
"\"GOPATH\""
]
| []
| [
"GOPATH"
]
| [] | ["GOPATH"] | go | 1 | 0 | |
leetcode/412-fizz-buzz.py | class Solution(object):
def fizzBuzz(self, n):
"""
:type n: int
:rtype: List[str]
"""
out = []
for i in range(1, n + 1):
if i % 3 == 0 and i % 5 == 0:
out.append('FizzBuzz')
elif i % 5 == 0:
out.append('Buzz')
elif i % 3 == 0:
out.append('Fizz')
else:
out.append(str(i))
return out
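# Illustrative usage: Solution().fizzBuzz(5) -> ['1', '2', 'Fizz', '4', 'Buzz']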
| []
| []
| []
| [] | [] | python | null | null | null |
sentences-app/app/app.py | #!/usr/bin/env python
import os
import flask
import requests
import prometheus_client
import datetime, time
import logging
import random
import re
import sys
random_app = flask.Flask('random')
age_app = flask.Flask('age')
name_app = flask.Flask('name')
sentence_app = flask.Flask('sentence')
api_app = flask.Flask('api')
authentication_bug_probability = float(os.getenv('SENTENCE_AUTH_BUG_PROBABILITY', 0.0))
min_age = int(os.getenv('SENTENCE_AGE_MIN', 0))
max_age = int(os.getenv('SENTENCE_AGE_MAX', 100))
names = os.getenv('SENTENCE_NAMES', 'Peter,Ray,Egon').split(',')
svc_delay_min = float(os.getenv('SENTENCE_SVC_DELAY_MIN', 0.0))
svc_delay_max = float(os.getenv('SENTENCE_SVC_DELAY_MAX', 0.0))
mode = os.getenv('SENTENCE_MODE', 'age')
age_svc_url = os.getenv('SENTENCE_AGE_SVC_URL', 'http://age:5000')
name_svc_url = os.getenv('SENTENCE_NAME_SVC_URL', 'http://name:5000')
random_svc_url = os.getenv('SENTENCE_RANDOM_SVC_URL', '')
random_svc_url2 = os.getenv('SENTENCE_RANDOM_SVC_URL2', '')
random_svc2_probability = float(os.getenv('SENTENCE_RANDOM_SVC2_PROBABILITY', 0.0))
auth_z_bug_value = '12345'
api_svc_url = os.getenv('SENTENCE_API_SVC_URL', 'http://api:5000')
api_switch = os.getenv('API_SWITCH', 'false')
fwd_headers = ['x-request-id',
'x-b3-traceid',
'x-b3-spanid',
'x-b3-parentspanid',
'x-b3-sampled',
'x-b3-flags',
'b3',
'x-client-trace-id',
'x-envoy-force-trace',
# Custom headers
'x-test',
'authorization']
class timed():
def __init__(self, txt):
self.txt = txt
    def __enter__(self):
        self.start = datetime.datetime.now()
        return self  # so "with timed(...) as t" binds the timer instance
def __exit__(self, exc_type, exc_value, exc_traceback):
end = datetime.datetime.now()
took = (end-self.start).total_seconds()*1000
logging.warning("Operation '{}' took {:.3f}ms".format(self.txt, took))
def get_random_int(xmin, xmax):
if random_svc_url:
random_url = random_svc_url
if random_svc_url2:
p = float(random.randint(0,100))/100.0
if p < random_svc2_probability:
random_url = random_svc_url2
with timed('random') as t:
hdrs = get_fwd_headers()
logging.warning('Forwarding headers {}'.format(hdrs))
r = requests.get(random_url, timeout=1, headers=hdrs)
if r.status_code != 200:
flask.abort(r.status_code)
logging.warning("Used random-svc URL {}. Got '{}'".format(random_url, r.text))
            if re.match(r'\d+', r.text):
val = int(r.text)
else:
val = ord(r.text)
return xmin + (val % (xmax-xmin+1))
else:
return random.randint(xmin, xmax)
def get_random_age():
return str(get_random_int(min_age, max_age))
def get_random_name():
nidx = get_random_int(0, len(names)-1)
return names[nidx]
def do_random_delay():
d = svc_delay_min + random.random()*(svc_delay_max-svc_delay_min)
if d > 0:
logging.warning('Delay {}s'.format(d))
time.sleep(d)
def flask_lc_headers():
return {k.lower(): v for k,v in flask.request.headers.items()}
def get_fwd_headers():
in_hdrs = flask_lc_headers()
return { h: v for h,v in in_hdrs.items() if h.lower() in fwd_headers}
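# Illustrative only (assumed header values): given incoming request headers
#   {'host': 'sentence', 'x-b3-traceid': 'abc123', 'accept': '*/*'}
# get_fwd_headers() keeps only the trace/auth allow-list, returning
#   {'x-b3-traceid': 'abc123'}
# so Zipkin-style trace context survives each service-to-service hop.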
@api_app.route('/')
def call_api():
with timed('api') as t:
hdrs = get_fwd_headers()
m_requests.labels('api').inc()
do_random_delay()
response = requests.get("http://httpbin.org/delay/1",headers=hdrs)
logging.warning("Response was: {}".format(response.status_code))
return response.text
@random_app.route('/')
def get_random():
with timed('random') as t:
do_random_delay()
in_hdrs = flask_lc_headers()
if 'authorization' not in in_hdrs or in_hdrs['authorization'] == auth_z_bug_value:
logging.warning('Simulating bug due to missing/bad authZ header. Got headers: {}'.format(in_hdrs))
time.sleep(0.7)
m_requests.labels('random').inc()
r = random.randint(0, 10000)
logging.warning("Using random '{}'".format(r))
return str(r)
@age_app.route('/')
def get_age():
with timed('age') as t:
do_random_delay()
m_requests.labels('age').inc()
age = get_random_age()
logging.warning("Using age '{}'".format(age))
return age
@name_app.route('/')
def get_name():
with timed('name') as t:
do_random_delay()
m_requests.labels('name').inc()
name = get_random_name()
logging.warning("Using name '{}'".format(name))
return name
@name_app.route('/choices')
def get_name_choices():
return str(names)
@sentence_app.route('/')
def get_sentence():
with timed('sentence') as t:
do_random_delay()
hdrs = get_fwd_headers()
# Simulate authentication and possibly bug
p = float(random.randint(0,100))/100.0
if p < authentication_bug_probability:
logging.warning('Simulating authentication bug (p={}) - adding wrong header'.format(p))
hdrs['Authorization'] = auth_z_bug_value
else:
hdrs['Authorization'] = 'something-valid'
logging.warning('Forwarding headers {}'.format(hdrs))
name = requests.get(name_svc_url, timeout=1, headers=hdrs).text
age = requests.get(age_svc_url, timeout=1, headers=hdrs).text
m_requests.labels('sentence').inc()
if api_switch=='true':
api = requests.get(api_svc_url, timeout=2, headers=hdrs).text
return '{} is {} years.'.format(name, age)
if __name__ == '__main__':
host = "0.0.0.0"
port = 5000
metrics_port = 8000
m_requests = prometheus_client.Counter('sentence_requests_total',
'Number of requests', ['type'])
prometheus_client.start_http_server(metrics_port)
if mode=='random':
random_app.run(host=host, port=port)
elif mode=='api':
api_app.run(host=host, port=port)
elif mode=='age':
age_app.run(host=host, port=port)
elif mode=='name':
name_app.run(host=host, port=port)
elif mode=='sentence':
sentence_app.run(host=host, port=port)
| []
| []
| [
"SENTENCE_RANDOM_SVC_URL",
"SENTENCE_RANDOM_SVC_URL2",
"SENTENCE_AGE_MIN",
"SENTENCE_SVC_DELAY_MIN",
"SENTENCE_MODE",
"SENTENCE_API_SVC_URL",
"SENTENCE_AUTH_BUG_PROBABILITY",
"SENTENCE_NAME_SVC_URL",
"API_SWITCH",
"SENTENCE_RANDOM_SVC2_PROBABILITY",
"SENTENCE_NAMES",
"SENTENCE_AGE_MAX",
"SENTENCE_AGE_SVC_URL",
"SENTENCE_SVC_DELAY_MAX"
]
| [] | ["SENTENCE_RANDOM_SVC_URL", "SENTENCE_RANDOM_SVC_URL2", "SENTENCE_AGE_MIN", "SENTENCE_SVC_DELAY_MIN", "SENTENCE_MODE", "SENTENCE_API_SVC_URL", "SENTENCE_AUTH_BUG_PROBABILITY", "SENTENCE_NAME_SVC_URL", "API_SWITCH", "SENTENCE_RANDOM_SVC2_PROBABILITY", "SENTENCE_NAMES", "SENTENCE_AGE_MAX", "SENTENCE_AGE_SVC_URL", "SENTENCE_SVC_DELAY_MAX"] | python | 14 | 0 | |
cmd/do.go | // Copyright © 2018 NAME HERE <EMAIL ADDRESS>
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package cmd
import (
"fmt"
"io/ioutil"
"log"
"net/http"
"os"
"strings"
"time"
"github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"github.com/prometheus/client_golang/prometheus"
"github.com/gorilla/mux"
)
// var timeout_env = os.Getenv("TIMEOUT")
var url = os.Getenv("URL")
var verbose = os.Getenv("VERBOSE")
// var method = os.Getenv("METHOD")
// doCmd represents the do command
var doCmd = &cobra.Command{
Use: "do",
Short: "Does the configured http request.",
Run: func(cmd *cobra.Command, args []string) {
//parse from env later
timeout := 1000
//Setup prometheus
httpReqs := prometheus.NewCounterVec(
prometheus.CounterOpts{
Name: "curl_response_total",
Help: "How many HTTP requests processed, partitioned by status code and HTTP method.",
},
[]string{"code", "url", "method"},
)
prometheus.MustRegister(httpReqs)
httpResponseTime := prometheus.NewSummaryVec(prometheus.SummaryOpts{
Name: "curl_response_time",
Help: "Time taken to respond to request. partitioned by status code and HTTP method.",
Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
},
[]string{"code", "url", "method"})
prometheus.MustRegister(httpResponseTime)
go func() {
r := mux.NewRouter()
r.Handle("/metrics", prometheus.InstrumentHandler(
"metrics", prometheus.UninstrumentedHandler(),
))
s := &http.Server{
Addr: ":8080",
ReadTimeout: 8 * time.Second,
WriteTimeout: 8 * time.Second,
MaxHeaderBytes: 1 << 20,
Handler: r,
}
log.Fatal(s.ListenAndServe())
}()
if url == "" {
logrus.Errorf("Must provide URL value via 'URL' environment variable")
os.Exit(1)
}
if !strings.HasPrefix(url, "http") {
url = "http://" + url
}
for {
time.Sleep(time.Duration(timeout) * time.Millisecond)
// Generated by curl-to-Go: https://mholt.github.io/curl-to-go
startTime := time.Now()
resp, err := http.Get(url)
duration := time.Since(startTime)
if err != nil {
logrus.Errorf("Error getting from %v: %v", url, err)
code := -1
if resp != nil {
code = resp.StatusCode
}
httpResponseTime.WithLabelValues(fmt.Sprintf("%v", code), url, "GET").Observe(float64(duration))
continue
// handle err
}
			// Close the body at the end of each iteration; a defer here would
			// accumulate until run() returns and leak connections.
			s := fmt.Sprintf("%v %v %v", url, resp.StatusCode, duration)
			if verbose == "true" {
				body, _ := ioutil.ReadAll(resp.Body)
				s += fmt.Sprintf("\n%v", string(body))
			}
			resp.Body.Close()
			fmt.Println(s)
httpResponseTime.WithLabelValues(fmt.Sprintf("%v", resp.StatusCode), url, "GET").Observe(float64(duration))
}
},
}
func init() {
rootCmd.AddCommand(doCmd)
}
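// Illustrative only (assumed scrape): with the metrics server above running,
// "curl localhost:8080/metrics" should include series such as
//
//	curl_response_total{code="200",method="GET",url="http://example"} 42
//	curl_response_time{code="200",...,quantile="0.99"} 1.2e+08
//
// (the summary observes raw time.Duration values, i.e. nanoseconds).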
| [
"\"TIMEOUT\"",
"\"URL\"",
"\"VERBOSE\"",
"\"METHOD\""
]
| []
| [
"URL",
"METHOD",
"TIMEOUT",
"VERBOSE"
]
| [] | ["URL", "METHOD", "TIMEOUT", "VERBOSE"] | go | 4 | 0 | |
tests/python/pants_test/util/test_contextutil.py | # coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
import pstats
import shutil
import signal
import sys
import unittest
import uuid
import zipfile
from contextlib import contextmanager
import mock
from pants.util.contextutil import (HardSystemExit, InvalidZipPath, Timer, environment_as,
exception_logging, hard_exit_handler, hermetic_environment_as,
maybe_profiled, open_zip, pushd, signal_handler_as, stdio_as,
temporary_dir, temporary_file)
from pants.util.process_handler import subprocess
PATCH_OPTS = dict(autospec=True, spec_set=True)
class ContextutilTest(unittest.TestCase):
def test_empty_environment(self):
with environment_as():
pass
def test_override_single_variable(self):
with temporary_file() as output:
# test that the override takes place
with environment_as(HORK='BORK'):
subprocess.Popen([sys.executable, '-c', 'import os; print(os.environ["HORK"])'],
stdout=output).wait()
output.seek(0)
self.assertEquals('BORK\n', output.read())
# test that the variable is cleared
with temporary_file() as new_output:
subprocess.Popen([sys.executable, '-c', 'import os; print("HORK" in os.environ)'],
stdout=new_output).wait()
new_output.seek(0)
self.assertEquals('False\n', new_output.read())
def test_environment_negation(self):
with temporary_file() as output:
with environment_as(HORK='BORK'):
with environment_as(HORK=None):
# test that the variable is cleared
subprocess.Popen([sys.executable, '-c', 'import os; print("HORK" in os.environ)'],
stdout=output).wait()
output.seek(0)
self.assertEquals('False\n', output.read())
def test_hermetic_environment(self):
self.assertIn('USER', os.environ)
with hermetic_environment_as(**{}):
self.assertNotIn('USER', os.environ)
def test_hermetic_environment_subprocesses(self):
self.assertIn('USER', os.environ)
with hermetic_environment_as(**dict(AAA='333')):
output = subprocess.check_output('env', shell=True)
self.assertNotIn('USER=', output)
self.assertIn('AAA', os.environ)
self.assertEquals(os.environ['AAA'], '333')
self.assertIn('USER', os.environ)
self.assertNotIn('AAA', os.environ)
def test_simple_pushd(self):
pre_cwd = os.getcwd()
with temporary_dir() as tempdir:
with pushd(tempdir) as path:
self.assertEquals(tempdir, path)
self.assertEquals(os.path.realpath(tempdir), os.getcwd())
self.assertEquals(pre_cwd, os.getcwd())
self.assertEquals(pre_cwd, os.getcwd())
def test_nested_pushd(self):
pre_cwd = os.getcwd()
with temporary_dir() as tempdir1:
with pushd(tempdir1):
self.assertEquals(os.path.realpath(tempdir1), os.getcwd())
with temporary_dir(root_dir=tempdir1) as tempdir2:
with pushd(tempdir2):
self.assertEquals(os.path.realpath(tempdir2), os.getcwd())
self.assertEquals(os.path.realpath(tempdir1), os.getcwd())
self.assertEquals(os.path.realpath(tempdir1), os.getcwd())
self.assertEquals(pre_cwd, os.getcwd())
self.assertEquals(pre_cwd, os.getcwd())
def test_temporary_file_no_args(self):
with temporary_file() as fp:
self.assertTrue(os.path.exists(fp.name), 'Temporary file should exist within the context.')
        self.assertFalse(os.path.exists(fp.name),
                         'Temporary file should not exist outside of the context.')
def test_temporary_file_without_cleanup(self):
with temporary_file(cleanup=False) as fp:
self.assertTrue(os.path.exists(fp.name), 'Temporary file should exist within the context.')
self.assertTrue(os.path.exists(fp.name),
'Temporary file should exist outside of context if cleanup=False.')
os.unlink(fp.name)
def test_temporary_file_within_other_dir(self):
with temporary_dir() as path:
with temporary_file(root_dir=path) as f:
self.assertTrue(os.path.realpath(f.name).startswith(os.path.realpath(path)),
'file should be created in root_dir if specified.')
def test_temporary_dir_no_args(self):
with temporary_dir() as path:
self.assertTrue(os.path.exists(path), 'Temporary dir should exist within the context.')
self.assertTrue(os.path.isdir(path), 'Temporary dir should be a dir and not a file.')
self.assertFalse(os.path.exists(path), 'Temporary dir should not exist outside of the context.')
def test_temporary_dir_without_cleanup(self):
with temporary_dir(cleanup=False) as path:
self.assertTrue(os.path.exists(path), 'Temporary dir should exist within the context.')
self.assertTrue(os.path.exists(path),
'Temporary dir should exist outside of context if cleanup=False.')
shutil.rmtree(path)
def test_temporary_dir_with_root_dir(self):
with temporary_dir() as path1:
with temporary_dir(root_dir=path1) as path2:
self.assertTrue(os.path.realpath(path2).startswith(os.path.realpath(path1)),
'Nested temporary dir should be created within outer dir.')
def test_timer(self):
class FakeClock(object):
def __init__(self):
self._time = 0.0
def time(self):
ret = self._time
self._time += 0.0001 # Force a little time to elapse.
return ret
def sleep(self, duration):
self._time += duration
clock = FakeClock()
# Note: to test with the real system clock, use this instead:
# import time
# clock = time
with Timer(clock=clock) as t:
self.assertLess(t.start, clock.time())
self.assertGreater(t.elapsed, 0)
clock.sleep(0.1)
self.assertGreater(t.elapsed, 0.1)
clock.sleep(0.1)
self.assertTrue(t.finish is None)
self.assertGreater(t.elapsed, 0.2)
self.assertLess(t.finish, clock.time())
def test_open_zipDefault(self):
with temporary_dir() as tempdir:
with open_zip(os.path.join(tempdir, 'test'), 'w') as zf:
self.assertTrue(zf._allowZip64)
def test_open_zipTrue(self):
with temporary_dir() as tempdir:
with open_zip(os.path.join(tempdir, 'test'), 'w', allowZip64=True) as zf:
self.assertTrue(zf._allowZip64)
def test_open_zipFalse(self):
with temporary_dir() as tempdir:
with open_zip(os.path.join(tempdir, 'test'), 'w', allowZip64=False) as zf:
self.assertFalse(zf._allowZip64)
def test_open_zip_raises_exception_on_falsey_paths(self):
falsey = (None, '', False)
for invalid in falsey:
with self.assertRaises(InvalidZipPath):
open_zip(invalid).gen.next()
def test_open_zip_returns_realpath_on_badzipfile(self):
# In case of file corruption, deleting a Pants-constructed symlink would not resolve the error.
with temporary_file() as not_zip:
with temporary_dir() as tempdir:
file_symlink = os.path.join(tempdir, 'foo')
os.symlink(not_zip.name, file_symlink)
self.assertEquals(os.path.realpath(file_symlink), os.path.realpath(not_zip.name))
with self.assertRaisesRegexp(zipfile.BadZipfile, r'{}'.format(not_zip.name)):
open_zip(file_symlink).gen.next()
@contextmanager
def _stdio_as_tempfiles(self):
"""Harness to replace `sys.std*` with tempfiles.
Validates that all files are read/written/flushed correctly, and acts as a
contextmanager to allow for recursive tests.
"""
# Prefix contents written within this instance with a unique string to differentiate
# them from other instances.
uuid_str = str(uuid.uuid4())
def u(string):
return '{}#{}'.format(uuid_str, string)
stdin_data = u('stdio')
stdout_data = u('stdout')
stderr_data = u('stderr')
with temporary_file() as tmp_stdin,\
temporary_file() as tmp_stdout,\
temporary_file() as tmp_stderr:
print(stdin_data, file=tmp_stdin)
tmp_stdin.seek(0)
# Read prepared content from stdin, and write content to stdout/stderr.
with stdio_as(stdout_fd=tmp_stdout.fileno(),
stderr_fd=tmp_stderr.fileno(),
stdin_fd=tmp_stdin.fileno()):
self.assertEquals(sys.stdin.fileno(), 0)
self.assertEquals(sys.stdout.fileno(), 1)
self.assertEquals(sys.stderr.fileno(), 2)
self.assertEquals(stdin_data, sys.stdin.read().strip())
print(stdout_data, file=sys.stdout)
yield
print(stderr_data, file=sys.stderr)
tmp_stdout.seek(0)
tmp_stderr.seek(0)
self.assertEquals(stdout_data, tmp_stdout.read().strip())
self.assertEquals(stderr_data, tmp_stderr.read().strip())
def test_stdio_as(self):
self.assertTrue(sys.stderr.fileno() > 2,
"Expected a pseudofile as stderr, got: {}".format(sys.stderr))
old_stdout, old_stderr, old_stdin = sys.stdout, sys.stderr, sys.stdin
# The first level tests that when `sys.std*` are file-likes (in particular, the ones set up in
# pytest's harness) rather than actual files, we stash and restore them properly.
with self._stdio_as_tempfiles():
# The second level stashes the first level's actual file objects and then re-opens them.
with self._stdio_as_tempfiles():
pass
# Validate that after the second level completes, the first level still sees valid
# fds on `sys.std*`.
self.assertEquals(sys.stdin.fileno(), 0)
self.assertEquals(sys.stdout.fileno(), 1)
self.assertEquals(sys.stderr.fileno(), 2)
self.assertEquals(sys.stdout, old_stdout)
self.assertEquals(sys.stderr, old_stderr)
self.assertEquals(sys.stdin, old_stdin)
def test_stdio_as_dev_null(self):
# Capture output to tempfiles.
with self._stdio_as_tempfiles():
# Read/write from/to `/dev/null`, which will be validated by the harness as not
# affecting the tempfiles.
with stdio_as(stdout_fd=-1, stderr_fd=-1, stdin_fd=-1):
self.assertEquals(b'', sys.stdin.read())
print('garbage', file=sys.stdout)
print('garbage', file=sys.stderr)
def test_signal_handler_as(self):
mock_initial_handler = 1
mock_new_handler = 2
with mock.patch('signal.signal', **PATCH_OPTS) as mock_signal:
mock_signal.return_value = mock_initial_handler
try:
with signal_handler_as(signal.SIGUSR2, mock_new_handler):
raise NotImplementedError('blah')
except NotImplementedError:
pass
self.assertEquals(mock_signal.call_count, 2)
mock_signal.assert_has_calls([
mock.call(signal.SIGUSR2, mock_new_handler),
mock.call(signal.SIGUSR2, mock_initial_handler)
])
def test_permissions(self):
with temporary_file(permissions=0700) as f:
self.assertEquals(0700, os.stat(f.name)[0] & 0777)
with temporary_dir(permissions=0644) as path:
self.assertEquals(0644, os.stat(path)[0] & 0777)
def test_exception_logging(self):
fake_logger = mock.Mock()
with self.assertRaises(AssertionError):
with exception_logging(fake_logger, 'error!'):
assert True is False
fake_logger.exception.assert_called_once_with('error!')
def test_maybe_profiled(self):
with temporary_dir() as td:
profile_path = os.path.join(td, 'profile.prof')
with maybe_profiled(profile_path):
for _ in range(5):
print('test')
# Ensure the profile data was written.
self.assertTrue(os.path.exists(profile_path))
# Ensure the profile data is valid.
pstats.Stats(profile_path).print_stats()
def test_hard_exit_handler(self):
with mock.patch('os._exit', **PATCH_OPTS) as mock_exit:
with hard_exit_handler():
raise HardSystemExit()
mock_exit.assert_called_once_with(0)
| []
| []
| [
"AAA",
"HORK"
]
| [] | ["AAA", "HORK"] | python | 2 | 0 | |
functions/app.py | import os
import json
import requests
from jinja2 import Template
from aws_lambda_powertools import Logger
from aws_lambda_powertools.middleware_factory import lambda_handler_decorator
from aws_lambda_powertools.utilities.data_classes import EventBridgeEvent
from aws_lambda_powertools.utilities.data_classes import APIGatewayProxyEvent
logger = Logger()
@lambda_handler_decorator
def middleware_before_after(handler, event, context):
try:
# logic_before_handler_execution()
response = handler(event, context)
# logic_after_handler_execution()
return response
except Exception as e:
logger.error(e)
raise e
@middleware_before_after
def lambda_handler(event, context):
""" EventBridge Handler """
event: EventBridgeEvent = EventBridgeEvent(event)
mm_list_channels()
return 0
@middleware_before_after
def api_handler(event, context):
""" API Gateway Handler """
# print(event)
event: APIGatewayProxyEvent = APIGatewayProxyEvent(event)
mm_list_channels(event['queryStringParameters'])
return {
'statusCode': 200,
'body': '{"message": "Channel list OK."}'
}
def mm_list_channels(params: dict = None):  # None default avoids a shared mutable dict
# print(params)
mm_channels = MmChannels(
os.getenv('MM_TOKEN'),
os.getenv('MM_BASE_URL'),
os.getenv('MM_TEAM_ID') if not params else params['team_id'],
os.getenv('MM_POST_CHANNEL_ID') if not params else params['channel_id'],
None if not params else params['user_id'],
False if not params else True
)
logger.info(mm_channels)
mm_channels()
MM_POST_TEXT_TMPL = """
| # | channel | display_name | header | purpose |
|:-:|:--|:--|:--|:--|
{%- set ns = namespace(idx = 1) -%}
{% for c in chs %}
| {{ ns.idx }} | ~{{ c['name'] }} | {{ c['display_name'] }} | {{ c['header'] }} | {{ c['purpose'] }} |
{%- set ns.idx = ns.idx + 1 -%}
{%- endfor %}
"""
class MmChannels:
    __slots__ = [
'token',
'base_url',
'team_id',
'post_channel_id',
'user_id',
'ephemeral',
'mm_channels_api_url',
'mm_post_api_url',
'channels'
]
def __str__(self):
return f'Public channel list post. base_url: {self.base_url}, team_id: {self.team_id}, post_channel_id: {self.post_channel_id}, user_id: {self.user_id}, mm_channels_api_url: {self.mm_channels_api_url}, mm_post_api_url: {self.mm_post_api_url}'
def __init__(self, _token: str, _base_url: str, _team_id: str, _post_channel_id: str, _user_id: str, _ephemeral: bool):
self.token = _token
self.base_url = _base_url
self.team_id = _team_id
self.post_channel_id = _post_channel_id
self.user_id = _user_id
self.ephemeral = _ephemeral
self.mm_channels_api_url = f'{_base_url}/api/v4/teams/{self.team_id}/channels'
self.mm_post_api_url = f'{_base_url}/api/v4/posts' + ('/ephemeral' if self.ephemeral else '')
def channel_list(self) -> None:
_channel_list = []
headers = {
'Authorization': f'Bearer {self.token}',
'Content-Type': 'application/json'
}
page = 0
while True:
params = {'page': page, 'per_page': 10}
response = requests.get(self.mm_channels_api_url, headers=headers, params=params)
status = response.status_code
if status == 200:
_channel_list += [
{
'name': d['name'],
'display_name': d['display_name'],
'lower_display_name': d['display_name'].lower(),
'header': d['header'].replace('\n', '').replace('https://', ''),
'purpose': d['purpose'].replace('\n', '').replace('https://', ''),
} for d in response.json()]
else:
logger.error(response.json())
raise Exception(status)
if len(response.json()) < 10:
break
page += 1
self.channels = _channel_list
def sorted_channels(self) -> None:
self.channels = sorted(self.channels, key=lambda x: x['name'])
def post_text(self) -> str:
template = Template(MM_POST_TEXT_TMPL)
return template.render(chs=self.channels)
def post(self, _post_text: str) -> None:
# print(_post_text)
headers = {
'Authorization': f'Bearer {self.token}',
'Content-Type': 'application/json'
}
_params = {
'channel_id': self.post_channel_id,
'message': _post_text,
}
if self.ephemeral:
params = {
'user_id': self.user_id,
'post': _params
}
else:
params = _params
# print(params)
response = requests.post(self.mm_post_api_url, headers=headers, json=params)
if (response.status_code != 201):
logger.error(response.json())
raise Exception(response.status_code)
def __call__(self):
self.channel_list()
# print(self.channels)
self.sorted_channels()
# print(self.channels)
self.post(self.post_text())
| []
| []
| [
"MM_TOKEN",
"MM_POST_CHANNEL_ID",
"MM_TEAM_ID",
"MM_BASE_URL"
]
| [] | ["MM_TOKEN", "MM_POST_CHANNEL_ID", "MM_TEAM_ID", "MM_BASE_URL"] | python | 4 | 0 | |
pyramid_oidc/configuration.py | import logging
import os
from pyramid_oidc.exceptions import MissingConfiguration
log = logging.getLogger(__name__)
# Development mode: allow the OAuth flow over plain HTTP (no SSL).
os.environ['OAUTHLIB_INSECURE_TRANSPORT'] = '1'
CONFIG_CLIENT_ID = 'oidc.client_id'
CONFIG_CLIENT_SECRET = 'oidc.client_secret'
CONFIG_OP_AUTHZ_URI = 'oidc.op_authz_uri'
CONFIG_OP_TOKEN_URI = 'oidc.op_token_uri'
CONFIG_OP_PUBLIC_KEY = 'oidc.op_public_key'
CONFIG_OP_USERINFO_URI = 'oidc.op_userinfo_uri'
REQUIRED_CONFIG = (
CONFIG_CLIENT_ID,
CONFIG_CLIENT_SECRET,
CONFIG_OP_AUTHZ_URI,
CONFIG_OP_PUBLIC_KEY,
CONFIG_OP_TOKEN_URI,
CONFIG_OP_USERINFO_URI)
def includeme(config):
validate_config(config.registry.settings)
config.add_route('oidc_authn', '/oidc_authn')
config.add_route('oidc_callback', '/oidc_callback')
config.scan('pyramid_oidc.views')
def validate_config(settings):
missing_options = [
option
for option in REQUIRED_CONFIG
if option not in settings]
if missing_options:
raise MissingConfiguration(missing_options)
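# Illustrative sketch (assumed values): a minimal settings mapping that
# passes validate_config() might look like
#
#   settings = {
#       'oidc.client_id': 'my-client',
#       'oidc.client_secret': 's3cr3t',
#       'oidc.op_authz_uri': 'https://op.example/authorize',
#       'oidc.op_token_uri': 'https://op.example/token',
#       'oidc.op_public_key': '-----BEGIN PUBLIC KEY-----...',
#       'oidc.op_userinfo_uri': 'https://op.example/userinfo',
#   }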
| []
| []
| [
"OAUTHLIB_INSECURE_TRANSPORT"
]
| [] | ["OAUTHLIB_INSECURE_TRANSPORT"] | python | 1 | 0 | |
cmd/alertmanager/main.go | // Copyright 2015 Prometheus Team
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import (
"context"
"fmt"
"github.com/prometheus/alertmanager/notify/slackV2"
"net"
"net/http"
"net/url"
"os"
"os/signal"
"path/filepath"
"runtime"
"strings"
"sync"
"syscall"
"time"
"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/pkg/errors"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promhttp"
"github.com/prometheus/common/model"
"github.com/prometheus/common/promlog"
promlogflag "github.com/prometheus/common/promlog/flag"
"github.com/prometheus/common/route"
"github.com/prometheus/common/version"
"github.com/prometheus/exporter-toolkit/web"
webflag "github.com/prometheus/exporter-toolkit/web/kingpinflag"
"gopkg.in/alecthomas/kingpin.v2"
"github.com/prometheus/alertmanager/api"
"github.com/prometheus/alertmanager/cluster"
"github.com/prometheus/alertmanager/config"
"github.com/prometheus/alertmanager/dispatch"
"github.com/prometheus/alertmanager/inhibit"
"github.com/prometheus/alertmanager/nflog"
"github.com/prometheus/alertmanager/notify"
"github.com/prometheus/alertmanager/notify/email"
"github.com/prometheus/alertmanager/notify/opsgenie"
"github.com/prometheus/alertmanager/notify/pagerduty"
"github.com/prometheus/alertmanager/notify/pushover"
"github.com/prometheus/alertmanager/notify/sigma"
"github.com/prometheus/alertmanager/notify/slack"
"github.com/prometheus/alertmanager/notify/sns"
"github.com/prometheus/alertmanager/notify/victorops"
"github.com/prometheus/alertmanager/notify/webhook"
"github.com/prometheus/alertmanager/notify/wechat"
"github.com/prometheus/alertmanager/provider/mem"
"github.com/prometheus/alertmanager/silence"
"github.com/prometheus/alertmanager/template"
"github.com/prometheus/alertmanager/timeinterval"
"github.com/prometheus/alertmanager/types"
"github.com/prometheus/alertmanager/ui"
)
var (
requestDuration = prometheus.NewHistogramVec(
prometheus.HistogramOpts{
Name: "alertmanager_http_request_duration_seconds",
Help: "Histogram of latencies for HTTP requests.",
Buckets: []float64{.05, 0.1, .25, .5, .75, 1, 2, 5, 20, 60},
},
[]string{"handler", "method"},
)
responseSize = prometheus.NewHistogramVec(
prometheus.HistogramOpts{
Name: "alertmanager_http_response_size_bytes",
Help: "Histogram of response size for HTTP requests.",
Buckets: prometheus.ExponentialBuckets(100, 10, 7),
},
[]string{"handler", "method"},
)
clusterEnabled = prometheus.NewGauge(
prometheus.GaugeOpts{
Name: "alertmanager_cluster_enabled",
Help: "Indicates whether the clustering is enabled or not.",
},
)
configuredReceivers = prometheus.NewGauge(
prometheus.GaugeOpts{
Name: "alertmanager_receivers",
Help: "Number of configured receivers.",
},
)
configuredIntegrations = prometheus.NewGauge(
prometheus.GaugeOpts{
Name: "alertmanager_integrations",
Help: "Number of configured integrations.",
},
)
promlogConfig = promlog.Config{}
)
func init() {
prometheus.MustRegister(requestDuration)
prometheus.MustRegister(responseSize)
prometheus.MustRegister(clusterEnabled)
prometheus.MustRegister(configuredReceivers)
prometheus.MustRegister(configuredIntegrations)
prometheus.MustRegister(version.NewCollector("alertmanager"))
}
func instrumentHandler(handlerName string, handler http.HandlerFunc) http.HandlerFunc {
handlerLabel := prometheus.Labels{"handler": handlerName}
return promhttp.InstrumentHandlerDuration(
requestDuration.MustCurryWith(handlerLabel),
promhttp.InstrumentHandlerResponseSize(
responseSize.MustCurryWith(handlerLabel),
handler,
),
)
}
const defaultClusterAddr = "0.0.0.0:9094"
// buildReceiverIntegrations builds a list of integration notifiers off of a
// receiver config.
func buildReceiverIntegrations(nc *config.Receiver, tmpl *template.Template, logger log.Logger) ([]notify.Integration, error) {
var (
errs types.MultiError
integrations []notify.Integration
add = func(name string, i int, rs notify.ResolvedSender, f func(l log.Logger) (notify.Notifier, error)) {
n, err := f(log.With(logger, "integration", name))
if err != nil {
errs.Add(err)
return
}
integrations = append(integrations, notify.NewIntegration(n, rs, name, i))
}
)
for i, c := range nc.WebhookConfigs {
add("webhook", i, c, func(l log.Logger) (notify.Notifier, error) { return webhook.New(c, tmpl, l) })
}
for i, c := range nc.EmailConfigs {
add("email", i, c, func(l log.Logger) (notify.Notifier, error) { return email.New(c, tmpl, l), nil })
}
for i, c := range nc.PagerdutyConfigs {
add("pagerduty", i, c, func(l log.Logger) (notify.Notifier, error) { return pagerduty.New(c, tmpl, l) })
}
for i, c := range nc.OpsGenieConfigs {
add("opsgenie", i, c, func(l log.Logger) (notify.Notifier, error) { return opsgenie.New(c, tmpl, l) })
}
for i, c := range nc.WechatConfigs {
add("wechat", i, c, func(l log.Logger) (notify.Notifier, error) { return wechat.New(c, tmpl, l) })
}
for i, c := range nc.SlackConfigs {
add("slack", i, c, func(l log.Logger) (notify.Notifier, error) { return slack.New(c, tmpl, l) })
}
for i, c := range nc.SlackConfigV2 {
add("slackV2", i, c, func(l log.Logger) (notify.Notifier, error) { return slackV2.New(c, tmpl, l) })
}
for i, c := range nc.VictorOpsConfigs {
add("victorops", i, c, func(l log.Logger) (notify.Notifier, error) { return victorops.New(c, tmpl, l) })
}
for i, c := range nc.PushoverConfigs {
add("pushover", i, c, func(l log.Logger) (notify.Notifier, error) { return pushover.New(c, tmpl, l) })
}
for i, c := range nc.SNSConfigs {
add("sns", i, c, func(l log.Logger) (notify.Notifier, error) { return sns.New(c, tmpl, l) })
}
for i, c := range nc.SigmaConfigs {
add("sigma", i, c, func(l log.Logger) (notify.Notifier, error) { return sigma.New(c, tmpl, l) })
}
if errs.Len() > 0 {
return nil, &errs
}
return integrations, nil
}
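// Illustrative sketch (assumed notifier): wiring one more channel type only
// needs another loop following the add() pattern above, e.g. for a
// hypothetical nc.FooConfigs field backed by a hypothetical foo package:
//
//	for i, c := range nc.FooConfigs {
//		add("foo", i, c, func(l log.Logger) (notify.Notifier, error) { return foo.New(c, tmpl, l) })
//	}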
func main() {
os.Exit(run())
}
func run() int {
if os.Getenv("DEBUG") != "" {
runtime.SetBlockProfileRate(20)
runtime.SetMutexProfileFraction(20)
}
var (
configFile = kingpin.Flag("config.file", "Alertmanager configuration file name.").Default("alertmanager.yml").String()
dataDir = kingpin.Flag("storage.path", "Base path for data storage.").Default("data/").String()
retention = kingpin.Flag("data.retention", "How long to keep data for.").Default("120h").Duration()
alertGCInterval = kingpin.Flag("alerts.gc-interval", "Interval between alert GC.").Default("30m").Duration()
webConfig = webflag.AddFlags(kingpin.CommandLine)
externalURL = kingpin.Flag("web.external-url", "The URL under which Alertmanager is externally reachable (for example, if Alertmanager is served via a reverse proxy). Used for generating relative and absolute links back to Alertmanager itself. If the URL has a path portion, it will be used to prefix all HTTP endpoints served by Alertmanager. If omitted, relevant URL components will be derived automatically.").String()
routePrefix = kingpin.Flag("web.route-prefix", "Prefix for the internal routes of web endpoints. Defaults to path of --web.external-url.").String()
listenAddress = kingpin.Flag("web.listen-address", "Address to listen on for the web interface and API.").Default(":9093").String()
		getConcurrency = kingpin.Flag("web.get-concurrency", "Maximum number of GET requests processed concurrently. If negative or zero, the limit is GOMAXPROCS or 8, whichever is larger.").Default("0").Int()
httpTimeout = kingpin.Flag("web.timeout", "Timeout for HTTP requests. If negative or zero, no timeout is set.").Default("0").Duration()
clusterBindAddr = kingpin.Flag("cluster.listen-address", "Listen address for cluster. Set to empty string to disable HA mode.").
Default(defaultClusterAddr).String()
clusterAdvertiseAddr = kingpin.Flag("cluster.advertise-address", "Explicit address to advertise in cluster.").String()
peers = kingpin.Flag("cluster.peer", "Initial peers (may be repeated).").Strings()
peerTimeout = kingpin.Flag("cluster.peer-timeout", "Time to wait between peers to send notifications.").Default("15s").Duration()
gossipInterval = kingpin.Flag("cluster.gossip-interval", "Interval between sending gossip messages. By lowering this value (more frequent) gossip messages are propagated across the cluster more quickly at the expense of increased bandwidth.").Default(cluster.DefaultGossipInterval.String()).Duration()
pushPullInterval = kingpin.Flag("cluster.pushpull-interval", "Interval for gossip state syncs. Setting this interval lower (more frequent) will increase convergence speeds across larger clusters at the expense of increased bandwidth usage.").Default(cluster.DefaultPushPullInterval.String()).Duration()
tcpTimeout = kingpin.Flag("cluster.tcp-timeout", "Timeout for establishing a stream connection with a remote node for a full state sync, and for stream read and write operations.").Default(cluster.DefaultTcpTimeout.String()).Duration()
probeTimeout = kingpin.Flag("cluster.probe-timeout", "Timeout to wait for an ack from a probed node before assuming it is unhealthy. This should be set to 99-percentile of RTT (round-trip time) on your network.").Default(cluster.DefaultProbeTimeout.String()).Duration()
probeInterval = kingpin.Flag("cluster.probe-interval", "Interval between random node probes. Setting this lower (more frequent) will cause the cluster to detect failed nodes more quickly at the expense of increased bandwidth usage.").Default(cluster.DefaultProbeInterval.String()).Duration()
settleTimeout = kingpin.Flag("cluster.settle-timeout", "Maximum time to wait for cluster connections to settle before evaluating notifications.").Default(cluster.DefaultPushPullInterval.String()).Duration()
reconnectInterval = kingpin.Flag("cluster.reconnect-interval", "Interval between attempting to reconnect to lost peers.").Default(cluster.DefaultReconnectInterval.String()).Duration()
peerReconnectTimeout = kingpin.Flag("cluster.reconnect-timeout", "Length of time to attempt to reconnect to a lost peer.").Default(cluster.DefaultReconnectTimeout.String()).Duration()
tlsConfigFile = kingpin.Flag("cluster.tls-config", "[EXPERIMENTAL] Path to config yaml file that can enable mutual TLS within the gossip protocol.").Default("").String()
allowInsecureAdvertise = kingpin.Flag("cluster.allow-insecure-public-advertise-address-discovery", "[EXPERIMENTAL] Allow alertmanager to discover and listen on a public IP address.").Bool()
)
promlogflag.AddFlags(kingpin.CommandLine, &promlogConfig)
kingpin.CommandLine.UsageWriter(os.Stdout)
kingpin.Version(version.Print("alertmanager"))
kingpin.CommandLine.GetFlag("help").Short('h')
kingpin.Parse()
logger := promlog.New(&promlogConfig)
level.Info(logger).Log("msg", "Starting Alertmanager", "version", version.Info())
level.Info(logger).Log("build_context", version.BuildContext())
err := os.MkdirAll(*dataDir, 0777)
if err != nil {
level.Error(logger).Log("msg", "Unable to create data directory", "err", err)
return 1
}
tlsTransportConfig, err := cluster.GetTLSTransportConfig(*tlsConfigFile)
if err != nil {
level.Error(logger).Log("msg", "unable to initialize TLS transport configuration for gossip mesh", "err", err)
return 1
}
var peer *cluster.Peer
if *clusterBindAddr != "" {
peer, err = cluster.Create(
log.With(logger, "component", "cluster"),
prometheus.DefaultRegisterer,
*clusterBindAddr,
*clusterAdvertiseAddr,
*peers,
true,
*pushPullInterval,
*gossipInterval,
*tcpTimeout,
*probeTimeout,
*probeInterval,
tlsTransportConfig,
*allowInsecureAdvertise,
)
if err != nil {
level.Error(logger).Log("msg", "unable to initialize gossip mesh", "err", err)
return 1
}
clusterEnabled.Set(1)
}
stopc := make(chan struct{})
var wg sync.WaitGroup
wg.Add(1)
notificationLogOpts := []nflog.Option{
nflog.WithRetention(*retention),
nflog.WithSnapshot(filepath.Join(*dataDir, "nflog")),
nflog.WithMaintenance(15*time.Minute, stopc, wg.Done, nil),
nflog.WithMetrics(prometheus.DefaultRegisterer),
nflog.WithLogger(log.With(logger, "component", "nflog")),
}
notificationLog, err := nflog.New(notificationLogOpts...)
if err != nil {
level.Error(logger).Log("err", err)
return 1
}
if peer != nil {
c := peer.AddState("nfl", notificationLog, prometheus.DefaultRegisterer)
notificationLog.SetBroadcast(c.Broadcast)
}
marker := types.NewMarker(prometheus.DefaultRegisterer)
silenceOpts := silence.Options{
SnapshotFile: filepath.Join(*dataDir, "silences"),
Retention: *retention,
Logger: log.With(logger, "component", "silences"),
Metrics: prometheus.DefaultRegisterer,
}
silences, err := silence.New(silenceOpts)
if err != nil {
level.Error(logger).Log("err", err)
return 1
}
if peer != nil {
c := peer.AddState("sil", silences, prometheus.DefaultRegisterer)
silences.SetBroadcast(c.Broadcast)
}
// Start providers before router potentially sends updates.
wg.Add(1)
go func() {
silences.Maintenance(15*time.Minute, filepath.Join(*dataDir, "silences"), stopc, nil)
wg.Done()
}()
defer func() {
close(stopc)
wg.Wait()
}()
// Peer state listeners have been registered, now we can join and get the initial state.
if peer != nil {
err = peer.Join(
*reconnectInterval,
*peerReconnectTimeout,
)
if err != nil {
level.Warn(logger).Log("msg", "unable to join gossip mesh", "err", err)
}
ctx, cancel := context.WithTimeout(context.Background(), *settleTimeout)
defer func() {
cancel()
if err := peer.Leave(10 * time.Second); err != nil {
level.Warn(logger).Log("msg", "unable to leave gossip mesh", "err", err)
}
}()
go peer.Settle(ctx, *gossipInterval*10)
}
alerts, err := mem.NewAlerts(context.Background(), marker, *alertGCInterval, nil, logger)
if err != nil {
level.Error(logger).Log("err", err)
return 1
}
defer alerts.Close()
var disp *dispatch.Dispatcher
defer disp.Stop()
groupFn := func(routeFilter func(*dispatch.Route) bool, alertFilter func(*types.Alert, time.Time) bool) (dispatch.AlertGroups, map[model.Fingerprint][]string) {
return disp.Groups(routeFilter, alertFilter)
}
// An interface value that holds a nil concrete value is non-nil.
// Therefore we explicitly pass an empty interface, so that notify can
// detect when the cluster is not enabled.
var clusterPeer cluster.ClusterPeer
if peer != nil {
clusterPeer = peer
}
api, err := api.New(api.Options{
Alerts: alerts,
Silences: silences,
StatusFunc: marker.Status,
Peer: clusterPeer,
Timeout: *httpTimeout,
Concurrency: *getConcurrency,
Logger: log.With(logger, "component", "api"),
Registry: prometheus.DefaultRegisterer,
GroupFunc: groupFn,
})
if err != nil {
level.Error(logger).Log("err", errors.Wrap(err, "failed to create API"))
return 1
}
amURL, err := extURL(logger, os.Hostname, *listenAddress, *externalURL)
if err != nil {
level.Error(logger).Log("msg", "failed to determine external URL", "err", err)
return 1
}
level.Debug(logger).Log("externalURL", amURL.String())
waitFunc := func() time.Duration { return 0 }
if peer != nil {
waitFunc = clusterWait(peer, *peerTimeout)
}
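// The effective notification timeout is the configured timeout, raised to at
// least notify.MinTimeout, plus the cluster wait, so peers later in the ring
// leave earlier peers time to send first.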
timeoutFunc := func(d time.Duration) time.Duration {
if d < notify.MinTimeout {
d = notify.MinTimeout
}
return d + waitFunc()
}
var (
inhibitor *inhibit.Inhibitor
tmpl *template.Template
)
dispMetrics := dispatch.NewDispatcherMetrics(false, prometheus.DefaultRegisterer)
pipelineBuilder := notify.NewPipelineBuilder(prometheus.DefaultRegisterer)
configLogger := log.With(logger, "component", "configuration")
configCoordinator := config.NewCoordinator(
*configFile,
prometheus.DefaultRegisterer,
configLogger,
)
configCoordinator.Subscribe(func(conf *config.Config) error {
tmpl, err = template.FromGlobs(conf.Templates...)
if err != nil {
return errors.Wrap(err, "failed to parse templates")
}
tmpl.ExternalURL = amURL
// Build the routing tree and record which receivers are used.
routes := dispatch.NewRoute(conf.Route, nil)
activeReceivers := make(map[string]struct{})
routes.Walk(func(r *dispatch.Route) {
activeReceivers[r.RouteOpts.Receiver] = struct{}{}
})
// Build the map of receiver to integrations.
receivers := make(map[string][]notify.Integration, len(activeReceivers))
var integrationsNum int
for _, rcv := range conf.Receivers {
if _, found := activeReceivers[rcv.Name]; !found {
// No need to build a receiver if no route is using it.
level.Info(configLogger).Log("msg", "skipping creation of receiver not referenced by any route", "receiver", rcv.Name)
continue
}
integrations, err := buildReceiverIntegrations(rcv, tmpl, logger)
if err != nil {
return err
}
// rcv.Name is guaranteed to be unique across all receivers.
receivers[rcv.Name] = integrations
integrationsNum += len(integrations)
}
// Build the map of time interval names to mute time definitions.
muteTimes := make(map[string][]timeinterval.TimeInterval, len(conf.MuteTimeIntervals))
for _, ti := range conf.MuteTimeIntervals {
muteTimes[ti.Name] = ti.TimeIntervals
}
inhibitor.Stop()
disp.Stop()
inhibitor = inhibit.NewInhibitor(alerts, conf.InhibitRules, marker, logger)
silencer := silence.NewSilencer(silences, marker, logger)
// An interface value that holds a nil concrete value is non-nil.
// Therefore we explicitly pass an empty interface, so that the notification
// pipeline can detect when the cluster is not enabled.
var pipelinePeer notify.Peer
if peer != nil {
pipelinePeer = peer
}
pipeline := pipelineBuilder.New(
receivers,
waitFunc,
inhibitor,
silencer,
muteTimes,
notificationLog,
pipelinePeer,
)
configuredReceivers.Set(float64(len(activeReceivers)))
configuredIntegrations.Set(float64(integrationsNum))
api.Update(conf, func(labels model.LabelSet) {
inhibitor.Mutes(labels)
silencer.Mutes(labels)
})
disp = dispatch.NewDispatcher(alerts, routes, pipeline, marker, timeoutFunc, nil, logger, dispMetrics)
routes.Walk(func(r *dispatch.Route) {
if r.RouteOpts.RepeatInterval > *retention {
level.Warn(configLogger).Log(
"msg",
"repeat_interval is greater than the data retention period. It can lead to notifications being repeated more often than expected.",
"repeat_interval",
r.RouteOpts.RepeatInterval,
"retention",
*retention,
"route",
r.Key(),
)
}
})
go disp.Run()
go inhibitor.Run()
return nil
})
if err := configCoordinator.Reload(); err != nil {
return 1
}
// Make routePrefix default to externalURL path if empty string.
if *routePrefix == "" {
*routePrefix = amURL.Path
}
*routePrefix = "/" + strings.Trim(*routePrefix, "/")
level.Debug(logger).Log("routePrefix", *routePrefix)
router := route.New().WithInstrumentation(instrumentHandler)
if *routePrefix != "/" {
router.Get("/", func(w http.ResponseWriter, r *http.Request) {
http.Redirect(w, r, *routePrefix, http.StatusFound)
})
router = router.WithPrefix(*routePrefix)
}
webReload := make(chan chan error)
ui.Register(router, webReload, logger)
mux := api.Register(router, *routePrefix)
srv := &http.Server{Addr: *listenAddress, Handler: mux}
srvc := make(chan struct{})
go func() {
level.Info(logger).Log("msg", "Listening", "address", *listenAddress)
if err := web.ListenAndServe(srv, *webConfig, logger); err != http.ErrServerClosed {
level.Error(logger).Log("msg", "Listen error", "err", err)
close(srvc)
}
defer func() {
if err := srv.Close(); err != nil {
level.Error(logger).Log("msg", "Error on closing the server", "err", err)
}
}()
}()
var (
hup = make(chan os.Signal, 1)
hupReady = make(chan bool)
term = make(chan os.Signal, 1)
)
signal.Notify(hup, syscall.SIGHUP)
signal.Notify(term, os.Interrupt, syscall.SIGTERM)
go func() {
<-hupReady
for {
select {
case <-hup:
// ignore error, already logged in `reload()`
_ = configCoordinator.Reload()
case errc := <-webReload:
errc <- configCoordinator.Reload()
}
}
}()
// Wait for reload or termination signals.
close(hupReady) // Unblock SIGHUP handler.
for {
select {
case <-term:
level.Info(logger).Log("msg", "Received SIGTERM, exiting gracefully...")
return 0
case <-srvc:
return 1
}
}
}
// clusterWait returns a function that inspects the current peer state and returns
// a duration of one base timeout for each peer with a higher ID than ourselves.
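// For example, with a 15s base timeout, the peer at position 0 waits 0s,
// position 1 waits 15s, position 2 waits 30s, and so on.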
func clusterWait(p *cluster.Peer, timeout time.Duration) func() time.Duration {
return func() time.Duration {
return time.Duration(p.Position()) * timeout
}
}
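// extURL resolves the externally reachable URL. When no explicit URL is
// given, it is derived from the host name and the listen address; for
// example, host "am0" with listen address ":9093" yields "http://am0:9093/".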
func extURL(logger log.Logger, hostnamef func() (string, error), listen, external string) (*url.URL, error) {
if external == "" {
hostname, err := hostnamef()
if err != nil {
return nil, err
}
_, port, err := net.SplitHostPort(listen)
if err != nil {
return nil, err
}
if port == "" {
level.Warn(logger).Log("msg", "no port found for listen address", "address", listen)
}
external = fmt.Sprintf("http://%s:%s/", hostname, port)
}
u, err := url.Parse(external)
if err != nil {
return nil, err
}
if u.Scheme != "http" && u.Scheme != "https" {
return nil, errors.Errorf("%q: invalid %q scheme, only 'http' and 'https' are supported", u.String(), u.Scheme)
}
ppref := strings.TrimRight(u.Path, "/")
if ppref != "" && !strings.HasPrefix(ppref, "/") {
ppref = "/" + ppref
}
u.Path = ppref
return u, nil
}
| ["\"DEBUG\""] | [] | ["DEBUG"] | [] | ["DEBUG"] | go | 1 | 0 |
vendor/github.com/go-openapi/validate/messages_test.go | // Copyright 2015 go-swagger maintainers
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package validate
import (
"fmt"
"io/ioutil"
"os"
"path/filepath"
"regexp"
"sort"
"strings"
"testing"
"github.com/go-openapi/loads"
"github.com/go-openapi/loads/fmts"
"github.com/go-openapi/strfmt"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"gopkg.in/yaml.v2"
)
var (
// This debug environment variable allows to report and capture actual validation messages
// during testing. It should be disabled (undefined) during CI tests.
DebugTest = os.Getenv("SWAGGER_DEBUG_TEST") != ""
)
func init() {
loads.AddLoader(fmts.YAMLMatcher, fmts.YAMLDoc)
}
type ExpectedMessage struct {
Message string `yaml:"message"`
WithContinueOnErrors bool `yaml:"withContinueOnErrors"` // should be expected only when SetContinueOnErrors(true)
IsRegexp bool `yaml:"isRegexp"` // expected message is interpreted as regexp (with regexp.MatchString())
}
type ExpectedFixture struct {
Comment string `yaml:"comment,omitempty"`
Todo string `yaml:"todo,omitempty"`
ExpectedLoadError bool `yaml:"expectedLoadError"` // expect error on load: skip validate step
ExpectedValid bool `yaml:"expectedValid"` // expect valid spec
ExpectedMessages []ExpectedMessage `yaml:"expectedMessages"`
ExpectedWarnings []ExpectedMessage `yaml:"expectedWarnings"`
Tested bool `yaml:"-"`
Failed bool `yaml:"-"`
}
type ExpectedMap map[string]*ExpectedFixture
// Test message improvements, issue #44 and some more
// ContinueOnErrors mode on
// WARNING: this test is very demanding and constructed with varied scenarios,
// which are not necessarily "unitary". Expect multiple changes in messages whenever
// altering the validator.
func Test_MessageQualityContinueOnErrors_Issue44(t *testing.T) {
if !enableLongTests {
skipNotify(t)
t.SkipNow()
}
errs := testMessageQuality(t, true, true) /* set haltOnErrors=true to iterate spec by spec */
assert.Zero(t, errs, "Message testing didn't match expectations")
}
// ContinueOnErrors mode off
func Test_MessageQualityStopOnErrors_Issue44(t *testing.T) {
if !enableLongTests {
skipNotify(t)
t.SkipNow()
}
errs := testMessageQuality(t, true, false) /* set haltOnErrors=true to iterate spec by spec */
assert.Zero(t, errs, "Message testing didn't match expectations")
}
func loadTestConfig(t *testing.T, fp string) ExpectedMap {
expectedConfig, err := ioutil.ReadFile(fp)
require.NoErrorf(t, err, "cannot read expected messages config file: %v", err)
tested := make(ExpectedMap, 200)
err = yaml.Unmarshal(expectedConfig, &tested)
require.NoErrorf(t, err, "cannot unmarshall expected messages from config file : %v", err)
// Check config
for fixture, expected := range tested {
require.Nil(t, UniqueItems("", "", expected.ExpectedMessages), "duplicate error messages configured for %s", fixture)
require.Nil(t, UniqueItems("", "", expected.ExpectedWarnings), "duplicate warning messages configured for %s", fixture)
}
return tested
}
func testMessageQuality(t *testing.T, haltOnErrors bool, continueOnErrors bool) int {
// Verifies the production of validation error messages in multiple
// spec scenarios.
//
// The objective is to demonstrate that:
// - messages are stable
// - validation continues as much as possible, even in presence of many errors
//
// haltOnErrors is used in dev mode to study and fix testcases step by step (output is pretty verbose)
//
// set SWAGGER_DEBUG_TEST=1 env to get a report of messages at the end of each test.
// expectedMessage{"", false, false},
//
// expected messages and warnings are configured in ./fixtures/validation/expected_messages.yaml
//
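// A hypothetical entry in that file could look like the following
// (fixture name and message text are illustrative; keys match the
// yaml tags of ExpectedFixture and ExpectedMessage above):
//
//   fixture-xyz.yaml:
//     expectedValid: false
//     expectedMessages:
//       - message: "some expected substring"
//         withContinueOnErrors: true
//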
var errs int // error count
tested := loadTestConfig(t, filepath.Join("fixtures", "validation", "expected_messages.yaml"))
if err := filepath.Walk(filepath.Join("fixtures", "validation"), testWalkSpecs(t, tested, haltOnErrors, continueOnErrors)); err != nil {
t.Logf("%v", err)
errs++
}
recapTest(t, tested)
return errs
}
func testDebugLog(t *testing.T, thisTest *ExpectedFixture) {
if DebugTest {
if thisTest.Comment != "" {
t.Logf("\tDEVMODE: Comment: %s", thisTest.Comment)
}
if thisTest.Todo != "" {
t.Logf("\tDEVMODE: Todo: %s", thisTest.Todo)
}
}
}
func expectInvalid(t *testing.T, path string, thisTest *ExpectedFixture, continueOnErrors bool) {
// Checking invalid specs
t.Logf("Testing messages for invalid spec: %s", path)
testDebugLog(t, thisTest)
doc, err := loads.Spec(path)
// Check specs with load errors (error is located in pkg loads or spec)
if thisTest.ExpectedLoadError {
// Expect a load error: no further validation may possibly be conducted.
require.Error(t, err, "expected this spec to return a load error")
assert.Equal(t, 0, verifyLoadErrors(t, err, thisTest.ExpectedMessages))
return
}
require.NoError(t, err, "expected this spec to load properly")
// Validate the spec document
validator := NewSpecValidator(doc.Schema(), strfmt.Default)
validator.SetContinueOnErrors(continueOnErrors)
res, warn := validator.Validate(doc)
// Check specs with load errors (error is located in pkg loads or spec)
require.False(t, res.IsValid(), "expected this spec to be invalid")
errs := verifyErrorsVsWarnings(t, res, warn)
errs += verifyErrors(t, res, thisTest.ExpectedMessages, "error", continueOnErrors)
errs += verifyErrors(t, warn, thisTest.ExpectedWarnings, "warning", continueOnErrors)
assert.Equal(t, 0, errs)
if errs > 0 {
t.Logf("Message qualification on spec validation failed for %s", path)
// DEVMODE allows developers to experiment and tune expected results
if DebugTest {
reportTest(t, path, res, thisTest.ExpectedMessages, "error", continueOnErrors)
reportTest(t, path, warn, thisTest.ExpectedWarnings, "warning", continueOnErrors)
}
}
}
func expectValid(t *testing.T, path string, thisTest *ExpectedFixture, continueOnErrors bool) {
// Expecting no messages (e.g. a valid spec): 0 messages expected
t.Logf("Testing valid spec: %s", path)
testDebugLog(t, thisTest)
doc, err := loads.Spec(path)
require.NoError(t, err, "expected this spec to load without error")
validator := NewSpecValidator(doc.Schema(), strfmt.Default)
validator.SetContinueOnErrors(continueOnErrors)
res, warn := validator.Validate(doc)
assert.True(t, res.IsValid(), "expected this spec to be valid")
assert.Lenf(t, res.Errors, 0, "expected no returned errors")
// check warnings
errs := verifyErrors(t, warn, thisTest.ExpectedWarnings, "warning", continueOnErrors)
assert.Equal(t, 0, errs)
if DebugTest && errs > 0 {
reportTest(t, path, res, thisTest.ExpectedMessages, "error", continueOnErrors)
reportTest(t, path, warn, thisTest.ExpectedWarnings, "warning", continueOnErrors)
}
}
func checkMustHalt(t *testing.T, haltOnErrors bool) {
if t.Failed() && haltOnErrors {
assert.FailNow(t, "test halted: stop testing on message checking error mode")
return
}
}
func testWalkSpecs(t *testing.T, tested ExpectedMap, haltOnErrors, continueOnErrors bool) filepath.WalkFunc {
return func(path string, info os.FileInfo, err error) error {
thisTest, found := tested[info.Name()]
if info.IsDir() || !found { // skip
return nil
}
t.Run(path, func(t *testing.T) {
if !DebugTest { // when running in dev mode, run serially
t.Parallel()
}
defer func() {
thisTest.Tested = true
thisTest.Failed = t.Failed()
}()
if !thisTest.ExpectedValid {
expectInvalid(t, path, thisTest, continueOnErrors)
checkMustHalt(t, haltOnErrors)
} else {
expectValid(t, path, thisTest, continueOnErrors)
checkMustHalt(t, haltOnErrors)
}
})
return nil
}
}
func recapTest(t *testing.T, config ExpectedMap) {
recapFailed := false
for k, v := range config {
if !v.Tested {
t.Logf("WARNING: %s configured but not tested (fixture not found)", k)
recapFailed = true
} else if v.Failed {
t.Logf("ERROR: %s failed passing messages verification", k)
recapFailed = true
}
}
if !recapFailed {
t.Log("INFO:We are good")
}
}
func reportTest(t *testing.T, path string, res *Result, expectedMessages []ExpectedMessage, msgtype string, continueOnErrors bool) {
// Prints out a recap of error messages. To be enabled during development / test iterations
verifiedErrors := make([]string, 0, 50)
lines := make([]string, 0, 50)
for _, e := range res.Errors {
verifiedErrors = append(verifiedErrors, e.Error())
}
t.Logf("DEVMODE:Recap of returned %s messages while validating %s ", msgtype, path)
for _, v := range verifiedErrors {
status := fmt.Sprintf("Unexpected %s", msgtype)
for _, s := range expectedMessages {
if (s.WithContinueOnErrors && continueOnErrors) || !s.WithContinueOnErrors {
if s.IsRegexp {
if matched, _ := regexp.MatchString(s.Message, v); matched {
status = fmt.Sprintf("Expected %s", msgtype)
break
}
} else {
if strings.Contains(v, s.Message) {
status = fmt.Sprintf("Expected %s", msgtype)
break
}
}
}
}
lines = append(lines, fmt.Sprintf("[%s]%s", status, v))
}
for _, s := range expectedMessages {
if (s.WithContinueOnErrors && continueOnErrors) || !s.WithContinueOnErrors {
status := fmt.Sprintf("Missing %s", msgtype)
for _, v := range verifiedErrors {
if s.IsRegexp {
if matched, _ := regexp.MatchString(s.Message, v); matched {
status = fmt.Sprintf("Expected %s", msgtype)
break
}
} else {
if strings.Contains(v, s.Message) {
status = fmt.Sprintf("Expected %s", msgtype)
break
}
}
}
if status != fmt.Sprintf("Expected %s", msgtype) {
lines = append(lines, fmt.Sprintf("[%s]%s", status, s.Message))
}
}
}
if len(lines) > 0 {
sort.Strings(lines)
for _, line := range lines {
t.Log(line)
}
}
}
func verifyErrorsVsWarnings(t *testing.T, res, warn *Result) int {
// First, verify result conventions: res and warn are redundant views of the same data, differing only in presentation
w := len(warn.Errors)
if !assert.Len(t, res.Warnings, w) ||
!assert.Len(t, warn.Warnings, 0) ||
!assert.Subset(t, res.Warnings, warn.Errors) ||
!assert.Subset(t, warn.Errors, res.Warnings) {
t.Log("Result equivalence errors vs warnings not verified")
return 1
}
return 0
}
func verifyErrors(t *testing.T, res *Result, expectedMessages []ExpectedMessage, msgtype string, continueOnErrors bool) int {
var numExpected, errs int
verifiedErrors := make([]string, 0, 50)
for _, e := range res.Errors {
verifiedErrors = append(verifiedErrors, e.Error())
}
for _, s := range expectedMessages {
if (s.WithContinueOnErrors && continueOnErrors) || !s.WithContinueOnErrors {
numExpected++
}
}
// We got the expected number of messages (e.g. no duplicates, no uncontrolled side-effect, ...)
if !assert.Len(t, verifiedErrors, numExpected, "unexpected number of %s messages returned. Wanted %d, got %d", msgtype, numExpected, len(verifiedErrors)) {
errs++
}
// Check that all expected messages are here
for _, s := range expectedMessages {
found := false
if (s.WithContinueOnErrors && continueOnErrors) || !s.WithContinueOnErrors {
for _, v := range verifiedErrors {
if s.IsRegexp {
if matched, _ := regexp.MatchString(s.Message, v); matched {
found = true
break
}
} else {
if strings.Contains(v, s.Message) {
found = true
break
}
}
}
if !assert.True(t, found, "Missing expected %s message: %s", msgtype, s.Message) {
errs++
}
}
}
// Check for no unexpected message
for _, v := range verifiedErrors {
found := false
for _, s := range expectedMessages {
if (s.WithContinueOnErrors && continueOnErrors) || !s.WithContinueOnErrors {
if s.IsRegexp {
if matched, _ := regexp.MatchString(s.Message, v); matched {
found = true
break
}
} else {
if strings.Contains(v, s.Message) {
found = true
break
}
}
}
}
if !assert.True(t, found, "unexpected %s message: %s", msgtype, v) {
errs++
}
}
return errs
}
func verifyLoadErrors(t *testing.T, err error, expectedMessages []ExpectedMessage) int {
var errs int
// Perform several matches on single error message
// Process here error messages from loads (normally unit tested in the load package:
// we just want to figure out how all this is captured at the validate package level.
v := err.Error()
for _, s := range expectedMessages {
var found bool
if s.IsRegexp {
if found, _ = regexp.MatchString(s.Message, v); found {
break
}
} else {
if found = strings.Contains(v, s.Message); found {
break
}
}
if !assert.True(t, found, "unexpected load error: %s", v) {
t.Logf("Expecting one of the following:")
for _, s := range expectedMessages {
smode := "Contains"
if s.IsRegexp {
smode = "MatchString"
}
t.Logf("[%s]:%s", smode, s.Message)
}
errs++
}
}
return errs
}
func testIssue(t *testing.T, path string, expectedNumErrors, expectedNumWarnings int) {
res, _ := loadAndValidate(t, path)
if expectedNumErrors > -1 && !assert.Len(t, res.Errors, expectedNumErrors) {
t.Log("Returned errors:")
for _, e := range res.Errors {
t.Logf("%v", e)
}
}
if expectedNumWarnings > -1 && !assert.Len(t, res.Warnings, expectedNumWarnings) {
t.Log("Returned warnings:")
for _, e := range res.Warnings {
t.Logf("%v", e)
}
}
}
// Test unitary fixture for dev and bug fixing
func Test_SingleFixture(t *testing.T) {
t.SkipNow()
path := filepath.Join("fixtures", "validation", "fixture-1231.yaml")
testIssue(t, path, -1, -1)
}
| ["\"SWAGGER_DEBUG_TEST\""] | [] | ["SWAGGER_DEBUG_TEST"] | [] | ["SWAGGER_DEBUG_TEST"] | go | 1 | 0 |
filter-test.py | #!/usr/bin/env python
#
# Test hook to launch an irker instance (if it doesn't already exist)
# just before shipping the notification. We start it in in another terminal
# so you can watch the debug messages. Intended to be used in the root
# directory of the irker repo. Probably only of interest only to irker
# developers
#
# To use this, set up irkerhook.py to fire on each commit. Creating a
# .git/hooks/post-commit file containing the line "irkerhook.py"; be
# sure to make the opos-commit file executable. Then set the
# filtercmd variable in your repo config as follows:
#
# [irker]
# filtercmd = filter-test.py
import os, sys, json, subprocess, time
metadata = json.loads(sys.argv[1])
ps = subprocess.Popen("ps -U %s uh" % os.getenv("LOGNAME"),
shell=True,
stdout=subprocess.PIPE)
data = ps.stdout.read()
irkerd_count = len([x for x in data.split("\n") if x.find("irkerd") != -1])
if irkerd_count:
sys.stderr.write("Using a running irker instance...\n")
else:
sys.stderr.write("Launching a new irker instance...\n")
os.system("gnome-terminal --title 'irkerd' -e 'irkerd -d 2' &")
time.sleep(1.5) # Avoid a race condition
print json.dumps(metadata)
# end
| [] | [] | ["LOGNAME"] | [] | ["LOGNAME"] | python | 1 | 0 |
manager/env.go | package manager
import (
"os"
)
var refreshTokenState string = os.Getenv("REFRESH_TOKEN_STATE")
var userDataState string = os.Getenv("USER_DATA_STATE")
var userEmailState string = os.Getenv("USER_EMAIL_STATE")
var seed string = os.Getenv("SEED")
var tokenManagerName string = os.Getenv("TOKEN_MANAGER")
| ["\"REFRESH_TOKEN_STATE\"", "\"USER_DATA_STATE\"", "\"USER_EMAIL_STATE\"", "\"SEED\"", "\"TOKEN_MANAGER\""] | [] | ["USER_DATA_STATE", "TOKEN_MANAGER", "USER_EMAIL_STATE", "REFRESH_TOKEN_STATE", "SEED"] | [] | ["USER_DATA_STATE", "TOKEN_MANAGER", "USER_EMAIL_STATE", "REFRESH_TOKEN_STATE", "SEED"] | go | 5 | 0 |
example/example.go | package main
import (
"log"
"net/http"
"os"
"time"
"github.com/go-vk-api/vk"
lp "github.com/go-vk-api/vk/longpoll/user"
)
func main() {
client, err := vk.NewClientWithOptions(
vk.WithToken(os.Getenv("VK_ACCESS_TOKEN")),
vk.WithHTTPClient(&http.Client{Timeout: time.Second * 30}),
)
if err != nil {
log.Panic(err)
}
err = printMe(client)
if err != nil {
log.Panic(err)
}
longpoll, err := lp.NewWithOptions(client, lp.WithMode(lp.ReceiveAttachments))
if err != nil {
log.Panic(err)
}
stream, err := longpoll.GetUpdatesStream(0)
if err != nil {
log.Panic(err)
}
for {
select {
case update, ok := <-stream.Updates:
if !ok {
return
}
switch data := update.Data.(type) {
case *lp.NewMessage:
if data.Text == "/hello" {
var sentMessageID int64
if err = client.CallMethod("messages.send", vk.RequestParams{
"peer_id": data.PeerID,
"message": "Hello!",
"forward_messages": data.ID,
"random_id": 0,
}, &sentMessageID); err != nil {
log.Panic(err)
}
log.Println(sentMessageID)
}
}
case err, ok := <-stream.Errors:
if ok {
// stream.Stop()
log.Panic(err)
}
}
}
}
func printMe(api *vk.Client) error {
var users []struct {
ID int64 `json:"id"`
FirstName string `json:"first_name"`
LastName string `json:"last_name"`
}
if err := api.CallMethod("users.get", vk.RequestParams{}, &users); err != nil {
return err
}
me := users[0]
log.Println(me.ID, me.FirstName, me.LastName)
return nil
}
| ["\"VK_ACCESS_TOKEN\""] | [] | ["VK_ACCESS_TOKEN"] | [] | ["VK_ACCESS_TOKEN"] | go | 1 | 0 |
internal/docstore/mongodocstore/mongo_test.go | // Copyright 2019 The Go Cloud Development Kit Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package mongodocstore
// To run these tests against a real MongoDB server, first run ./localmongo.sh.
// Then wait a few seconds for the server to be ready.
import (
"context"
"errors"
"os"
"testing"
"time"
"github.com/google/go-cmp/cmp"
"go.mongodb.org/mongo-driver/bson"
"go.mongodb.org/mongo-driver/mongo"
"gocloud.dev/internal/docstore"
"gocloud.dev/internal/docstore/driver"
"gocloud.dev/internal/docstore/drivertest"
"gocloud.dev/internal/testing/setup"
)
const (
serverURI = "mongodb://localhost"
dbName = "docstore-test"
collectionName1 = "docstore-test-1"
collectionName2 = "docstore-test-2"
)
type harness struct {
db *mongo.Database
}
func (h *harness) MakeCollection(ctx context.Context) (driver.Collection, error) {
coll, err := newCollection(h.db.Collection(collectionName1), drivertest.KeyField, nil, nil)
if err != nil {
return nil, err
}
// It seems that the client doesn't actually connect until the first RPC, which will
// be this one. So time out quickly if there's a problem.
tctx, cancel := context.WithTimeout(ctx, 5*time.Second)
defer cancel()
if err := coll.coll.Drop(tctx); err != nil {
return nil, err
}
return coll, nil
}
func (h *harness) MakeTwoKeyCollection(ctx context.Context) (driver.Collection, error) {
return newCollection(h.db.Collection(collectionName2), "", drivertest.HighScoreKey, nil)
}
func (h *harness) Close() {}
type codecTester struct{}
func (codecTester) UnsupportedTypes() []drivertest.UnsupportedType {
return []drivertest.UnsupportedType{
drivertest.Complex, drivertest.NanosecondTimes}
}
func (codecTester) DocstoreEncode(x interface{}) (interface{}, error) {
m, err := encodeDoc(drivertest.MustDocument(x), true)
if err != nil {
return nil, err
}
return bson.Marshal(m)
}
func (codecTester) DocstoreDecode(value, dest interface{}) error {
var m map[string]interface{}
if err := bson.Unmarshal(value.([]byte), &m); err != nil {
return err
}
return decodeDoc(m, drivertest.MustDocument(dest), mongoIDField)
}
func (codecTester) NativeEncode(x interface{}) (interface{}, error) {
return bson.Marshal(x)
}
func (codecTester) NativeDecode(value, dest interface{}) error {
return bson.Unmarshal(value.([]byte), dest)
}
type verifyAs struct{}
func (verifyAs) Name() string {
return "verify As"
}
func (verifyAs) CollectionCheck(coll *docstore.Collection) error {
var mc *mongo.Collection
if !coll.As(&mc) {
return errors.New("Collection.As failed")
}
return nil
}
func (verifyAs) BeforeDo(as func(i interface{}) bool) error {
return nil
}
func (verifyAs) BeforeQuery(as func(i interface{}) bool) error {
return nil
}
func (verifyAs) QueryCheck(it *docstore.DocumentIterator) error {
var c *mongo.Cursor
if !it.As(&c) {
return errors.New("DocumentIterator.As failed")
}
return nil
}
func TestConformance(t *testing.T) {
client := newTestClient(t)
defer func() { client.Disconnect(context.Background()) }()
newHarness := func(context.Context, *testing.T) (drivertest.Harness, error) {
return &harness{client.Database(dbName)}, nil
}
drivertest.RunConformanceTests(t, newHarness, codecTester{}, []drivertest.AsTest{verifyAs{}})
}
func newTestClient(t *testing.T) *mongo.Client {
if !setup.HasDockerTestEnvironment() {
t.Skip("Skipping Mongo tests since the Mongo server is not available")
}
ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
defer cancel()
client, err := Dial(ctx, serverURI)
if err != nil {
t.Fatalf("dialing to %s: %v", serverURI, err)
}
if err := client.Ping(ctx, nil); err != nil {
t.Fatalf("connecting to %s: %v", serverURI, err)
}
return client
}
// Mongo-specific tests.
func fakeConnectionStringInEnv() func() {
oldURLVal := os.Getenv("MONGO_SERVER_URL")
os.Setenv("MONGO_SERVER_URL", "mongodb://localhost")
return func() {
os.Setenv("MONGO_SERVER_URL", oldURLVal)
}
}
func TestOpenCollectionURL(t *testing.T) {
cleanup := fakeConnectionStringInEnv()
defer cleanup()
tests := []struct {
URL string
WantErr bool
}{
// OK.
{"mongo://mydb/mycollection", false},
// Missing database name.
{"mongo:///mycollection", true},
// Missing collection name.
{"mongo://mydb/", true},
// Passing id_field parameter.
{"mongo://mydb/mycollection?id_field=foo", false},
// Invalid parameter.
{"mongo://mydb/mycollection?param=value", true},
}
ctx := context.Background()
for _, test := range tests {
_, err := docstore.OpenCollection(ctx, test.URL)
if (err != nil) != test.WantErr {
t.Errorf("%s: got error %v, want error %v", test.URL, err, test.WantErr)
}
}
}
func TestLowercaseFields(t *testing.T) {
// Verify that the LowercaseFields option works in all cases.
must := func(err error) {
t.Helper()
if err != nil {
t.Fatal(err)
}
}
ctx := context.Background()
client := newTestClient(t)
defer func() { client.Disconnect(ctx) }()
db := client.Database(dbName)
dc, err := newCollection(db.Collection("lowercase-fields"), "id", nil, &Options{LowercaseFields: true})
if err != nil {
t.Fatal(err)
}
coll := docstore.NewCollection(dc)
type S struct {
ID, F, G int
DocstoreRevision interface{}
}
// driver.Document.GetField is case-insensitive on structs.
doc := drivertest.MustDocument(&S{ID: 1, DocstoreRevision: 1})
for _, f := range []string{"ID", "Id", "id", "DocstoreRevision", "docstorerevision"} {
got, err := doc.GetField(f)
if err != nil {
t.Errorf("%s: %v", f, err)
}
if got != 1 {
t.Errorf("got %q, want 1", got)
}
}
check := func(got, want interface{}) {
t.Helper()
switch w := want.(type) {
case S:
w.DocstoreRevision = got.(S).DocstoreRevision
want = w
case map[string]interface{}:
w["docstorerevision"] = got.(map[string]interface{})["docstorerevision"]
want = w
}
if !cmp.Equal(got, want) {
t.Errorf("\ngot %+v\nwant %+v", got, want)
}
}
must(coll.Put(ctx, &S{ID: 1, F: 2, G: 3}))
// Get with a struct.
got := S{ID: 1}
must(coll.Get(ctx, &got))
check(got, S{ID: 1, F: 2, G: 3})
// Get with map.
got2 := map[string]interface{}{"id": 1}
must(coll.Get(ctx, got2))
check(got2, map[string]interface{}{"id": int32(1), "f": int64(2), "g": int64(3)})
// Field paths in Get.
got3 := S{ID: 1}
must(coll.Get(ctx, &got3, "G"))
check(got3, S{ID: 1, F: 0, G: 3})
// Field paths in Update.
got4 := map[string]interface{}{"id": 1}
must(coll.Actions().Update(&S{ID: 1}, docstore.Mods{"F": 4}).Get(got4).Do(ctx))
check(got4, map[string]interface{}{"id": int32(1), "f": int64(4), "g": int64(3)})
// // Query filters.
var got5 S
must(coll.Query().Where("ID", "=", 1).Where("G", ">", 2).Get(ctx).Next(ctx, &got5))
check(got5, S{ID: 1, F: 4, G: 3})
}
| ["\"MONGO_SERVER_URL\""] | [] | ["MONGO_SERVER_URL"] | [] | ["MONGO_SERVER_URL"] | go | 1 | 0 |
app/app.go | package app
import (
"bytes"
"context"
"database/sql"
"encoding/json"
"fmt"
"github.com/gofunct/gofs"
"github.com/google/wire"
"github.com/pkg/errors"
"github.com/spf13/afero"
"github.com/spf13/viper"
"github.com/tcnksm/go-input"
"go.opencensus.io/trace"
"gocloud.dev/blob"
"gocloud.dev/health"
"gocloud.dev/health/sqlhealth"
"gocloud.dev/runtimevar"
"gocloud.dev/server"
"io"
"log"
"os"
"os/exec"
"path/filepath"
"strings"
"sync"
"time"
)
const apiVersion = "v1"
var applicationSet = wire.NewSet(
newApplication,
appHealthChecks,
trace.AlwaysSample,
)
type AppFunc func(*App) error
type tfItem struct {
Sensitive bool
Type string
Value string
}
type Maintainer struct {
Name string `json:"name"`
Email string `json:"email"`
Url string `json:"url"`
}
type State struct {
Project tfItem `json:"project"`
ClusterName tfItem `json:"cluster_name"`
ClusterZone tfItem `json:"cluster_zone"`
DatabaseInstance tfItem `json:"database_instance"`
DatabaseRegion tfItem `json:"database_region"`
RUnVarConfig tfItem `json:"run_var_config"`
RunVarName tfItem `json:"run_var_name"`
}
type Path struct {
TfDir string `json:"tfDir"`
ProtoDir string `json:"protoDir"`
TemplateDir string `json:"templateDir"`
StaticDir string `json:"staticDir"`
CertPath string `json:"certPath"`
KeyPath string `json:"keyPath"`
}
type Chart struct {
Icon string `json:"icon"`
ApiVersion string `json:"apiVersion"`
Version string `json:"version"`
AppVersion string `json:"appVersion"`
Name string `json:"name"`
Description string `json:"description"`
KeyWords []string `json:"keywords"`
Home string `json:"home"`
Sources []string `json:"sources"`
Maintainers []*Maintainer `json:"maintainers"`
}
type Values struct {
Modules bool `json:"modules"`
Bucket string `json:"bucket"`
DbName string `json:"dbName"`
DbHost string `json:"dbHost"`
DbUser string `json:"dbUser"`
DbPassword string `json:"dbPassword"`
RunVarWait string `json:"runVarWait"`
GitIgnore []string `json:"gitignore"`
DockerIgnore []string `json:"dockerignore"`
RunVar string `json:"run_var"`
State *State `json:"state"`
Path *Path `json:"path"`
}
type App struct {
Values *Values `json:"values"`
Chart *Chart `json:"chart"`
AppFuncs []AppFunc
fs afero.Fs
chart *viper.Viper
values *viper.Viper
q *input.UI
buf *bytes.Buffer
srv *server.Server
db *sql.DB
bucket *blob.Bucket
// The following fields are protected by mu:
mu sync.RWMutex
motd string
}
func (a *App) Execute(reader io.Reader, writer io.Writer) error {
a.buf = bytes.NewBuffer(nil)
if reader == nil {
reader = os.Stdin
}
if writer == nil {
writer = os.Stdout
}
if _, err := a.buf.ReadFrom(reader); err != nil {
return err
}
{
a.chart = viper.New()
a.chart.SetConfigType("yaml")
a.chart.SetConfigName("Chart")
a.chart.AddConfigPath("helm")
a.chart.AutomaticEnv()
a.chart.AllowEmptyEnv(true)
{
a.chart.SetDefault("icon", "https://github.com/gofunct/logo/blob/master/white_logo_dark_background.jpg?raw=true")
a.chart.SetDefault("apiVersion", apiVersion)
a.chart.SetDefault("version", "v0.0.1")
a.chart.SetDefault("appVersion", "0.1")
a.chart.SetDefault("name", filepath.Base(os.Getenv("HOME")))
a.chart.SetDefault("description", "this is your applications default description")
a.chart.SetDefault("keywords", []string{"stencil", filepath.Base(os.Getenv("HOME"))})
a.chart.SetDefault("home", "https://"+filepath.Join("github", os.Getenv("PWD"))+".com")
a.chart.SetDefault("sources", filepath.Join("github", os.Getenv("PWD")))
a.chart.SetDefault("maintainers", &Maintainer{
Name: "Coleman Word",
Email: "[email protected]",
Url: "https://github.com/gofunct",
})
}
_ = a.chart.SafeWriteConfig()
if err := a.chart.ReadInConfig(); err != nil {
return errors.Wrap(err, "failed to read Chart config")
}
}
{
a.values = viper.New()
a.values.SetConfigType("yaml")
a.values.SetConfigName("values")
a.values.AddConfigPath("helm")
a.values.AutomaticEnv()
a.values.AllowEmptyEnv(true)
{
if gofs.FileExists("deploy/*.tf") {
bytess, err := a.ShellOutput("terraform", "output", "-state", "deploy", "-json")
if err != nil {
return errors.Wrap(err, `failed to execute terraform with args: "terraform", "output", "-state", "deploy", "-json"`)
}
if err := a.values.ReadConfig(bytes.NewBuffer(bytess)); err != nil {
return errors.Wrap(err, "failed to read in terraform config")
}
}
}
{
a.values.SetDefault("modules", true)
a.values.SetDefault("bucket", filepath.Base(os.Getenv("HOME"))+"-bucket")
a.values.SetDefault("dbHost", filepath.Base(os.Getenv("HOME"))+"-db")
a.values.SetDefault("dbHost", "")
a.values.SetDefault("dbUser", os.Getenv("USER"))
a.values.SetDefault("dbPassword", "admin")
a.values.SetDefault("runVarWait", 15*time.Second)
a.values.SetDefault("gitignore", []string{"vendor", ".idea", "bin", "temp", "certs"})
a.values.SetDefault("dockerignore", []string{"*.md", ".idea"})
a.values.SetDefault("tfDir", "./deploy")
a.values.SetDefault("protoDir", "./proto")
a.values.SetDefault("templateDir", "./templates")
a.values.SetDefault("staticDir", "./static")
a.values.SetDefault("certPath", "./certs/app.pem")
a.values.SetDefault("certPath", "./certs/app.key")
}
_ = a.values.SafeWriteConfig()
if err := a.values.ReadInConfig(); err != nil {
return errors.Wrap(err, "failed to read config")
}
}
{
if err := a.chart.Unmarshal(&a.Chart); err != nil {
return errors.Wrap(err, "failed to unmarshal Chart config")
}
if err := a.values.Unmarshal(&a.Chart); err != nil {
return errors.Wrap(err, "failed to unmarshal config")
}
for _, f := range a.AppFuncs {
if err := f(a); err != nil {
return err
}
}
if err := a.values.WriteConfig(); err != nil {
return errors.Wrap(err, "failed to update values config")
}
if err := a.chart.WriteConfig(); err != nil {
return errors.Wrap(err, "failed to update chart config")
}
}
if _, err := a.buf.WriteTo(writer); err != nil {
return err
}
return nil
}
func (a *App) WriteConfig() error {
if err := a.chart.WriteConfig(); err != nil {
return err
}
if err := a.values.WriteConfig(); err != nil {
return err
}
return nil
}
func (a *App) Shell(args ...string) (stdout string, err error) {
stdoutb, err := a.ShellOutput(args...)
return strings.TrimSpace(string(stdoutb)), err
}
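// Example: out, err := a.Shell("git", "rev-parse", "HEAD") returns the
// command's stdout with surrounding whitespace trimmed.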
func (a *App) ShellOutput(args ...string) (stdout []byte, err error) {
cmd := exec.Command(args[0], args[1:]...)
cmd.Stderr = os.Stderr
cmd.Env = append(cmd.Env, os.Environ()...)
stdoutb, err := cmd.Output()
if err != nil {
return nil, fmt.Errorf("running %v: %v", cmd.Args, err)
}
return stdoutb, nil
}
func (a *App) MarshalJson(v interface{}) []byte {
output, _ := json.MarshalIndent(v, "", " ")
return output
}
func (a *App) UnmarshalJson(v interface{}) error {
s := a.MarshalJson(a.values.AllSettings())
s = append(s, a.MarshalJson(a.chart.AllSettings())...)
if err := json.Unmarshal(s, v); err != nil {
return err
}
return nil
}
func (a *App) Read(p []byte) (n int, err error) {
return a.buf.Read(p)
}
func (a *App) Write(p []byte) (n int, err error) {
return a.buf.Write(p)
}
func (a *App) QueryChart(name, defaul, question string, loop, required bool) {
if a.q == nil {
a.q = input.DefaultUI()
}
ans, err := a.q.Ask(question, &input.Options{
Default: defaul,
Loop: loop,
Required: required,
ValidateFunc: validateChart(name, required),
})
if err != nil {
panic(err)
}
// persist the answer under the queried key
a.chart.Set(name, ans)
}
func (a *App) QueryValue(name, defaul, question string, required bool) {
if a.q == nil {
a.q = input.DefaultUI()
}
ans, err := a.q.Ask(question, &input.Options{
Default: defaul,
Loop: required,
Required: required,
ValidateFunc: validateValues(name, required),
})
if err != nil {
panic(err)
}
// persist the answer under the queried key
a.values.Set(name, ans)
}
func Initialize(app *App) *App {
app.buf = bytes.NewBuffer(nil)
app.buf.ReadFrom(os.Stdin)
app.buf.ReadFrom(os.Stdout)
app.chart = viper.New()
app.chart.SetConfigType("yaml")
app.chart.SetConfigName("Chart")
app.chart.AddConfigPath("helm")
app.chart.AutomaticEnv()
app.chart.AllowEmptyEnv(true)
app.values = viper.New()
app.values.SetConfigType("yaml")
app.values.SetConfigName("values")
app.values.AddConfigPath("helm")
app.values.AutomaticEnv()
app.values.AllowEmptyEnv(true)
app.q = input.DefaultUI()
return app
}
func validateChart(key string, required bool) func(s string) error {
return func(s string) error {
if s == "" && required {
return errors.New("query error: empty input detected")
}
if len(s) > 50 {
return errors.New("query error: over 50 characters")
}
return nil
}
}
func validateValues(key string, required bool) func(s string) error {
return func(s string) error {
if s == "" {
return errors.New("query error: empty input detected")
}
if len(s) > 50 {
return errors.New("query error: over 50 characters")
}
return nil
}
}
// appHealthChecks returns a health check for the database. This will signal
// to Kubernetes or other orchestrators that the server should not receive
// traffic until the server is able to connect to its database.
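// The returned checkers are typically wired into server.Options.HealthChecks
// when the *server.Server is constructed (assumed wiring; it happens outside
// this file).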
func appHealthChecks(db *sql.DB) ([]health.Checker, func()) {
dbCheck := sqlhealth.New(db)
list := []health.Checker{dbCheck}
return list, func() {
dbCheck.Stop()
}
}
// newApplication creates a new application struct based on the backends and the message
// of the day variable.
func newApplication(srv *server.Server, db *sql.DB, bucket *blob.Bucket, motdVar *runtimevar.Variable) *App {
app := &App{
srv: srv,
db: db,
bucket: bucket,
}
go app.watchMOTDVar(motdVar)
return app
}
// watchMOTDVar listens for changes in v and updates the app's message of the
// day. It is run in a separate goroutine.
func (a *App) watchMOTDVar(v *runtimevar.Variable) {
ctx := context.Background()
for {
snap, err := v.Watch(ctx)
if err != nil {
log.Printf("watch MOTD variable: %v", err)
continue
}
log.Println("updated MOTD to", snap.Value)
a.mu.Lock()
a.motd = snap.Value.(string)
a.mu.Unlock()
}
}
| ["\"HOME\"", "\"HOME\"", "\"PWD\"", "\"PWD\"", "\"HOME\"", "\"HOME\"", "\"USER\""] | [] | ["PWD", "USER", "HOME"] | [] | ["PWD", "USER", "HOME"] | go | 3 | 0 |
test_automate_download_freesound.py | """
Unit tests for test_automate_download_freesound.py
Run with:
$ pytest
"""
import unittest
import mock
import automate_download_freesound
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import NoSuchElementException, TimeoutException, WebDriverException
from collections import namedtuple
import pytest
import os
import shutil
class FreeSoundLoginElementsTest(unittest.TestCase):
'''Using the setUpClass() and tearDownClass() methods along with the @classmethod decorator.
These methods enable us to set the values at the class level rather than at method level.
The values initialized at class level are shared between the test methods.'''
@classmethod
def setUpClass(cls):
cls.driver = webdriver.Chrome()
cls.driver.get("https://freesound.org/home/login/?next=/")
def test_login_username(self):
self.assertTrue(self.is_element_present(By.XPATH, '//*[@id="id_username"]'))
def test_password(self):
self.assertTrue(self.is_element_present(By.XPATH, '//*[@id="id_password"]'))
@classmethod
def tearDownClass(cls):
cls.driver.quit()
def is_element_present(self, how, what):
'''
Helper method to confirm the presence of an element on page
:params how: By locator type
:params what: locator value
'''
try:
self.driver.find_element(by=how, value=what)
except NoSuchElementException:
return False
return True
class FreeSoundSearchByTextTest(unittest.TestCase):
'''Using the setUpClass() and tearDownClass() methods along with the @classmethod decorator.
These methods enable us to set the values at the class level rather than at method level.
The values initialized at class level are shared between the test methods.'''
@classmethod
def setUpClass(cls):
cls.driver = webdriver.Chrome()
cls.driver.get("https://freesound.org/")
def test_search_by_text(self):
'''
Test to see that the number of elements within the first page of a search item is 15.
'''
# get the search textbox
self.search_field = self.driver.find_element_by_name("q")
self.search_field.clear()
# enter search keyword and submit
self.search_field.send_keys("dogs barking")
self.search_field.submit()
# get the list of elements which are displayed after the search
# currently on result page using find_elements_by_class_name method
lists = self.driver.find_elements_by_class_name("title")
self.assertEqual(15, len(lists))
@classmethod
def tearDownClass(cls):
cls.driver.quit()
def is_element_present(self, how, what):
'''
Helper method to confirm the presence of an element on page
:params how: By locator type
:params what: locator value
'''
try:
self.driver.find_element(by=how, value=what)
except NoSuchElementException:
return False
return True
class FreeSoundSearchTest(unittest.TestCase):
'''Using the setUpClass() and tearDownClass() methods along with the @classmethod decorator.
These methods enable us to set the values at the class level rather than at method level.
The values initialized at class level are shared between the test methods.'''
@classmethod
def setUpClass(cls):
cls.driver = webdriver.Chrome()
cls.driver.get("https://freesound.org/search/?q=")
def test_for_number_of_elements(self):
# get the list of elements which are displayed after the search
# currently on result page using find_elements_by_class_name method
lists = self.driver.find_elements_by_class_name("title")
self.assertEqual(15, len(lists))
def test_for_file_format(self):
'''
Test to find attribute of file format
'''
self.assertTrue(self.is_element_present(By.XPATH, '//*[@id="sidebar"]/h3[3]'))
def test_for_wav_file_format(self):
'''
Test to find specific wav format
'''
self.assertTrue(self.is_element_present(By.LINK_TEXT, 'wav'))
def test_for_samplerate(self):
'''
Test to find attribute of samplerate
'''
self.assertTrue(self.is_element_present(By.XPATH, '//*[@id="sidebar"]/h3[4]'))
def test_for_specific_samplerate(self):
'''
Test to find specific 48000 sample rate
'''
self.assertTrue(self.is_element_present(By.LINK_TEXT, '48000'))
def test_for_advanced_filter(self):
'''
Test for advanced filter button
'''
self.assertTrue(self.is_element_present(
By.CSS_SELECTOR, 'a[onclick*=showAdvancedSearchOption'))
@classmethod
def tearDownClass(cls):
cls.driver.quit()
def is_element_present(self, how, what):
'''
Helper method to confirm the presence of an element on page
:params how: By locator type
:params what: locator value
'''
try:
self.driver.find_element(by=how, value=what)
except NoSuchElementException:
return False
return True
class FreeSoundAdvancedFilter(unittest.TestCase):
'''Using the setUpClass() and tearDownClass() methods along with the @classmethod decorator.
These methods enable us to set the values at the class level rather than at method level.
The values initialized at class level are shared between the test methods.'''
@classmethod
def setUpClass(cls):
cls.driver = webdriver.Chrome()
cls.driver.get("https://freesound.org/search/?q=")
cls.driver.find_element_by_css_selector('a[onclick*=showAdvancedSearchOption').click()
def test_fail_filter_item(self):
with self.assertRaises(NoSuchElementException):
self.driver.find_element_by_id('10000')
def test_tags_filter_item(self):
tag_element = WebDriverWait(self.driver, 10).until(
EC.visibility_of_element_located((By.NAME, 'a_tag')))
tag_element.click()
self.assertTrue(tag_element.is_selected())
def test_filenames_filter_item(self):
file_element = WebDriverWait(self.driver, 10).until(
EC.visibility_of_element_located((By.NAME, 'a_filename')))
file_element.click()
self.assertTrue(file_element.is_selected())
def test_description_filter_item(self):
description_element = WebDriverWait(self.driver, 10).until(
EC.visibility_of_element_located((By.NAME, 'a_description')))
description_element.click()
self.assertTrue(description_element.is_selected())
@classmethod
def tearDownClass(cls):
cls.driver.quit()
def is_element_present(self, how, what):
'''
Helper method to confirm the presence of an element on page
:params how: By locator type
:params what: locator value
'''
try:
self.driver.find_element(by=how, value=what)
except NoSuchElementException:
return False
return True
class SimulateDownloadIntegrationTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.email = os.environ['FREESOUND_EMAIL']
cls.password = os.environ['FREESOUND_PASSWORD']
cls.download_path = os.path.expanduser("~") + "/Downloads/"
shutil.rmtree(os.path.join(cls.download_path, 'toaster pop set'), ignore_errors=True)
shutil.rmtree(os.path.join(cls.download_path, 'tiger'), ignore_errors=True)
shutil.rmtree(os.path.join(cls.download_path, 'glass breaking'), ignore_errors=True)
def test_simulate_download_basic(self):
'''
Testing with no filters, just the one positional argument
'''
args = automate_download_freesound.parse_args(['automate_download_freesound.py', 'toaster pop set'])
download_count = automate_download_freesound.simulate_download(
args.sounds[0], self.download_path, self.email, self.password, args)
self.assertEqual(download_count, 2)
update_download_path = os.path.join(self.download_path, 'toaster pop set')
self.assertEqual(len(os.listdir(update_download_path)), download_count)
def test_simulate_download_optional_arguments(self):
'''
Testing with optional filter arguments
'''
args = automate_download_freesound.parse_args(
['automate_download_freesound.py', 'tiger',
'--sample-rate', '48000', '--file-format', 'mp3'])
download_count = automate_download_freesound.simulate_download(
args.sounds[0], self.download_path, self.email, self.password, args)
self.assertEqual(download_count, 2)
update_download_path = os.path.join(self.download_path, 'tiger')
self.assertEqual(len(os.listdir(update_download_path)), download_count)
def test_simulate_download_advanced_filters(self):
'''
Testing with optional arguments and advanced filters
'''
args = automate_download_freesound.parse_args(
['automate_download_freesound.py', 'glass breaking',
'--sample-rate', '48000', '--file-format', 'flac', '--advanced-filter', 'True'])
download_count = automate_download_freesound.simulate_download(
args.sounds[0], self.download_path, self.email, self.password, args)
self.assertEqual(download_count, 3)
update_download_path = os.path.join(self.download_path, 'glass breaking')
self.assertEqual(len(os.listdir(update_download_path)), download_count)
@classmethod
def tearDownClass(cls):
shutil.rmtree(os.path.join(cls.download_path, 'toaster pop set'), ignore_errors=True)
shutil.rmtree(os.path.join(cls.download_path, 'tiger'), ignore_errors=True)
shutil.rmtree(os.path.join(cls.download_path, 'glass breaking'), ignore_errors=True)
class FreeSoundLoginAuthenticationTest(unittest.TestCase):
@mock.patch('getpass.getpass')
@mock.patch('__builtin__.raw_input')
def test_authenticate(self, input, getpass):
input.return_value = '[email protected]'
getpass.return_value = 'MyPassword'
Credentials = namedtuple('Credentials', ['email', 'password'])
user_info = Credentials(email=input.return_value, password=getpass.return_value)
self.assertEqual(automate_download_freesound.authenticate(), user_info)
@mock.patch('getpass.getpass')
@mock.patch('__builtin__.raw_input')
def test_verify_authentication_fail(self, input, getpass):
'''
Test that verify_authentication fails for invalid credentials
'''
input.return_value = '[email protected]'
getpass.return_value = 'MyPassword'
Credentials = namedtuple('Credentials', ['email', 'password'])
user_info = Credentials(email=input.return_value, password=getpass.return_value)
self.assertFalse(automate_download_freesound.verify_authentication(user_info))
@mock.patch('getpass.getpass')
@mock.patch('__builtin__.raw_input')
def test_verify_authentication_pass(self, input, getpass):
'''
Test that verify_authentication succeeds for valid credentials
'''
input.return_value = os.environ['FREESOUND_EMAIL']
getpass.return_value = os.environ['FREESOUND_PASSWORD']
Credentials = namedtuple('Credentials', ['email', 'password'])
user_info = Credentials(email=input.return_value, password=getpass.return_value)
self.assertTrue(automate_download_freesound.verify_authentication(user_info))
class CommandLineArgumentsTests(unittest.TestCase):
def test_parse_args_sound_one(self):
'''
Basic test to test for the sound positional argument of parse_args()
'''
args = automate_download_freesound.parse_args(['automate_download_freesound.py', 'dogs'])
self.assertEqual(args.sounds, ['dogs'])
self.assertEqual(args.downloadpath, os.path.expanduser("~") + "/Downloads/")
self.assertEqual(args.file_format, None)
self.assertEqual(args.samplerate, None)
self.assertFalse(args.advanced_filter)
def test_parse_args_sound_two(self):
'''
Test for multiple arguments that include spaces, but separated by commas
'''
args = automate_download_freesound.parse_args(
['automate_download_freesound.py', "dogs barking loud, birds chirping loud"])
self.assertEqual(args.sounds, ['dogs barking loud', 'birds chirping loud'])
self.assertEqual(args.downloadpath, os.path.expanduser("~") + "/Downloads/")
self.assertEqual(args.file_format, None)
self.assertEqual(args.samplerate, None)
self.assertFalse(args.advanced_filter)
def test_parse_args_sound_three(self):
'''
        Test for trailing/extra commas in the input
'''
args = automate_download_freesound.parse_args(
['automate_download_freesound.py', "dogs,cats,birds,"])
self.assertEqual(args.sounds, ['dogs', 'cats', 'birds'])
self.assertEqual(args.downloadpath, os.path.expanduser("~") + "/Downloads/")
self.assertEqual(args.file_format, None)
self.assertEqual(args.samplerate, None)
self.assertFalse(args.advanced_filter)
def test_parse_args_format_pass(self):
args = automate_download_freesound.parse_args(
['automate_download_freesound.py', "dogs,cats,birds,", "--file-format", "wav"])
assert args.file_format in [None, "wav", "flac", "aiff", "ogg", "mp3", "m4a"]
def test_parse_args_format_fail(self):
        '''Test that an invalid file format triggers a command line syntax error with exit code 2
        '''
with self.assertRaises(SystemExit) as err:
automate_download_freesound.parse_args(
['automate_download_freesound.py', "dogs,cats,birds,", "--file-format", "mp5"])
self.assertEqual(err.exception.code, 2)
def test_parse_args_samplerate_pass(self):
args = automate_download_freesound.parse_args(
['automate_download_freesound.py', "dogs,cats,birds,", "--sample-rate", "48000"])
assert int(args.samplerate) in [None, 11025, 16000, 22050, 44100, 48000, 88200, 96000]
def test_parse_args_samplerate_fail(self):
        '''Test that an out-of-range sample rate triggers a command line syntax error with exit code 2
        '''
with self.assertRaises(SystemExit) as err:
automate_download_freesound.parse_args(
['automate_download_freesound.py', "dogs,cats,birds,", "--sample-rate", "2500"])
self.assertEqual(err.exception.code, 2)
def test_parse_args_download_path_pass(self):
args = automate_download_freesound.parse_args(
['automate_download_freesound.py', "dogs,cats,birds,"])
self.assertEqual(args.downloadpath, os.path.expanduser("~") + "/Downloads/")
def test_main(self):
'''
        Test that main exits with error code 1 and shows help when no arguments are provided
'''
with self.assertRaises(SystemExit) as err:
automate_download_freesound.main(['automate_download_freesound.py'])
self.assertEqual(err.exception.code, 1)
| []
| []
| [
"FREESOUND_PASSWORD",
"FREESOUND_EMAIL"
]
| [] | ["FREESOUND_PASSWORD", "FREESOUND_EMAIL"] | python | 2 | 0 | |
PercBot.py | import os
import discord
import pickle
import re
from datetime import datetime, timedelta
import asyncio
import json
import requests
import yfinance as yf
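# The Discord bot token is read from the TOKEN environment variable, set by the hosting platform (e.g. a PaaS config var).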
token = os.getenv("TOKEN")
client = discord.Client()
@client.event
async def on_ready():
print("Logged in as " + str(client.user))
@client.event
async def on_message(msg):
if msg.author == client.user:
return
elif msg.content.lower().startswith("-pk "):
await MessageHandler(msg)
elif msg.content.lower().startswith("-end"):
await msg.channel.send("PercBot has been terminated")
async def MessageHandler(msg):
with open("key.json", "r") as f:
j = json.loads(f.read())
content = msg.content.split()
command = content[1].lower()
if content[1] in j:
await msg.channel.send(j[content[1]])
elif command == "-timer":
await MakeTimer(msg, int(content[2]), content[3:])
elif command == "-addimage":
try:
imageAdder(msg)
except Exception:
await msg.channel.send("Could not make image")
elif command == "-image":
key = content[2]
with open("Assets.json", "r") as f:
j = json.loads(f.read())
await msg.channel.send(file=discord.File(j[key]))
elif command == "-t":
await StockPriceHandler(msg)
def imageAdder(msg):
key = msg.content.split(" ")[2]
URL = msg.content.split(" ")[3]
slugs = [".png", ".jpg", ".gif"]
for s in slugs:
if URL.lower().endswith(s):
slug = s
try:
ImageResponse = requests.get(URL)
except Exception as e:
print(e)
return
with open(f"Assets/{key}{slug}", "wb") as f:
f.write(ImageResponse.content)
with open("Assets.json", "r") as f:
js = json.loads(f.read())
js[key] = f"Assets/{key}{slug}"
with open("Assets.json", "w") as f:
f.write(json.dumps(js))
async def StockPriceHandler(msg):
content = msg.content.split(" ")
ticker = content[2].upper()
try:
tickerInfo = yf.Ticker(ticker).info
except KeyError:
await msg.channel.send("Could not find ticker symbol")
return
price = tickerInfo["ask"]
company = tickerInfo["longName"]
info = ""
if len(content) > 3:
info = TickerMetricFormater(tickerInfo, content[3:])
await msg.channel.send("{}: ${:,.2f}".format(company, price) + f"{info}")
def TickerMetricFormater(tickerInfo, metrics):
commandHash = {
"MARKET_CAP": "marketCap",
"PE": "trailingPE",
"DIVIDEND": "dividendRate"}
percentage_metrics = ["DIVIDEND"]
currency_metrics = ["MARKET_CAP"]
extraInfo = []
for command in metrics:
if command.upper() in commandHash:
if command.upper() in percentage_metrics:
extraInfo.append("{}: {}%\n".format(
command.upper(), tickerInfo[commandHash[command.upper()]]))
elif command.upper() in currency_metrics:
extraInfo.append("{}: ${:,.2f}\n".format(
command.upper(), tickerInfo[commandHash[command.upper()]]))
else:
extraInfo.append("{}: {}\n".format(
command.upper(), tickerInfo[commandHash[command.upper()]]))
info = "\n" + "".join(extraInfo)
return info
async def MakeTimer(message, time, endMessage):
await message.channel.send(f"Timer set for {time} minutes")
await asyncio.sleep(60 * time)
await message.channel.send(" ".join(endMessage))
client.run(token)
| []
| []
| [
"TOKEN"
]
| [] | ["TOKEN"] | python | 1 | 0 | |
ansible/contrib/inventory/apache-libcloud.py | #!/usr/bin/env python
# (c) 2013, Sebastien Goasguen <[email protected]>
#
# This file is part of Ansible,
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
######################################################################
'''
Apache Libcloud generic external inventory script
=================================
Generates inventory that Ansible can understand by making API request to
Cloud providers using the Apache libcloud library.
This script also assumes there is a libcloud.ini file alongside it
'''
import sys
import os
import argparse
import re
from time import time
import ConfigParser
from six import iteritems, string_types
from libcloud.compute.types import Provider
from libcloud.compute.providers import get_driver
import libcloud.security as sec
try:
import json
except ImportError:
import simplejson as json
class LibcloudInventory(object):
def __init__(self):
''' Main execution path '''
# Inventory grouped by instance IDs, tags, security groups, regions,
# and availability zones
self.inventory = {}
# Index of hostname (address) to instance ID
self.index = {}
# Read settings and parse CLI arguments
self.read_settings()
self.parse_cli_args()
# Cache
if self.args.refresh_cache:
self.do_api_calls_update_cache()
elif not self.is_cache_valid():
self.do_api_calls_update_cache()
# Data to print
if self.args.host:
data_to_print = self.get_host_info()
elif self.args.list:
# Display list of instances for inventory
if len(self.inventory) == 0:
data_to_print = self.get_inventory_from_cache()
else:
data_to_print = self.json_format_dict(self.inventory, True)
print(data_to_print)
def is_cache_valid(self):
''' Determines if the cache files have expired, or if it is still valid '''
if os.path.isfile(self.cache_path_cache):
mod_time = os.path.getmtime(self.cache_path_cache)
current_time = time()
if (mod_time + self.cache_max_age) > current_time:
if os.path.isfile(self.cache_path_index):
return True
return False
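    # A minimal libcloud.ini sketch (section and option names from read_settings below; the values are placeholders, not real credentials):
    #
    #   [driver]
    #   provider = EC2
    #   key = YOUR_ACCESS_KEY
    #   secret = YOUR_SECRET_KEY
    #
    #   [cache]
    #   cache_path = /tmp/ansible-libcloud
    #   cache_max_age = 300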
def read_settings(self):
''' Reads the settings from the libcloud.ini file '''
config = ConfigParser.SafeConfigParser()
libcloud_default_ini_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'libcloud.ini')
libcloud_ini_path = os.environ.get('LIBCLOUD_INI_PATH', libcloud_default_ini_path)
config.read(libcloud_ini_path)
if not config.has_section('driver'):
raise ValueError('libcloud.ini file must contain a [driver] section')
if config.has_option('driver', 'provider'):
self.provider = config.get('driver','provider')
else:
raise ValueError('libcloud.ini does not have a provider defined')
if config.has_option('driver', 'key'):
self.key = config.get('driver','key')
else:
raise ValueError('libcloud.ini does not have a key defined')
if config.has_option('driver', 'secret'):
self.secret = config.get('driver','secret')
else:
raise ValueError('libcloud.ini does not have a secret defined')
        # Optional settings; set defaults first so the Driver call below never references missing attributes.
        self.host = None
        self.secure = True
        self.port = None
        self.path = None
        if config.has_option('driver', 'host'):
            self.host = config.get('driver', 'host')
        if config.has_option('driver', 'secure'):
            self.secure = config.get('driver', 'secure')
        if config.has_option('driver', 'verify_ssl_cert'):
            self.verify_ssl_cert = config.get('driver', 'verify_ssl_cert')
        if config.has_option('driver', 'port'):
            self.port = config.get('driver', 'port')
        if config.has_option('driver', 'path'):
            self.path = config.get('driver', 'path')
        if config.has_option('driver', 'api_version'):
            self.api_version = config.get('driver', 'api_version')
Driver = get_driver(getattr(Provider, self.provider))
self.conn = Driver(key=self.key, secret=self.secret, secure=self.secure,
host=self.host, path=self.path)
# Cache related
cache_path = config.get('cache', 'cache_path')
self.cache_path_cache = cache_path + "/ansible-libcloud.cache"
self.cache_path_index = cache_path + "/ansible-libcloud.index"
self.cache_max_age = config.getint('cache', 'cache_max_age')
def parse_cli_args(self):
'''
Command line argument processing
'''
parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on libcloud supported providers')
parser.add_argument('--list', action='store_true', default=True,
help='List instances (default: True)')
parser.add_argument('--host', action='store',
help='Get all the variables about a specific instance')
parser.add_argument('--refresh-cache', action='store_true', default=False,
help='Force refresh of cache by making API requests to libcloud supported providers (default: False - use cache files)')
self.args = parser.parse_args()
def do_api_calls_update_cache(self):
'''
Do API calls to a location, and save data in cache files
'''
self.get_nodes()
self.write_to_cache(self.inventory, self.cache_path_cache)
self.write_to_cache(self.index, self.cache_path_index)
def get_nodes(self):
'''
Gets the list of all nodes
'''
for node in self.conn.list_nodes():
self.add_node(node)
def get_node(self, node_id):
'''
Gets details about a specific node
'''
return [node for node in self.conn.list_nodes() if node.id == node_id][0]
def add_node(self, node):
'''
Adds a node to the inventory and index, as long as it is
addressable
'''
# Only want running instances
if node.state != 0:
return
# Select the best destination address
        dest = node.public_ips[0] if node.public_ips else None
        if not dest:
            # Skip instances we cannot address (e.g. private VPC subnet)
            return
# Add to index
        self.index[dest] = node.id
# Inventory: Group by instance ID (always a group of 1)
self.inventory[node.name] = [dest]
'''
# Inventory: Group by region
self.push(self.inventory, region, dest)
# Inventory: Group by availability zone
self.push(self.inventory, node.placement, dest)
# Inventory: Group by instance type
self.push(self.inventory, self.to_safe('type_' + node.instance_type), dest)
'''
# Inventory: Group by key pair
if node.extra['key_name']:
self.push(self.inventory, self.to_safe('key_' + node.extra['key_name']), dest)
# Inventory: Group by security group, quick thing to handle single sg
if node.extra['security_group']:
self.push(self.inventory, self.to_safe('sg_' + node.extra['security_group'][0]), dest)
# Inventory: Group by tag
if node.extra['tags']:
for tagkey in node.extra['tags'].keys():
self.push(self.inventory, self.to_safe('tag_' + tagkey + '_' + node.extra['tags'][tagkey]), dest)
def get_host_info(self):
'''
Get variables about a specific host
'''
if len(self.index) == 0:
# Need to load index from cache
self.load_index_from_cache()
        if self.args.host not in self.index:
            # try updating the cache
            self.do_api_calls_update_cache()
        if self.args.host not in self.index:
            # host might not exist anymore
            return self.json_format_dict({}, True)
node_id = self.index[self.args.host]
node = self.get_node(node_id)
instance_vars = {}
        for key in vars(node):
            value = getattr(node, key)
key = self.to_safe('ec2_' + key)
# Handle complex types
if isinstance(value, (int, bool)):
instance_vars[key] = value
elif isinstance(value, string_types):
instance_vars[key] = value.strip()
elif value is None:
instance_vars[key] = ''
elif key == 'ec2_region':
instance_vars[key] = value.name
elif key == 'ec2_tags':
for k, v in iteritems(value):
key = self.to_safe('ec2_tag_' + k)
instance_vars[key] = v
elif key == 'ec2_groups':
group_ids = []
group_names = []
for group in value:
group_ids.append(group.id)
group_names.append(group.name)
instance_vars["ec2_security_group_ids"] = ','.join(group_ids)
instance_vars["ec2_security_group_names"] = ','.join(group_names)
else:
pass
# TODO Product codes if someone finds them useful
#print(key)
#print(type(value))
#print(value)
return self.json_format_dict(instance_vars, True)
def push(self, my_dict, key, element):
'''
Pushed an element onto an array that may not have been defined in
the dict
'''
if key in my_dict:
            my_dict[key].append(element)
else:
my_dict[key] = [element]
def get_inventory_from_cache(self):
'''
Reads the inventory from the cache file and returns it as a JSON
object
'''
cache = open(self.cache_path_cache, 'r')
json_inventory = cache.read()
return json_inventory
def load_index_from_cache(self):
'''
Reads the index from the cache file sets self.index
'''
cache = open(self.cache_path_index, 'r')
json_index = cache.read()
self.index = json.loads(json_index)
def write_to_cache(self, data, filename):
'''
Writes data in JSON format to a file
'''
json_data = self.json_format_dict(data, True)
cache = open(filename, 'w')
cache.write(json_data)
cache.close()
def to_safe(self, word):
'''
Converts 'bad' characters in a string to underscores so they can be
used as Ansible groups
'''
return re.sub("[^A-Za-z0-9\-]", "_", word)
def json_format_dict(self, data, pretty=False):
'''
Converts a dict to a JSON object and dumps it as a formatted
string
'''
if pretty:
return json.dumps(data, sort_keys=True, indent=2)
else:
return json.dumps(data)
def main():
LibcloudInventory()
if __name__ == '__main__':
main()
| []
| []
| [
"LIBCLOUD_INI_PATH"
]
| [] | ["LIBCLOUD_INI_PATH"] | python | 1 | 0 | |
manage.py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
"""Run administrative tasks."""
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'profile_project.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| []
| []
| []
| [] | [] | python | 0 | 0 | |
plugins/callbacks.py | import os
import ast
from pyrogram import Client as trojanz
from pyrogram.types import InlineKeyboardMarkup, InlineKeyboardButton
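# Any non-empty WEBHOOK value selects the sample config (e.g. for webhook-based deployments); otherwise the local config is used.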
if bool(os.environ.get("WEBHOOK", False)):
from sample_config import Config
else:
from config import Config
from script import Script
from database.filters_mdb import del_all, find_filter
from database.connections_mdb import(
all_connections,
active_connection,
if_active,
delete_connection,
make_active,
make_inactive
)
@trojanz.on_callback_query()
async def cb_handler(client, query):
if query.data == "start_data":
await query.answer()
keyboard = InlineKeyboardMarkup(
[
[
InlineKeyboardButton("Command Help", callback_data="help_data")
]
]
)
await query.message.edit_text(
Script.START_MSG.format(query.from_user.mention),
reply_markup=keyboard,
disable_web_page_preview=True
)
return
elif query.data == "help_data":
await query.answer()
keyboard = InlineKeyboardMarkup(
[
[
InlineKeyboardButton("Movie Group", url="https://t.me/Goodwillmovies"),
InlineKeyboardButton("About Me", callback_data="about_data")
],
[
InlineKeyboardButton("BOT Channel", url="https://t.me/milnabotchannel"),
InlineKeyboardButton("Support Group", url="https://t.me/milnabotsgroup")
]
]
)
await query.message.edit_text(
Script.HELP_MSG,
reply_markup=keyboard,
disable_web_page_preview=True
)
return
elif query.data == "about_data":
await query.answer()
keyboard = InlineKeyboardMarkup(
[
[
InlineKeyboardButton(
"BOT LIST", url="https://t.me/milnabotchannel/14")
],
[
InlineKeyboardButton("BACK", callback_data="help_data"),
InlineKeyboardButton("CLOSE", callback_data="close_data"),
]
]
)
await query.message.edit_text(
Script.ABOUT_MSG,
reply_markup=keyboard,
disable_web_page_preview=True
)
return
elif query.data == "close_data":
await query.message.delete()
elif query.data == "delallconfirm":
userid = query.from_user.id
chat_type = query.message.chat.type
if chat_type == "private":
grpid = await active_connection(str(userid))
if grpid is not None:
grp_id = grpid
try:
chat = await client.get_chat(grpid)
title = chat.title
except:
await query.message.edit_text("Make sure I'm present in your group!!", quote=True)
return
else:
                await query.message.edit_text(
                    "I'm not connected to any groups!\nCheck /connections or connect to any groups"
                )
return
elif (chat_type == "group") or (chat_type == "supergroup"):
grp_id = query.message.chat.id
title = query.message.chat.title
else:
return
st = await client.get_chat_member(grp_id, userid)
if (st.status == "creator") or (str(userid) in Config.AUTH_USERS):
await del_all(query.message, grp_id, title)
else:
await query.answer("You need to be Group Owner or an Auth User to do that!",show_alert=True)
elif query.data == "delallcancel":
userid = query.from_user.id
chat_type = query.message.chat.type
if chat_type == "private":
await query.message.reply_to_message.delete()
await query.message.delete()
elif (chat_type == "group") or (chat_type == "supergroup"):
grp_id = query.message.chat.id
st = await client.get_chat_member(grp_id, userid)
if (st.status == "creator") or (str(userid) in Config.AUTH_USERS):
await query.message.delete()
try:
await query.message.reply_to_message.delete()
except:
pass
else:
await query.answer("Thats not for you!!",show_alert=True)
elif "groupcb" in query.data:
await query.answer()
group_id = query.data.split(":")[1]
title = query.data.split(":")[2]
act = query.data.split(":")[3]
user_id = query.from_user.id
if act == "":
stat = "CONNECT"
cb = "connectcb"
else:
stat = "DISCONNECT"
cb = "disconnect"
keyboard = InlineKeyboardMarkup([
[InlineKeyboardButton(f"{stat}", callback_data=f"{cb}:{group_id}:{title}"),
InlineKeyboardButton("DELETE", callback_data=f"deletecb:{group_id}")],
[InlineKeyboardButton("BACK", callback_data="backcb")]
])
await query.message.edit_text(
f"Group Name : **{title}**\nGroup ID : `{group_id}`",
reply_markup=keyboard,
parse_mode="md"
)
return
elif "connectcb" in query.data:
await query.answer()
group_id = query.data.split(":")[1]
title = query.data.split(":")[2]
user_id = query.from_user.id
mkact = await make_active(str(user_id), str(group_id))
if mkact:
await query.message.edit_text(
f"Connected to **{title}**",
parse_mode="md"
)
return
else:
await query.message.edit_text(
f"Some error occured!!",
parse_mode="md"
)
return
elif "disconnect" in query.data:
await query.answer()
title = query.data.split(":")[2]
user_id = query.from_user.id
mkinact = await make_inactive(str(user_id))
if mkinact:
await query.message.edit_text(
f"Disconnected from **{title}**",
parse_mode="md"
)
return
else:
await query.message.edit_text(
f"Some error occured!!",
parse_mode="md"
)
return
elif "deletecb" in query.data:
await query.answer()
user_id = query.from_user.id
group_id = query.data.split(":")[1]
delcon = await delete_connection(str(user_id), str(group_id))
if delcon:
await query.message.edit_text(
"Successfully deleted connection"
)
return
else:
await query.message.edit_text(
f"Some error occured!!",
parse_mode="md"
)
return
elif query.data == "backcb":
await query.answer()
userid = query.from_user.id
groupids = await all_connections(str(userid))
if groupids is None:
await query.message.edit_text(
"There are no active connections!! Connect to some groups first.",
)
return
buttons = []
for groupid in groupids:
try:
ttl = await client.get_chat(int(groupid))
title = ttl.title
active = await if_active(str(userid), str(groupid))
if active:
act = " - ACTIVE"
else:
act = ""
buttons.append(
[
InlineKeyboardButton(
text=f"{title}{act}", callback_data=f"groupcb:{groupid}:{title}:{act}"
)
]
)
except:
pass
if buttons:
await query.message.edit_text(
"Your connected group details ;\n\n",
reply_markup=InlineKeyboardMarkup(buttons)
)
elif "alertmessage" in query.data:
grp_id = query.message.chat.id
i = query.data.split(":")[1]
keyword = query.data.split(":")[2]
reply_text, btn, alerts, fileid = await find_filter(grp_id, keyword)
if alerts is not None:
alerts = ast.literal_eval(alerts)
alert = alerts[int(i)]
alert = alert.replace("\\n", "\n").replace("\\t", "\t")
            await query.answer(alert, show_alert=True)
| []
| []
| [
"WEBHOOK"
]
| [] | ["WEBHOOK"] | python | 1 | 0 | |
tests/test_payments.py | import unittest
import os
from app.models import Payment, PaymentMethodType, PaymentStatus, db
from app.custom_exceptions import DataValidationError
from app import app
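# Tests run against a local SQLite file by default; set DATABASE_URI to point at another database (e.g. Postgres in CI).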
DATABASE_URI = os.getenv('DATABASE_URI', 'sqlite:///../db/test.db')
######################################################################
# T E S T C A S E S
######################################################################
class TestPayments(unittest.TestCase):
""" Test Cases for Payments API """
@classmethod
def setUpClass(cls):
""" These run once per Test suite """
app.debug = False
# Set up the test database
app.config['SQLALCHEMY_DATABASE_URI'] = DATABASE_URI
@classmethod
def tearDownClass(cls):
pass
def setUp(self):
Payment.init_db()
db.drop_all() # clean up the last tests
db.create_all() # make our sqlalchemy tables
def tearDown(self):
db.session.remove()
db.drop_all()
def test_create_a_payment(self):
""" Create a payment and assert that it exists """
payment = Payment(customer_id=12311, order_id = 11151, payment_method_type = PaymentMethodType.CREDIT, payment_status = PaymentStatus.PAID, default_payment_type = False)
self.assertTrue(payment != None)
self.assertEqual(payment.id, None)
self.assertEqual(payment.customer_id, 12311)
self.assertEqual(payment.order_id, 11151)
self.assertEqual(payment.payment_method_type, PaymentMethodType.CREDIT)
self.assertEqual(payment.payment_status, PaymentStatus.PAID)
self.assertEqual(payment.default_payment_type, False)
def test_add_a_payment(self):
""" Create a payment and add it to database """
payments = Payment.all()
self.assertEqual(payments, [])
payment = Payment(customer_id=12011, order_id=11051, payment_method_type=PaymentMethodType.CREDIT, payment_status=PaymentStatus.PAID, default_payment_type=True)
self.assertTrue(payment != None)
self.assertEqual(payment.id, None)
payment.save()
# Assert that it was assigned an id and shows up in the database
self.assertEqual(payment.id, 1)
payments = Payment.all()
self.assertEqual(len(payments), 1)
def test_find_payment(self):
""" Find a Payment by it's ID """
payment11 = Payment(customer_id=12310, order_id = 13151, payment_method_type = PaymentMethodType.CREDIT, payment_status = PaymentStatus.PAID, default_payment_type = True)
payment11.save()
payment = Payment.find(payment11.id)
self.assertIsNot(payment, None)
self.assertEqual(payment.id, payment11.id)
self.assertEqual(payment.customer_id, 12310)
self.assertEqual(payment.order_id, 13151)
self.assertEqual(payment.payment_method_type, PaymentMethodType.CREDIT)
self.assertEqual(payment.payment_status, PaymentStatus.PAID)
self.assertEqual(payment.default_payment_type, True)
def test_find_payment_or_404(self):
""" Find a Payment by it's ID """
payment11 = Payment(customer_id=12310, order_id = 13151, payment_method_type = PaymentMethodType.CREDIT, payment_status = PaymentStatus.PAID, default_payment_type = True)
payment11.save()
payment = Payment.find_or_404(payment11.id)
self.assertIsNot(payment, None)
self.assertEqual(payment.id, payment11.id)
self.assertEqual(payment.customer_id, 12310)
self.assertEqual(payment.order_id, 13151)
self.assertEqual(payment.payment_method_type, PaymentMethodType.CREDIT)
self.assertEqual(payment.payment_status, PaymentStatus.PAID)
self.assertEqual(payment.default_payment_type, True)
def test_update_payment(self):
""" Update a Payment Resource """
payment = Payment(customer_id=12310, order_id = 13151, payment_method_type = PaymentMethodType.CREDIT, payment_status = PaymentStatus.PAID, default_payment_type = True)
payment.save()
self.assertEqual(payment.id, 1)
# Change it and save it
payment.payment_method_type = PaymentMethodType.DEBIT
payment.save()
self.assertEqual(payment.id, 1)
# Fetch it back and make sure the id hasn't changed
# but the data did change
payments = Payment.all()
self.assertEqual(len(payments), 1)
self.assertEqual(payments[0].payment_method_type, PaymentMethodType.DEBIT)
def test_set_payment_default(self):
""" Set a payment as default """
payment = Payment(customer_id=12310, order_id = 13151, payment_method_type = PaymentMethodType.CREDIT, payment_status = PaymentStatus.PAID, default_payment_type = False)
payment.save()
self.assertEqual(payment.default_payment_type, False)
# Retrieve from DB and confirm it saved correctly
payment2 = Payment.find(payment.id)
self.assertEqual(payment2.default_payment_type, False)
# Change it and save it
payment2.set_default()
payment2.save()
# Retrieve from DB and confirm it saved correctly
payment3 = Payment.find(payment.id)
self.assertEqual(payment3.default_payment_type, True)
def test_set_payment_not_default(self):
""" Set a payment as not default """
payment = Payment(customer_id=12310, order_id = 13151, payment_method_type = PaymentMethodType.CREDIT, payment_status = PaymentStatus.PAID, default_payment_type = True)
payment.save()
self.assertEqual(payment.default_payment_type, True)
# Retrieve from DB and confirm it saved correctly
payment2 = Payment.find(payment.id)
self.assertEqual(payment2.default_payment_type, True)
# Change it and save it
payment2.unset_default()
payment2.save()
# Retrieve from DB and confirm it saved correctly
payment3 = Payment.find(payment.id)
self.assertEqual(payment3.default_payment_type, False)
def test_delete_a_payment(self):
""" Delete a Payment """
payment = Payment(customer_id=12310, order_id = 13159, payment_method_type = PaymentMethodType.CREDIT, payment_status = PaymentStatus.PAID, default_payment_type = False)
payment.save()
self.assertEqual(len(Payment.all()), 1)
# delete the card and make sure it isn't in the database
payment.delete()
self.assertEqual(len(Payment.all()), 0)
def test_serialize_a_payment(self):
""" Test serialization of a Payment Resource """
payment = Payment(customer_id=12310, order_id = 13151, payment_method_type = PaymentMethodType.CREDIT, payment_status = PaymentStatus.PAID, default_payment_type = True)
data = payment.serialize()
self.assertNotEqual(data, None)
self.assertIn('id', data)
self.assertEqual(data['id'], None)
self.assertIn('customer_id', data)
self.assertEqual(data['customer_id'], 12310)
self.assertIn('order_id', data)
self.assertEqual(data['order_id'], 13151)
self.assertIn('payment_method_type', data)
self.assertEqual(data['payment_method_type'], PaymentMethodType.CREDIT)
self.assertIn('payment_status', data)
self.assertEqual(data['payment_status'], PaymentStatus.PAID)
self.assertIn('default_payment_type', data)
self.assertEqual(data['default_payment_type'], True)
def test_deserialize_a_payment(self):
""" Test deserialization of a Payment """
data = {"customer_id":12311, "order_id":11158, "payment_method_type":PaymentMethodType.CREDIT, "payment_status":PaymentStatus.PAID, "default_payment_type":False}
payment = Payment()
payment.deserialize(data)
self.assertNotEqual(payment, None)
self.assertEqual(payment.id, None)
self.assertEqual(payment.customer_id, 12311)
self.assertEqual(payment.order_id, 11158)
self.assertEqual(payment.payment_method_type, PaymentMethodType.CREDIT)
self.assertEqual(payment.payment_status, PaymentStatus.PAID)
self.assertEqual(payment.default_payment_type, False)
    def test_deserialize_a_payment_missing_data(self):
        """ Test deserialization of a Payment with a missing required field """
data = {"customer_id":12311, "payment_method_type":PaymentMethodType.CREDIT, "payment_status":PaymentStatus.PAID, "default_payment_type":False}
payment = Payment()
self.assertRaises(DataValidationError, payment.deserialize, data)
    def test_get_default_payment_type(self):
        """ Test fetching payments marked as default """
payment1 = Payment(customer_id=12310, order_id = 13159, payment_method_type = PaymentMethodType.CREDIT, payment_status = PaymentStatus.PAID, default_payment_type = True)
payment1.save()
payment2 = Payment(customer_id=12310, order_id = 13159, payment_method_type = PaymentMethodType.CREDIT, payment_status = PaymentStatus.PAID, default_payment_type = True)
payment2.save()
        payments = Payment.get_default_payment_type()
self.assertEqual(len(payments), 2)
def test_deserialize_bad_data(self):
""" Test deserialization of bad data """
data = "this is not a dictionary"
payment = Payment()
self.assertRaises(DataValidationError, payment.deserialize, data)
######################################################################
# M A I N
######################################################################
if __name__ == '__main__':
unittest.main()
| []
| []
| [
"DATABASE_URI"
]
| [] | ["DATABASE_URI"] | python | 1 | 0 | |
chain/vm/vmi.go | package vm
import (
"context"
"os"
"github.com/filecoin-project/go-state-types/network"
"github.com/filecoin-project/lotus/chain/types"
"github.com/ipfs/go-cid"
)
type Interface interface {
// Applies the given message onto the VM's current state, returning the result of the execution
ApplyMessage(ctx context.Context, cmsg types.ChainMsg) (*ApplyRet, error)
// Same as above but for system messages (the Cron invocation and block reward payments).
// Must NEVER fail.
ApplyImplicitMessage(ctx context.Context, msg *types.Message) (*ApplyRet, error)
// Flush all buffered objects into the state store provided to the VM at construction.
Flush(ctx context.Context) (cid.Cid, error)
}
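// Setting LOTUS_USE_FVM_EXPERIMENTAL=1 opts in to the FVM one network version early, for testing and validation.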
var experimentalUseFvm = os.Getenv("LOTUS_USE_FVM_EXPERIMENTAL") == "1"
func NewVM(ctx context.Context, opts *VMOpts) (Interface, error) {
if opts.NetworkVersion >= network.Version16 {
return NewFVM(ctx, opts)
}
// Remove after v16 upgrade, this is only to support testing and validation of the FVM
if experimentalUseFvm && opts.NetworkVersion >= network.Version15 {
return NewFVM(ctx, opts)
}
return NewLegacyVM(ctx, opts)
}
| [
"\"LOTUS_USE_FVM_EXPERIMENTAL\""
]
| []
| [
"LOTUS_USE_FVM_EXPERIMENTAL"
]
| [] | ["LOTUS_USE_FVM_EXPERIMENTAL"] | go | 1 | 0 | |
testdata/02_ping-redis/pingredis_test.go | // Copyright 2019 Bloomberg Finance L.P.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package pingredis_test
import (
"context"
"fmt"
"os"
"testing"
"github.com/bloomberg/docket"
pingredis "github.com/bloomberg/docket/testdata/02_ping-redis"
)
// TestPingRedis tests the pingRedis function.
func TestPingRedis(t *testing.T) {
ctx := context.Background()
var dctx docket.Context
docket.Run(ctx, &dctx, t, func() {
testPingRedis(t, &dctx)
})
}
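// Outside debug mode the Redis address must be supplied by the test harness,
// e.g. (illustrative invocation, not the project's documented one):
//   REDIS_ADDR=localhost:6379 go test ./...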
func testPingRedis(t *testing.T, dctx *docket.Context) {
ctx := context.Background()
var redisAddr string
if dctx.Mode() == "debug" {
const defaultRedisPort = 6379
port, err := dctx.PublishedPort(ctx, "redis", defaultRedisPort)
if err != nil {
t.Fatalf("could not determine published redis port: %v", err)
}
redisAddr = fmt.Sprintf("localhost:%d", port)
} else {
redisAddr = os.Getenv("REDIS_ADDR")
if redisAddr == "" {
t.Fatalf("missing REDIS_ADDR")
}
}
t.Logf("redisAddr = %q", redisAddr)
pong, err := pingredis.Ping(redisAddr)
t.Logf("pong = %q", pong)
if err != nil {
t.Fatalf("failed to ping redis: %v", err)
}
if pong != "PONG" {
t.Fatalf(`expected "PONG" but received %q`, pong)
}
}
| [
"\"REDIS_ADDR\""
]
| []
| [
"REDIS_ADDR"
]
| [] | ["REDIS_ADDR"] | go | 1 | 0 | |
generator/generator.go | package generator
import (
"archive/zip"
"fmt"
"io/ioutil"
"log"
"os"
"path/filepath"
"strings"
"text/template"
"github.com/gobuffalo/packr"
"github.com/rs/xid"
"go.okkur.org/reposeed/cmd/config"
templates "go.okkur.org/reposeed/cmd/templates"
)
func createDir(storagePath string, filePath string) error {
dir := strings.Split(filePath, "/")
if len(dir) > 1 {
dir = dir[:len(dir)-1]
path := strings.Join(dir, "/")
err := os.MkdirAll(storagePath+path, os.ModePerm)
if err != nil {
return fmt.Errorf("unable to create path: %s", storagePath+path)
}
return nil
}
return nil
}
func initializeZipWriter(file string) (*os.File, *zip.Writer, error) {
zipFile, err := os.Create(file)
if err != nil {
log.Fatal(err)
}
zipWriter := zip.NewWriter(zipFile)
return zipFile, zipWriter, nil
}
func addToZip(writer *zip.Writer, file string) error {
fileContent, err := ioutil.ReadFile(file)
if err != nil {
return err
}
fileName := strings.Split(file, "/")
fileWriter, err := writer.Create(strings.Join(fileName[3:], "/"))
if err != nil {
return err
}
_, err = fileWriter.Write(fileContent)
if err != nil {
return err
}
return nil
}
func parseTemplates(box packr.Box) *template.Template {
templatesName := box.List()
templates := &template.Template{}
for _, templateName := range templatesName {
templateFile, err := box.Open(templateName)
if err != nil {
log.Fatalf("could not open the template file: %s", templateName)
}
defer templateFile.Close()
templateContent := box.String(templateName)
templates.New(templateName).Parse(templateContent)
}
return templates
}
func generateFile(config config.Config, templates *template.Template, newPath string, projectPath string, writer *zip.Writer) error {
if _, e := os.Stat(newPath); os.IsNotExist(e) {
os.MkdirAll(filepath.Dir(newPath), os.ModePerm)
}
err := createDir(projectPath, newPath)
if err != nil {
return fmt.Errorf("unable to create path %s", err)
}
file, err := os.Create(projectPath + newPath)
if err != nil {
return fmt.Errorf("unable to create file: %s", err)
}
defer file.Close()
err = templates.Lookup(newPath).Execute(file, config)
if err != nil {
return fmt.Errorf("unable to parse template: %s", err)
}
err = addToZip(writer, file.Name())
if err != nil {
return fmt.Errorf("unable to add the generated file to zip: %s", err)
}
return nil
}
// CreateFiles Generates the files based on the given config
func CreateFiles(config config.Config) (string, error) {
box := templates.GetTemplates()
temps := parseTemplates(box)
guid := xid.New()
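	// Generated files land under $STORAGE/<guid>/; STORAGE is assumed to end with a path separator (or be empty for a relative path).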
projectPath := os.Getenv("STORAGE") + guid.String() + "/"
err := os.MkdirAll(projectPath, os.ModePerm)
if err != nil {
return "", err
}
zip, writer, err := initializeZipWriter(projectPath + config.Project.Name + ".zip")
defer zip.Close()
if err != nil {
return "", err
}
for _, templateName := range box.List() {
file, _ := box.Open(templateName)
fileStat, _ := file.Stat()
if fileStat.IsDir() {
continue
}
if strings.Contains(templateName, "partials/") {
continue
}
err := generateFile(config, temps, templateName, projectPath, writer)
if err != nil {
return "", err
}
}
err = writer.Close()
if err != nil {
log.Fatal(err)
}
return zip.Name(), nil
}
| [
"\"STORAGE\""
]
| []
| [
"STORAGE"
]
| [] | ["STORAGE"] | go | 1 | 0 | |
iterative/gcp/provider.go | package gcp
import (
"context"
"encoding/base64"
"errors"
"fmt"
"log"
"os"
"regexp"
"strconv"
"strings"
"time"
"golang.org/x/oauth2"
"golang.org/x/oauth2/google"
gcp_compute "google.golang.org/api/compute/v1"
"google.golang.org/api/googleapi"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
)
func ResourceMachineCreate(ctx context.Context, d *schema.ResourceData, m interface{}) error {
project, service, err := getProjectService()
if err != nil {
return err
}
networkName := "iterative"
instanceName := d.Get("name").(string)
instanceZone := getRegion(d.Get("region").(string))
instanceHddSize := int64(d.Get("instance_hdd_size").(int))
instancePublicSshKey := fmt.Sprintf("%s:%s %s\n", "ubuntu", strings.TrimSpace(d.Get("ssh_public").(string)), "ubuntu")
instanceMetadata := map[string]string{}
for key, value := range d.Get("metadata").(map[string]interface{}) {
instanceMetadata[key] = value.(string)
}
instanceIsPreemptible := d.Get("spot").(bool)
if d.Get("spot_price").(float64) != -1 {
return errors.New("Google Cloud preemptible instances don't have a bidding price!")
}
instanceRawStartupScript, err := base64.StdEncoding.DecodeString(d.Get("startup_script").(string))
if err != nil {
return err
}
instanceStartupScript := string(instanceRawStartupScript)
instanceImageString := d.Get("image").(string)
if instanceImageString == "" {
instanceImageString = "ubuntu-os-cloud/ubuntu-2004-lts"
}
projectRegex := "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))"
if result, err := regexp.MatchString("^"+projectRegex+"/[-_a-zA-Z0-9]+"+"$", instanceImageString); err != nil || !result {
return errors.New("Malformed image name! Use project/family to select an image")
}
instanceImageComponents := strings.Split(instanceImageString, "/")
instanceImageProject := instanceImageComponents[0]
instanceImageFamily := instanceImageComponents[1]
instanceImage, err := service.Images.GetFromFamily(instanceImageProject, instanceImageFamily).Do()
if err != nil {
return err
}
instanceType, err := getInstanceType(d.Get("instance_type").(string), d.Get("instance_gpu").(string))
if err != nil {
return err
}
instanceMachineType, err := service.MachineTypes.Get(project, instanceZone, instanceType["machine"]["type"]).Do()
if err != nil {
return err
}
instanceDiskType, err := service.DiskTypes.Get(project, instanceZone, "pd-balanced").Do()
if err != nil {
return err
}
instanceHostMaintenanceBehavior := "MIGRATE"
if instanceIsPreemptible {
instanceHostMaintenanceBehavior = "TERMINATE"
}
instanceAccelerators := []*gcp_compute.AcceleratorConfig{}
if instanceType["accelerator"]["count"] != "0" {
acceleratorType, err := service.AcceleratorTypes.Get(project, instanceZone, instanceType["accelerator"]["type"]).Do()
if err != nil {
return err
}
acceleratorCount, err := strconv.Atoi(instanceType["accelerator"]["count"])
if err != nil {
return err
}
instanceHostMaintenanceBehavior = "TERMINATE"
instanceAccelerators = []*gcp_compute.AcceleratorConfig{
{
AcceleratorCount: int64(acceleratorCount),
AcceleratorType: acceleratorType.SelfLink,
},
}
}
network, err := service.Networks.Get(project, networkName).Do()
if err != nil {
networkDefinition := &gcp_compute.Network{
Name: networkName,
AutoCreateSubnetworks: true,
RoutingConfig: &gcp_compute.NetworkRoutingConfig{
RoutingMode: "REGIONAL",
},
}
networkInsertOperation, err := service.Networks.Insert(project, networkDefinition).Do()
if err != nil {
return err
}
networkGetOperationCall := service.GlobalOperations.Get(project, networkInsertOperation.Name)
_, err = waitForOperation(ctx, d.Timeout(schema.TimeoutCreate), networkGetOperationCall.Do)
if err != nil {
return err
}
network, err = service.Networks.Get(project, networkName).Do()
if err != nil {
return err
}
}
firewallEgressDefinition := &gcp_compute.Firewall{
Name: instanceName + "-egress",
Network: network.SelfLink,
Direction: "EGRESS",
Priority: 1,
TargetTags: []string{instanceName},
Allowed: []*gcp_compute.FirewallAllowed{
{
IPProtocol: "all",
},
},
DestinationRanges: []string{
"0.0.0.0/0",
},
}
firewallEgressInsertOperation, err := service.Firewalls.Insert(project, firewallEgressDefinition).Do()
if err != nil {
return err
}
firewallEgressGetOperationCall := service.GlobalOperations.Get(project, firewallEgressInsertOperation.Name)
_, err = waitForOperation(ctx, d.Timeout(schema.TimeoutCreate), firewallEgressGetOperationCall.Do)
if err != nil {
return err
}
firewallIngressDefinition := &gcp_compute.Firewall{
Name: instanceName + "-ingress",
Network: network.SelfLink,
Direction: "INGRESS",
Priority: 1,
TargetTags: []string{instanceName},
Allowed: []*gcp_compute.FirewallAllowed{
{
IPProtocol: "tcp",
Ports: []string{
"22",
},
},
},
SourceRanges: []string{
"0.0.0.0/0",
},
}
firewallIngressInsertOperation, err := service.Firewalls.Insert(project, firewallIngressDefinition).Do()
if err != nil {
return err
}
firewallIngressGetOperationCall := service.GlobalOperations.Get(project, firewallIngressInsertOperation.Name)
_, err = waitForOperation(ctx, d.Timeout(schema.TimeoutCreate), firewallIngressGetOperationCall.Do)
if err != nil {
return err
}
instanceDefinition := &gcp_compute.Instance{
Name: instanceName,
MachineType: instanceMachineType.SelfLink,
Disks: []*gcp_compute.AttachedDisk{
{
Boot: true,
AutoDelete: true,
Type: "PERSISTENT",
Mode: "READ_WRITE",
InitializeParams: &gcp_compute.AttachedDiskInitializeParams{
DiskName: instanceName,
SourceImage: instanceImage.SelfLink,
DiskSizeGb: instanceHddSize,
DiskType: instanceDiskType.SelfLink,
},
},
},
NetworkInterfaces: []*gcp_compute.NetworkInterface{
{
Network: network.SelfLink,
AccessConfigs: []*gcp_compute.AccessConfig{
{
NetworkTier: "STANDARD",
},
},
},
},
Tags: &gcp_compute.Tags{
Items: []string{instanceName},
},
Scheduling: &gcp_compute.Scheduling{
OnHostMaintenance: instanceHostMaintenanceBehavior,
Preemptible: instanceIsPreemptible,
},
Labels: instanceMetadata,
Metadata: &gcp_compute.Metadata{
Items: []*gcp_compute.MetadataItems{
{
Key: "ssh-keys",
Value: &instancePublicSshKey,
},
{
Key: "startup-script",
Value: &instanceStartupScript,
},
},
},
GuestAccelerators: instanceAccelerators,
}
instanceInsertOperation, err := service.Instances.Insert(project, instanceZone, instanceDefinition).Do()
if err != nil {
return err
}
instanceGetOperationCall := service.ZoneOperations.Get(project, instanceZone, instanceInsertOperation.Name)
_, err = waitForOperation(ctx, d.Timeout(schema.TimeoutCreate), instanceGetOperationCall.Do)
if err != nil {
return err
}
instance, err := service.Instances.Get(project, instanceZone, instanceName).Do()
if err != nil {
return err
}
d.Set("instance_ip", instance.NetworkInterfaces[0].AccessConfigs[0].NatIP)
d.Set("instance_launch_time", time.Now().String())
return nil
}
func ResourceMachineDelete(ctx context.Context, d *schema.ResourceData, m interface{}) error {
project, service, err := getProjectService()
if err != nil {
return err
}
instanceZone := getRegion(d.Get("region").(string))
instanceName := d.Get("name").(string)
service.Instances.Delete(project, instanceZone, instanceName).Do()
service.Firewalls.Delete(project, instanceName+"-ingress").Do()
service.Firewalls.Delete(project, instanceName+"-egress").Do()
return nil
}
func getProjectService() (string, *gcp_compute.Service, error) {
var credentials *google.Credentials
var err error
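	// Prefer inline JSON credentials from GOOGLE_APPLICATION_CREDENTIALS_DATA; otherwise fall back to Application Default Credentials.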
if credentialsData := []byte(os.Getenv("GOOGLE_APPLICATION_CREDENTIALS_DATA")); len(credentialsData) > 0 {
credentials, err = google.CredentialsFromJSON(oauth2.NoContext, credentialsData, gcp_compute.ComputeScope)
} else {
credentials, err = google.FindDefaultCredentials(oauth2.NoContext, gcp_compute.ComputeScope)
}
if err != nil {
return "", nil, err
}
service, err := gcp_compute.New(oauth2.NewClient(oauth2.NoContext, credentials.TokenSource))
if err != nil {
return "", nil, err
}
if credentials.ProjectID == "" {
return "", nil, errors.New("Couldn't extract the project identifier from the given credentials!")
}
os.Setenv("GOOGLE_APPLICATION_CREDENTIALS_DATA", string(credentials.JSON))
return credentials.ProjectID, service, nil
}
func waitForOperation(ctx context.Context, timeout time.Duration, function func(...googleapi.CallOption) (*gcp_compute.Operation, error), arguments ...googleapi.CallOption) (*gcp_compute.Operation, error) {
var result *gcp_compute.Operation
err := resource.RetryContext(ctx, timeout, func() *resource.RetryError {
operation, err := function(arguments...)
log.Printf("[DEBUG] Waiting for operation: (%#v, %#v)", operation, err)
if err != nil {
return resource.NonRetryableError(err)
}
if operation.Status != "DONE" {
err := errors.New("Waiting for operation to complete...")
return resource.RetryableError(err)
}
if operation.Error != nil {
err := fmt.Errorf("Operation error: %#v", *operation.Error.Errors[0])
return resource.NonRetryableError(err)
}
result = operation
return nil
})
return result, err
}
func getRegion(region string) string {
instanceRegions := make(map[string]string)
instanceRegions["us-east"] = "us-east1-c"
instanceRegions["us-west"] = "us-west1-b"
instanceRegions["eu-north"] = "europe-north1-a"
instanceRegions["eu-west"] = "europe-west1-d"
if val, ok := instanceRegions[region]; ok {
return val
}
return region
}
func getInstanceType(instanceType string, instanceGPU string) (map[string]map[string]string, error) {
instanceTypes := make(map[string]map[string]map[string]string)
instanceTypes["m"] = map[string]map[string]string{
"accelerator": {
"count": "0",
"type": "",
},
"machine": {
"type": "e2-custom-8-32768",
},
}
instanceTypes["l"] = map[string]map[string]string{
"accelerator": {
"count": "0",
"type": "",
},
"machine": {
"type": "e2-custom-32-131072",
},
}
instanceTypes["xl"] = map[string]map[string]string{
"accelerator": {
"count": "0",
"type": "",
},
"machine": {
"type": "n2-custom-64-262144",
},
}
instanceTypes["mk80"] = map[string]map[string]string{
"accelerator": {
"count": "1",
"type": "nvidia-tesla-k80",
},
"machine": {
"type": "custom-8-53248",
},
}
instanceTypes["lk80"] = map[string]map[string]string{
"accelerator": {
"count": "4",
"type": "nvidia-tesla-k80",
},
"machine": {
"type": "custom-32-131072",
},
}
instanceTypes["xlk80"] = map[string]map[string]string{
"accelerator": {
"count": "8",
"type": "nvidia-tesla-k80",
},
"machine": {
"type": "custom-64-212992-ext",
},
}
instanceTypes["mv100"] = map[string]map[string]string{
"accelerator": {
"count": "1",
"type": "nvidia-tesla-v100",
},
"machine": {
"type": "custom-8-65536-ext",
},
}
instanceTypes["lv100"] = map[string]map[string]string{
"accelerator": {
"count": "4",
"type": "nvidia-tesla-v100",
},
"machine": {
"type": "custom-32-262144-ext",
},
}
instanceTypes["xlv100"] = map[string]map[string]string{
"accelerator": {
"count": "8",
"type": "nvidia-tesla-v100",
},
"machine": {
"type": "custom-64-524288-ext",
},
}
if val, ok := instanceTypes[instanceType+instanceGPU]; ok {
return val, nil
}
if val, ok := instanceTypes[instanceType]; ok {
return map[string]map[string]string{
"accelerator": {
"count": val["accelerator"]["count"],
"type": instanceGPU,
},
"machine": {
"type": val["machine"]["type"],
},
}, nil
}
if instanceGPU != "" {
switch instanceGPU {
case "k80":
instanceGPU = "nvidia-tesla-k80"
case "v100":
instanceGPU = "nvidia-tesla-v100"
}
return map[string]map[string]string{
"accelerator": {
"count": "1",
"type": instanceGPU,
},
"machine": {
"type": instanceType,
},
}, nil
}
return map[string]map[string]string{
"accelerator": {
"count": "0",
"type": "",
},
"machine": {
"type": instanceType,
},
}, nil
}
| [
"\"GOOGLE_APPLICATION_CREDENTIALS_DATA\""
]
| []
| [
"GOOGLE_APPLICATION_CREDENTIALS_DATA"
]
| [] | ["GOOGLE_APPLICATION_CREDENTIALS_DATA"] | go | 1 | 0 | |
selfdrive/test/test_onroad.py | #!/usr/bin/env python3
import json
import os
import subprocess
import time
import numpy as np
import unittest
from collections import Counter
from pathlib import Path
from cereal import car
import cereal.messaging as messaging
from cereal.services import service_list
from common.basedir import BASEDIR
from common.timeout import Timeout
from common.params import Params
from selfdrive.controls.lib.events import EVENTS, ET
from selfdrive.hardware import EON, TICI
from selfdrive.loggerd.config import ROOT
from selfdrive.test.helpers import set_params_enabled, release_only
from tools.lib.logreader import LogReader
# Baseline CPU usage by process
PROCS = {
"selfdrive.controls.controlsd": 55.0,
"./loggerd": 45.0,
"./locationd": 9.1,
"selfdrive.controls.plannerd": 22.6,
"./_ui": 20.0,
"selfdrive.locationd.paramsd": 14.0,
"./camerad": 7.07,
"./_sensord": 6.17,
"selfdrive.controls.radard": 7.0,
"./_modeld": 4.48,
"./boardd": 3.63,
"./_dmonitoringmodeld": 2.67,
"selfdrive.thermald.thermald": 5.36,
"selfdrive.locationd.calibrationd": 2.0,
"./_soundd": 1.0,
"selfdrive.monitoring.dmonitoringd": 1.90,
"./proclogd": 1.54,
"selfdrive.logmessaged": 0.2,
"./clocksd": 0.02,
"./ubloxd": 0.02,
"selfdrive.tombstoned": 0,
"./logcatd": 0,
}
if EON:
PROCS.update({
"selfdrive.hardware.eon.androidd": 0.4,
"selfdrive.hardware.eon.shutdownd": 0.4,
})
if TICI:
PROCS.update({
"./loggerd": 70.0,
"selfdrive.controls.controlsd": 31.0,
"./camerad": 31.0,
"./_ui": 33.0,
"selfdrive.controls.plannerd": 11.7,
"./_dmonitoringmodeld": 10.0,
"selfdrive.locationd.paramsd": 5.0,
"selfdrive.controls.radard": 4.5,
"selfdrive.thermald.thermald": 3.87,
})
TIMINGS = {
# rtols: max/min, rsd
"can": [2.5, 0.35],
"pandaStates": [2.5, 0.35],
"peripheralState": [2.5, 0.35],
"sendcan": [2.5, 0.35],
"carState": [2.5, 0.35],
"carControl": [2.5, 0.35],
"controlsState": [2.5, 0.35],
"lateralPlan": [2.5, 0.5],
"longitudinalPlan": [2.5, 0.5],
"roadCameraState": [2.5, 0.35],
"driverCameraState": [2.5, 0.35],
"modelV2": [2.5, 0.35],
"driverState": [2.5, 0.35],
"liveLocationKalman": [2.5, 0.35],
}
if EON:
TIMINGS.update({
"roadCameraState": [2.5, 0.45],
})
if TICI:
TIMINGS.update({
"wideRoadCameraState": [1.5, 0.35],
})
def cputime_total(ct):
return ct.cpuUser + ct.cpuSystem + ct.cpuChildrenUser + ct.cpuChildrenSystem
def check_cpu_usage(first_proc, last_proc):
result = "\n"
result += "------------------------------------------------\n"
result += "------------------ CPU Usage -------------------\n"
result += "------------------------------------------------\n"
r = True
dt = (last_proc.logMonoTime - first_proc.logMonoTime) / 1e9
for proc_name, normal_cpu_usage in PROCS.items():
first, last = None, None
try:
first = [p for p in first_proc.procLog.procs if proc_name in p.cmdline][0]
last = [p for p in last_proc.procLog.procs if proc_name in p.cmdline][0]
cpu_time = cputime_total(last) - cputime_total(first)
cpu_usage = cpu_time / dt * 100.
if cpu_usage > max(normal_cpu_usage * 1.15, normal_cpu_usage + 5.0):
# cpu usage is high while playing sounds
if not (proc_name == "./_soundd" and cpu_usage < 65.):
result += f"Warning {proc_name} using more CPU than normal\n"
r = False
elif cpu_usage < min(normal_cpu_usage * 0.65, max(normal_cpu_usage - 1.0, 0.0)):
result += f"Warning {proc_name} using less CPU than normal\n"
r = False
result += f"{proc_name.ljust(35)} {cpu_usage:.2f}%\n"
except IndexError:
result += f"{proc_name.ljust(35)} NO METRICS FOUND {first=} {last=}\n"
r = False
result += "------------------------------------------------\n"
print(result)
return r
class TestOnroad(unittest.TestCase):
@classmethod
def setUpClass(cls):
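        # With DEBUG set, reuse the most recent local segment instead of starting a fresh onroad run.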
if "DEBUG" in os.environ:
segs = filter(lambda x: os.path.exists(os.path.join(x, "rlog.bz2")), Path(ROOT).iterdir())
segs = sorted(segs, key=lambda x: x.stat().st_mtime)
cls.lr = list(LogReader(os.path.join(segs[-1], "rlog.bz2")))
return
# setup env
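        # REPLAY avoids panda hardware, SKIP_FW_QUERY skips the firmware query, and FINGERPRINT pins the car platform.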
os.environ['REPLAY'] = "1"
os.environ['SKIP_FW_QUERY'] = "1"
os.environ['FINGERPRINT'] = "TOYOTA COROLLA TSS2 2019"
params = Params()
params.clear_all()
set_params_enabled()
# Make sure athena isn't running
os.system("pkill -9 -f athena")
# start manager and run openpilot for a minute
try:
manager_path = os.path.join(BASEDIR, "selfdrive/manager/manager.py")
proc = subprocess.Popen(["python", manager_path])
sm = messaging.SubMaster(['carState'])
with Timeout(150, "controls didn't start"):
                while sm.rcv_frame['carState'] < 1:
sm.update(1000)
# make sure we get at least two full segments
route = None
cls.segments = []
with Timeout(300, "timed out waiting for logs"):
while route is None:
route = params.get("CurrentRoute", encoding="utf-8")
time.sleep(0.1)
while len(cls.segments) < 3:
segs = set()
if Path(ROOT).exists():
segs = set(Path(ROOT).glob(f"{route}--*"))
cls.segments = sorted(segs, key=lambda s: int(str(s).rsplit('--')[-1]))
time.sleep(2)
# chop off last, incomplete segment
cls.segments = cls.segments[:-1]
finally:
proc.terminate()
if proc.wait(60) is None:
proc.kill()
cls.lrs = [list(LogReader(os.path.join(str(s), "rlog.bz2"))) for s in cls.segments]
# use the second segment by default as it's the first full segment
cls.lr = list(LogReader(os.path.join(str(cls.segments[1]), "rlog.bz2")))
def test_cloudlog_size(self):
msgs = [m for m in self.lr if m.which() == 'logMessage']
total_size = sum(len(m.as_builder().to_bytes()) for m in msgs)
self.assertLess(total_size, 3.5e5)
cnt = Counter(json.loads(m.logMessage)['filename'] for m in msgs)
        big_logs = [f for f, n in cnt.most_common(3) if n / sum(cnt.values()) > 0.3]  # any file producing >30% of messages
self.assertEqual(len(big_logs), 0, f"Log spam: {big_logs}")
def test_cpu_usage(self):
proclogs = [m for m in self.lr if m.which() == 'procLog']
self.assertGreater(len(proclogs), service_list['procLog'].frequency * 45, "insufficient samples")
cpu_ok = check_cpu_usage(proclogs[0], proclogs[-1])
self.assertTrue(cpu_ok)
def test_mpc_execution_timings(self):
result = "\n"
result += "------------------------------------------------\n"
result += "----------------- MPC Timing ------------------\n"
result += "------------------------------------------------\n"
cfgs = [("lateralPlan", 0.05, 0.05), ("longitudinalPlan", 0.05, 0.05)]
for (s, instant_max, avg_max) in cfgs:
ts = [getattr(getattr(m, s), "solverExecutionTime") for m in self.lr if m.which() == s]
self.assertLess(min(ts), instant_max, f"high '{s}' execution time: {min(ts)}")
self.assertLess(np.mean(ts), avg_max, f"high avg '{s}' execution time: {np.mean(ts)}")
result += f"'{s}' execution time: {min(ts)}\n"
result += f"'{s}' avg execution time: {np.mean(ts)}\n"
result += "------------------------------------------------\n"
print(result)
def test_model_execution_timings(self):
result = "\n"
result += "------------------------------------------------\n"
result += "----------------- Model Timing -----------------\n"
result += "------------------------------------------------\n"
# TODO: this went up when plannerd cpu usage increased, why?
cfgs = [("modelV2", 0.038, 0.036), ("driverState", 0.028, 0.026)]
for (s, instant_max, avg_max) in cfgs:
ts = [getattr(getattr(m, s), "modelExecutionTime") for m in self.lr if m.which() == s]
self.assertLess(min(ts), instant_max, f"high '{s}' execution time: {min(ts)}")
self.assertLess(np.mean(ts), avg_max, f"high avg '{s}' execution time: {np.mean(ts)}")
result += f"'{s}' execution time: {min(ts)}\n"
result += f"'{s}' avg execution time: {np.mean(ts)}\n"
result += "------------------------------------------------\n"
print(result)
def test_timings(self):
passed = True
result = "\n"
result += "------------------------------------------------\n"
result += "----------------- Service Timings --------------\n"
result += "------------------------------------------------\n"
for s, (maxmin, rsd) in TIMINGS.items():
msgs = [m.logMonoTime for m in self.lr if m.which() == s]
if not len(msgs):
raise Exception(f"missing {s}")
ts = np.diff(msgs) / 1e9
dt = 1 / service_list[s].frequency
try:
np.testing.assert_allclose(np.mean(ts), dt, rtol=0.03, err_msg=f"{s} - failed mean timing check")
np.testing.assert_allclose([np.max(ts), np.min(ts)], dt, rtol=maxmin, err_msg=f"{s} - failed max/min timing check")
except Exception as e:
result += str(e) + "\n"
passed = False
if np.std(ts) / dt > rsd:
result += f"{s} - failed RSD timing check\n"
passed = False
result += f"{s.ljust(40)}: {np.array([np.mean(ts), np.max(ts), np.min(ts)])*1e3}\n"
result += f"{''.ljust(40)} {np.max(np.absolute([np.max(ts)/dt, np.min(ts)/dt]))} {np.std(ts)/dt}\n"
result += "="*67
print(result)
self.assertTrue(passed)
@release_only
def test_startup(self):
startup_alert = None
for msg in self.lrs[0]:
# can't use carEvents because the first msg can be dropped while loggerd is starting up
if msg.which() == "controlsState":
startup_alert = msg.controlsState.alertText1
break
expected = EVENTS[car.CarEvent.EventName.startup][ET.PERMANENT].alert_text_1
self.assertEqual(startup_alert, expected, "wrong startup alert")
if __name__ == "__main__":
unittest.main()
| []
| []
| [
"SKIP_FW_QUERY",
"REPLAY",
"FINGERPRINT"
]
| [] | ["SKIP_FW_QUERY", "REPLAY", "FINGERPRINT"] | python | 3 | 0 | |
main.go | package main
import (
"context"
"fmt"
"os"
"regexp"
"sort"
"strings"
"time"
"github.com/PagerDuty/go-pagerduty"
"gopkg.in/alecthomas/kingpin.v2"
datadog "github.com/DataDog/datadog-api-client-go/api/v2/datadog"
)
var (
authToken = kingpin.Flag("auth", "Auth token").String()
team = kingpin.Flag("team", "Team").Required().String()
pdTeam = kingpin.Flag("pd-team", "Team in PagerDuty if different from Team").String()
since = kingpin.Flag("since", "Since date/time").Required().String()
until = kingpin.Flag("until", "Until date/time").Required().String()
urgency = kingpin.Flag("urgency", "Urgency").Default("high").String()
replace = kingpin.Flag("replace", "Replace titles with regex").Strings()
)
const (
filloutPlaceholder = " _TODO: please fill out_"
)
func errorf(format string, a ...interface{}) {
fmt.Fprintf(os.Stderr, format+"\n", a...)
}
func exit(format string, a ...interface{}) {
errorf(format, a...)
os.Exit(-1)
}
func main() {
kingpin.Parse()
*team = strings.ToLower(*team)
if *authToken == "" {
*authToken = os.Getenv("PD_AUTH_TOKEN")
}
if *authToken == "" {
exit("missing auth token (--auth or PD_AUTH_TOKEN)")
}
format := "2006-01-02"
sinceAt, err := time.Parse(format, *since)
if err != nil {
exit("Failed to parse --since: %v", err)
}
untilAt, err := time.Parse(format, *until)
if err != nil {
exit("Failed to parse --until: %v", err)
}
incidents, err := fetchIncidents(*team, sinceAt, untilAt)
if err != nil {
exit("Failed to fetch incidents from Datadog: %v", err)
}
pagerdutyTeam := *team
if *pdTeam != "" {
pagerdutyTeam = *pdTeam
}
pages, err := fetchPages(pagerdutyTeam, *since, *until)
if err != nil {
exit("Failed to fetch PagerDuty pages: %v", err)
}
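// Attribute each page to every incident whose window it falls into: from
// 15 minutes before the incident was created (pages often fire before the
// incident is formally declared) until the incident was resolved.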
for _, p := range pages {
for _, i := range incidents {
if p.createdAt.After(i.createdAt.Add(-15*time.Minute)) &&
p.createdAt.Before(i.resolvedAt) {
i.pages = append(i.pages, p)
p.incidentIDs = append(p.incidentIDs, i.id)
}
}
}
var md markdown
title := strings.Title(fmt.Sprintf("%s On-Call Report %s", *team, *until))
fmt.Println("---")
fmt.Printf("title: %s\n", title)
fmt.Println("---")
md.para(fmt.Sprintf("Report for %s - %s: total incidents - %d, total pages - %d", *since, *until, len(incidents), len(pages)))
timeFormat := "2006-01-02 @15:04:05"
for _, i := range incidents {
when := i.createdAt.Local().Format(timeFormat)
md.heading(3, link(fmt.Sprintf("%s | %s | %s | %s", i.sev, i.id, i.title, when), i.link))
md.heading(4, fmt.Sprintf("IC: %s", i.commanderEmail))
md.heading(4, "Root cause")
md.para(" " + i.rootCause)
md.heading(4, "Summary")
md.para(" " + i.summary)
if len(i.customerImpactScope) != 0 {
md.heading(4, fmt.Sprintf("Customer impact (%s)", i.customerImpactDuration.String()))
md.para(" " + i.customerImpactScope)
}
md.heading(4, "PagerDuty pages")
for _, p := range i.pages {
md.unordered(1, link(p.createdAt.Local().Format(timeFormat)+" "+p.title, p.link))
}
md.br()
md.heading(4, "Action taken")
md.para(filloutPlaceholder)
md.heading(4, "Follow-up")
md.unordered(1, "**Happened before/common theme**")
md.para(filloutPlaceholder)
md.unordered(1, "**How can we prevent it**")
md.para(filloutPlaceholder)
md.unordered(1, "**Runbooks**")
md.para(filloutPlaceholder)
md.unordered(1, "**Related PRs**")
md.para(filloutPlaceholder)
md.unordered(1, "**Action items**")
md.para(filloutPlaceholder)
}
md.heading(3, "Other Pages")
for _, p := range pages {
if len(p.incidentIDs) != 0 {
continue
}
md.unordered(1, link(p.createdAt.Local().Format(timeFormat)+" "+p.title, p.link))
md.unordered(2, "**Action taken**: "+filloutPlaceholder)
md.unordered(2, "**Follow-up**: "+filloutPlaceholder)
}
fmt.Print(md.String())
}
func getRegexReplace() map[*regexp.Regexp]string {
regexReplace := map[*regexp.Regexp]string{}
for _, r := range *replace {
parts := strings.SplitN(strings.Trim(r, "/"), "/", 2)
// strings.SplitN never returns an empty slice, so also reject an empty pattern.
if len(parts) == 0 || parts[0] == "" {
exit("Invalid regexp replacement %s", r)
}
replaceWith := ""
if len(parts) == 2 {
replaceWith = parts[1]
}
regexReplace[regexp.MustCompile(parts[0])] = replaceWith
}
return regexReplace
}
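// Example (editorial sketch, the values are made up): a flag such as
//
//	--replace "/user-[0-9a-f]{8}/user-<id>/"
//
// is trimmed and split on "/" into the pattern "user-[0-9a-f]{8}" and the
// replacement "user-<id>", so noisy page titles collapse into one entry.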
func newIntPtr(v int) *int {
return &v
}
func newUInt64(v uint64) *uint64 {
return &v
}
type page struct {
title string
link string
createdAt time.Time
incidentIDs []string
}
type incident struct {
id string
title string
link string
sev string
commander string
commanderEmail string
rootCause string
summary string
customerImpactScope string
customerImpactDuration time.Duration
createdAt time.Time
resolvedAt time.Time
pages []*page
}
func fetchPages(team, since, until string) ([]*page, error) {
client := pagerduty.NewClient(*authToken)
regexReplace := getRegexReplace()
teams, err := client.ListTeams(pagerduty.ListTeamOptions{
APIListObject: pagerduty.APIListObject{
Limit: 10000,
},
})
if err != nil {
return nil, fmt.Errorf("failed to list teams: %v", err)
}
var teamID string
for _, t := range teams.Teams {
if strings.ToLower(t.Name) == team {
teamID = t.ID
break
}
}
if teamID == "" {
return nil, fmt.Errorf("team %s not found", team)
}
incResp, err := client.ListIncidents(pagerduty.ListIncidentsOptions{
APIListObject: pagerduty.APIListObject{
Limit: 1000,
},
TeamIDs: []string{teamID},
Since: since,
Until: until,
Urgencies: []string{*urgency},
})
if err != nil {
return nil, err
}
var pages []*page
for _, p := range incResp.Incidents {
title := p.Title
for r, replace := range regexReplace {
title = r.ReplaceAllString(title, replace)
}
createdAt, _ := time.Parse(time.RFC3339, p.CreatedAt)
pages = append(pages, &page{
title: title,
link: p.HTMLURL,
createdAt: createdAt,
})
}
return pages, nil
}
func fetchIncidents(team string, since, until time.Time) ([]*incident, error) {
ctx := datadog.NewDefaultContext(context.Background())
configuration := datadog.NewConfiguration()
configuration.SetUnstableOperationEnabled("ListIncidents", true)
apiClient := datadog.NewAPIClient(configuration)
createdAfter := since.UTC().Unix()
createdBefore := until.UTC().Unix()
req := &searchRequest{
createdAfter: &createdAfter,
createdBefore: &createdBefore,
tags: []string{
"teams:" + team,
},
}
resp, r, err := searchIncidents(ctx, apiClient, req)
if err != nil {
fmt.Fprintf(os.Stderr, "Full HTTP response: %v\n", r)
return nil, fmt.Errorf("Error when calling `IncidentsApi.SearchIncidents`: %w", err)
}
if resp.Incidents == nil {
return nil, nil
}
var incidents []*incident
for _, i := range *resp.Incidents {
if i.Type != "incidents" {
continue
}
id := i.Attributes.GetPublicId()
commander := i.Attributes.Commander.User
incident := &incident{
id: fmt.Sprintf("#incident-%d", id),
title: i.Attributes.Title,
link: fmt.Sprintf("https://app.datadoghq.com/incidents/%d", id),
commander: *commander.Attributes.Name.Get(),
commanderEmail: *commander.Attributes.Email,
sev: i.Attributes.GetFields()["severity"].IncidentFieldAttributesSingleValue.GetValue(),
rootCause: i.Attributes.GetFields()["root_cause"].IncidentFieldAttributesSingleValue.GetValue(),
summary: i.Attributes.GetFields()["summary"].IncidentFieldAttributesSingleValue.GetValue(),
createdAt: *i.Attributes.Created,
}
// The customer impact fields are nullable; dereferencing them without a
// check panics on incidents that have no recorded customer impact.
if scope := i.Attributes.CustomerImpactScope.Get(); scope != nil {
incident.customerImpactScope = *scope
}
if dur := i.Attributes.CustomerImpactDuration; dur != nil {
incident.customerImpactDuration = time.Duration(*dur * int64(time.Second))
}
if i.Attributes.Resolved.IsSet() && i.Attributes.Resolved.Get() != nil {
incident.resolvedAt = *i.Attributes.Resolved.Get()
}
incidents = append(incidents, incident)
}
byCreatedAt := func(i, j int) bool {
return incidents[i].createdAt.Before(incidents[j].createdAt)
}
sort.Slice(incidents, byCreatedAt)
return incidents, nil
}
| [
"\"PD_AUTH_TOKEN\""
]
| []
| [
"PD_AUTH_TOKEN"
]
| [] | ["PD_AUTH_TOKEN"] | go | 1 | 0 | |
src/oca_github_bot/config.py | # Copyright (c) ACSONE SA/NV 2018
# Distributed under the MIT License (http://opensource.org/licenses/MIT).
import ast
import logging
import os
from functools import wraps
from .pypi import MultiDistPublisher, RsyncDistPublisher, TwineDistPublisher
_logger = logging.getLogger("oca_gihub_bot.tasks")
def switchable(switch_name=None):
def wrap(func):
@wraps(func)
def func_wrapper(*args, **kwargs):
sname = switch_name
if switch_name is None:
sname = func.__name__
if (
BOT_TASKS != ["all"] and sname not in BOT_TASKS
) or sname in BOT_TASKS_DISABLED:
_logger.debug("Method %s skipped (Disabled by config)", sname)
return
return func(*args, **kwargs)
return func_wrapper
return wrap
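# Illustrative usage (editorial sketch, not from the original module): a
# task guarded by the decorator becomes a no-op unless it is enabled:
#
#   @switchable("merge_bot")
#   def merge_bot_start(org, repo, pr, username):
#       ...
#
# With BOT_TASKS=delete_branch,tag_approved the call above is skipped.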
HTTP_HOST = os.environ.get("HTTP_HOST")
HTTP_PORT = int(os.environ.get("HTTP_PORT") or "8080")
GITHUB_SECRET = os.environ.get("GITHUB_SECRET")
GITHUB_LOGIN = os.environ.get("GITHUB_LOGIN")
GITHUB_TOKEN = os.environ.get("GITHUB_TOKEN")
GITHUB_ORG = (
os.environ.get("GITHUB_ORG") and os.environ.get("GITHUB_ORG").split(",") or []
)
GIT_NAME = os.environ.get("GIT_NAME")
GIT_EMAIL = os.environ.get("GIT_EMAIL")
ODOO_URL = os.environ.get("ODOO_URL")
ODOO_DB = os.environ.get("ODOO_DB")
ODOO_LOGIN = os.environ.get("ODOO_LOGIN")
ODOO_PASSWORD = os.environ.get("ODOO_PASSWORD")
BROKER_URI = os.environ.get("BROKER_URI", os.environ.get("REDIS_URI", "redis://queue"))
SENTRY_DSN = os.environ.get("SENTRY_DSN")
DRY_RUN = os.environ.get("DRY_RUN", "").lower() in ("1", "true", "yes")
# Comma-separated list of tasks to run.
# By default all configured tasks are run.
# Available tasks:
# delete_branch,tag_approved,tag_ready_to_merge,gen_addons_table,
# gen_addons_readme,gen_addons_icon,setuptools_odoo,merge_bot,tag_needs_review,
# migration_issue_bot
BOT_TASKS = os.environ.get("BOT_TASKS", "all").split(",")
BOT_TASKS_DISABLED = os.environ.get("BOT_TASKS_DISABLED", "").split(",")
GEN_ADDONS_TABLE_EXTRA_ARGS = (
os.environ.get("GEN_ADDONS_TABLE_EXTRA_ARGS", "")
and os.environ.get("GEN_ADDONS_TABLE_EXTRA_ARGS").split(" ")
or []
)
GEN_ADDON_README_EXTRA_ARGS = (
os.environ.get("GEN_ADDON_README_EXTRA_ARGS", "")
and os.environ.get("GEN_ADDON_README_EXTRA_ARGS").split(" ")
or []
)
GEN_ADDON_ICON_EXTRA_ARGS = (
os.environ.get("GEN_ADDON_ICON_EXTRA_ARGS", "")
and os.environ.get("GEN_ADDON_ICON_EXTRA_ARGS").split(" ")
or []
)
GITHUB_STATUS_IGNORED = os.environ.get(
"GITHUB_STATUS_IGNORED",
"ci/runbot,codecov/project,codecov/patch,coverage/coveralls",
).split(",")
GITHUB_CHECK_SUITES_IGNORED = os.environ.get(
"GITHUB_CHECK_SUITES_IGNORED", "Codecov,Dependabot"
).split(",")
MERGE_BOT_INTRO_MESSAGES = [
"On my way to merge this fine PR!",
"This PR looks fantastic, let's merge it!",
"Hey, thanks for contributing! Proceeding to merge this for you.",
"What a great day to merge this nice PR. Let's do it!",
]
APPROVALS_REQUIRED = int(os.environ.get("APPROVALS_REQUIRED", "2"))
MIN_PR_AGE = int(os.environ.get("MIN_PR_AGE", "5"))
dist_publisher = MultiDistPublisher()
SIMPLE_INDEX_ROOT = os.environ.get("SIMPLE_INDEX_ROOT")
if SIMPLE_INDEX_ROOT:
dist_publisher.add(RsyncDistPublisher(SIMPLE_INDEX_ROOT))
if os.environ.get("OCABOT_TWINE_REPOSITORIES"):
for index_url, repository_url, username, password in ast.literal_eval(
os.environ["OCABOT_TWINE_REPOSITORIES"]
):
dist_publisher.add(
TwineDistPublisher(index_url, repository_url, username, password)
)
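# Illustrative value for OCABOT_TWINE_REPOSITORIES (an editorial assumption
# matching the ast.literal_eval() call above): a Python literal list of
# (index_url, repository_url, username, password) tuples, e.g.
#
#   OCABOT_TWINE_REPOSITORIES="[('https://pypi.org/simple',
#       'https://upload.pypi.org/legacy/', '__token__', 'pypi-XXXX')]"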
OCABOT_USAGE = os.environ.get(
"OCABOT_USAGE",
"**Ocabot commands**\n"
"* ``ocabot merge major|minor|patch|nobump``\n"
"* ``ocabot rebase``"
"* ``ocabot migration {MODULE_NAME}``",
)
OCABOT_EXTRA_DOCUMENTATION = os.environ.get(
"OCABOT_EXTRA_DOCUMENTATION",
"**More information**\n"
" * [ocabot documentation](https://github.com/OCA/oca-github-bot/#commands)\n"
" * [OCA guidelines](https://github.com/OCA/odoo-community.org/blob/master/"
"website/Contribution/CONTRIBUTING.rst), "
'specially the "Version Numbers" section.',
)
ADOPT_AN_ADDON_MENTION = os.environ.get("ADOPT_AN_ADDON_MENTION")
MAINTAINER_CHECK_ODOO_RELEASES = (
os.environ.get("MAINTAINER_CHECK_ODOO_RELEASES")
and os.environ.get("MAINTAINER_CHECK_ODOO_RELEASES").split(",")
or []
)
| []
| []
| [
"HTTP_HOST",
"BROKER_URI",
"OCABOT_USAGE",
"ODOO_PASSWORD",
"APPROVALS_REQUIRED",
"GEN_ADDON_ICON_EXTRA_ARGS",
"GITHUB_CHECK_SUITES_IGNORED",
"GITHUB_STATUS_IGNORED",
"SENTRY_DSN",
"OCABOT_TWINE_REPOSITORIES",
"ODOO_DB",
"GIT_EMAIL",
"MAINTAINER_CHECK_ODOO_RELEASES",
"ODOO_URL",
"GITHUB_ORG",
"REDIS_URI",
"BOT_TASKS",
"GITHUB_LOGIN",
"GITHUB_SECRET",
"ADOPT_AN_ADDON_MENTION",
"OCABOT_EXTRA_DOCUMENTATION",
"SIMPLE_INDEX_ROOT",
"GITHUB_TOKEN",
"GIT_NAME",
"BOT_TASKS_DISABLED",
"GEN_ADDONS_TABLE_EXTRA_ARGS",
"MIN_PR_AGE",
"DRY_RUN",
"GEN_ADDON_README_EXTRA_ARGS",
"ODOO_LOGIN",
"HTTP_PORT"
]
| [] | ["HTTP_HOST", "BROKER_URI", "OCABOT_USAGE", "ODOO_PASSWORD", "APPROVALS_REQUIRED", "GEN_ADDON_ICON_EXTRA_ARGS", "GITHUB_CHECK_SUITES_IGNORED", "GITHUB_STATUS_IGNORED", "SENTRY_DSN", "OCABOT_TWINE_REPOSITORIES", "ODOO_DB", "GIT_EMAIL", "MAINTAINER_CHECK_ODOO_RELEASES", "ODOO_URL", "GITHUB_ORG", "REDIS_URI", "BOT_TASKS", "GITHUB_LOGIN", "GITHUB_SECRET", "ADOPT_AN_ADDON_MENTION", "OCABOT_EXTRA_DOCUMENTATION", "SIMPLE_INDEX_ROOT", "GITHUB_TOKEN", "GIT_NAME", "BOT_TASKS_DISABLED", "GEN_ADDONS_TABLE_EXTRA_ARGS", "MIN_PR_AGE", "DRY_RUN", "GEN_ADDON_README_EXTRA_ARGS", "ODOO_LOGIN", "HTTP_PORT"] | python | 31 | 0 | |
friendly-frogs/friendly_frogs/friendly_frogs/settings.py | """
Django settings for friendly_frogs project.
Generated by 'django-admin startproject' using Django 3.0.8.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'ev5m!&s7)v2$e(jat#3$6i)=gzgip8r_o51sx5mf_wgpwy(*2$'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'morning.apps.MorningConfig',
'users.apps.UsersConfig',
'django_simple_bulma',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'friendly_frogs.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'friendly_frogs.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'NAME': os.environ.get('POSTGRES_NAME'),
'USER': os.environ.get('POSTGRES_USER'),
'PASSWORD': os.environ.get('POSTGRES_PASSWORD'),
'HOST': os.environ.get('POSTGRES_HOST'),
'PORT': os.environ.get('POSTGRES_PORT'),
}
}
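# Illustrative environment for local development (placeholder values,
# editorial, not from the project):
#
#   export POSTGRES_NAME=friendly_frogs POSTGRES_USER=frogs \
#          POSTGRES_PASSWORD=secret POSTGRES_HOST=localhost POSTGRES_PORT=5432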
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = "static/"
STATICFILES_FINDERS = [
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
'django_simple_bulma.finders.SimpleBulmaFinder',
]
LOGIN_REDIRECT_URL = '/dashboard'
| []
| []
| [
"POSTGRES_NAME",
"POSTGRES_USER",
"POSTGRES_HOST",
"POSTGRES_PORT",
"POSTGRES_PASSWORD"
]
| [] | ["POSTGRES_NAME", "POSTGRES_USER", "POSTGRES_HOST", "POSTGRES_PORT", "POSTGRES_PASSWORD"] | python | 5 | 0 | |
buildtools/humanfriendly/terminal/__init__.py | # Human friendly input/output in Python.
#
# Author: Peter Odding <[email protected]>
# Last Change: March 1, 2020
# URL: https://humanfriendly.readthedocs.io
"""
Interaction with interactive text terminals.
The :mod:`~humanfriendly.terminal` module makes it easy to interact with
interactive text terminals and format text for rendering on such terminals. If
the terms used in the documentation of this module don't make sense to you then
please refer to the `Wikipedia article on ANSI escape sequences`_ for details
about how ANSI escape sequences work.
This module was originally developed for use on UNIX systems, but since then
Windows 10 gained native support for ANSI escape sequences and this module was
enhanced to recognize and support this. For details please refer to the
:func:`enable_ansi_support()` function.
.. _Wikipedia article on ANSI escape sequences: http://en.wikipedia.org/wiki/ANSI_escape_code#Sequence_elements
"""
# Standard library modules.
import codecs
import numbers
import os
import platform
import re
import subprocess
import sys
# The `fcntl' module is platform specific so importing it may give an error. We
# hide this implementation detail from callers by handling the import error and
# setting a flag instead.
try:
import fcntl
import termios
import struct
HAVE_IOCTL = True
except ImportError:
HAVE_IOCTL = False
# Modules included in our package.
from humanfriendly.compat import coerce_string, is_unicode, on_windows, which
from humanfriendly.decorators import cached
from humanfriendly.deprecation import define_aliases
from humanfriendly.text import concatenate, format
from humanfriendly.usage import format_usage
# Public identifiers that require documentation.
__all__ = (
'ANSI_COLOR_CODES',
'ANSI_CSI',
'ANSI_ERASE_LINE',
'ANSI_HIDE_CURSOR',
'ANSI_RESET',
'ANSI_SGR',
'ANSI_SHOW_CURSOR',
'ANSI_TEXT_STYLES',
'CLEAN_OUTPUT_PATTERN',
'DEFAULT_COLUMNS',
'DEFAULT_ENCODING',
'DEFAULT_LINES',
'HIGHLIGHT_COLOR',
'ansi_strip',
'ansi_style',
'ansi_width',
'ansi_wrap',
'auto_encode',
'clean_terminal_output',
'connected_to_terminal',
'enable_ansi_support',
'find_terminal_size',
'find_terminal_size_using_ioctl',
'find_terminal_size_using_stty',
'get_pager_command',
'have_windows_native_ansi_support',
'message',
'output',
'readline_strip',
'readline_wrap',
'show_pager',
'terminal_supports_colors',
'usage',
'warning',
)
ANSI_CSI = '\x1b['
"""The ANSI "Control Sequence Introducer" (a string)."""
ANSI_SGR = 'm'
"""The ANSI "Select Graphic Rendition" sequence (a string)."""
ANSI_ERASE_LINE = '%sK' % ANSI_CSI
"""The ANSI escape sequence to erase the current line (a string)."""
ANSI_RESET = '%s0%s' % (ANSI_CSI, ANSI_SGR)
"""The ANSI escape sequence to reset styling (a string)."""
ANSI_HIDE_CURSOR = '%s?25l' % ANSI_CSI
"""The ANSI escape sequence to hide the text cursor (a string)."""
ANSI_SHOW_CURSOR = '%s?25h' % ANSI_CSI
"""The ANSI escape sequence to show the text cursor (a string)."""
ANSI_COLOR_CODES = dict(black=0, red=1, green=2, yellow=3, blue=4, magenta=5, cyan=6, white=7)
"""
A dictionary with (name, number) pairs of `portable color codes`_. Used by
:func:`ansi_style()` to generate ANSI escape sequences that change font color.
.. _portable color codes: http://en.wikipedia.org/wiki/ANSI_escape_code#Colors
"""
ANSI_TEXT_STYLES = dict(bold=1, faint=2, italic=3, underline=4, inverse=7, strike_through=9)
"""
A dictionary with (name, number) pairs of text styles (effects). Used by
:func:`ansi_style()` to generate ANSI escape sequences that change text
styles. Only widely supported text styles are included here.
"""
CLEAN_OUTPUT_PATTERN = re.compile(u'(\r|\n|\b|%s)' % re.escape(ANSI_ERASE_LINE))
"""
A compiled regular expression used to separate significant characters from other text.
This pattern is used by :func:`clean_terminal_output()` to split terminal
output into regular text versus backspace, carriage return and line feed
characters and ANSI 'erase line' escape sequences.
"""
DEFAULT_LINES = 25
"""The default number of lines in a terminal (an integer)."""
DEFAULT_COLUMNS = 80
"""The default number of columns in a terminal (an integer)."""
DEFAULT_ENCODING = 'UTF-8'
"""The output encoding for Unicode strings."""
HIGHLIGHT_COLOR = os.environ.get('HUMANFRIENDLY_HIGHLIGHT_COLOR', 'green')
"""
The color used to highlight important tokens in formatted text (e.g. the usage
message of the ``humanfriendly`` program). If the environment variable
``$HUMANFRIENDLY_HIGHLIGHT_COLOR`` is set it determines the value of
:data:`HIGHLIGHT_COLOR`.
"""
def ansi_strip(text, readline_hints=True):
"""
Strip ANSI escape sequences from the given string.
:param text: The text from which ANSI escape sequences should be removed (a
string).
:param readline_hints: If :data:`True` then :func:`readline_strip()` is
used to remove `readline hints`_ from the string.
:returns: The text without ANSI escape sequences (a string).
"""
pattern = '%s.*?%s' % (re.escape(ANSI_CSI), re.escape(ANSI_SGR))
text = re.sub(pattern, '', text)
if readline_hints:
text = readline_strip(text)
return text
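# Illustrative example (editorial, not part of the original module):
#
#   >>> ansi_strip('\x1b[1;31mhello\x1b[0m')
#   'hello'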
def ansi_style(**kw):
"""
Generate ANSI escape sequences for the given color and/or style(s).
:param color: The foreground color. Three types of values are supported:
- The name of a color (one of the strings 'black', 'red',
'green', 'yellow', 'blue', 'magenta', 'cyan' or 'white').
- An integer that refers to the 256 color mode palette.
- A tuple or list with three integers representing an RGB
(red, green, blue) value.
The value :data:`None` (the default) means no escape
sequence to switch color will be emitted.
:param background: The background color (see the description
of the `color` argument).
:param bright: Use high intensity colors instead of default colors
(a boolean, defaults to :data:`False`).
:param readline_hints: If :data:`True` then :func:`readline_wrap()` is
applied to the generated ANSI escape sequences (the
default is :data:`False`).
:param kw: Any additional keyword arguments are expected to match a key
in the :data:`ANSI_TEXT_STYLES` dictionary. If the argument's
value evaluates to :data:`True` the respective style will be
enabled.
:returns: The ANSI escape sequences to enable the requested text styles or
an empty string if no styles were requested.
:raises: :exc:`~exceptions.ValueError` when an invalid color name is given.
Even though only eight named colors are supported, the use of `bright=True`
and `faint=True` increases the number of available colors to around 24 (it
may be slightly lower, for example because faint black is just black).
**Support for 8-bit colors**
In `release 4.7`_ support for 256 color mode was added. While this
significantly increases the available colors it's not very human friendly
in usage because you need to look up color codes in the `256 color mode
palette <https://en.wikipedia.org/wiki/ANSI_escape_code#8-bit>`_.
You can use the ``humanfriendly --demo`` command to get a demonstration of
the available colors, see also the screen shot below. Note that the small
font size in the screen shot was so that the demonstration of 256 color
mode support would fit into a single screen shot without scrolling :-)
(I wasn't feeling very creative).
.. image:: images/ansi-demo.png
**Support for 24-bit colors**
In `release 4.14`_ support for 24-bit colors was added by accepting a tuple
or list with three integers representing the RGB (red, green, blue) value
of a color. This is not included in the demo because rendering millions of
colors was deemed unpractical ;-).
.. _release 4.7: http://humanfriendly.readthedocs.io/en/latest/changelog.html#release-4-7-2018-01-14
.. _release 4.14: http://humanfriendly.readthedocs.io/en/latest/changelog.html#release-4-14-2018-07-13
"""
# Start with sequences that change text styles.
sequences = [ANSI_TEXT_STYLES[k] for k, v in kw.items() if k in ANSI_TEXT_STYLES and v]
# Append the color code (if any).
for color_type in 'color', 'background':
color_value = kw.get(color_type)
if isinstance(color_value, (tuple, list)):
if len(color_value) != 3:
msg = "Invalid color value %r! (expected tuple or list with three numbers)"
raise ValueError(msg % color_value)
sequences.append(48 if color_type == 'background' else 38)
sequences.append(2)
sequences.extend(map(int, color_value))
elif isinstance(color_value, numbers.Number):
# Numeric values are assumed to be 256 color codes.
sequences.extend((
48 if color_type == 'background' else 38,  # 48 selects the background in 256 color mode
5, int(color_value)
))
elif color_value:
# Other values are assumed to be strings containing one of the known color names.
if color_value not in ANSI_COLOR_CODES:
msg = "Invalid color value %r! (expected an integer or one of the strings %s)"
raise ValueError(msg % (color_value, concatenate(map(repr, sorted(ANSI_COLOR_CODES)))))
# Pick the right offset for foreground versus background
# colors and regular intensity versus bright colors.
offset = (
(100 if kw.get('bright') else 40)
if color_type == 'background'
else (90 if kw.get('bright') else 30)
)
# Combine the offset and color code into a single integer.
sequences.append(offset + ANSI_COLOR_CODES[color_value])
if sequences:
encoded = ANSI_CSI + ';'.join(map(str, sequences)) + ANSI_SGR
return readline_wrap(encoded) if kw.get('readline_hints') else encoded
else:
return ''
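# Illustrative examples (editorial, not part of the original module):
#
#   >>> ansi_style(color='red')
#   '\x1b[31m'
#   >>> ansi_style(color='red', bright=True, bold=True)
#   '\x1b[1;91m'
#   >>> ansi_style(color=214)  # 256 color mode palette index
#   '\x1b[38;5;214m'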
def ansi_width(text):
"""
Calculate the effective width of the given text (ignoring ANSI escape sequences).
:param text: The text whose width should be calculated (a string).
:returns: The width of the text without ANSI escape sequences (an
integer).
This function uses :func:`ansi_strip()` to strip ANSI escape sequences from
the given string and returns the length of the resulting string.
"""
return len(ansi_strip(text))
def ansi_wrap(text, **kw):
"""
Wrap text in ANSI escape sequences for the given color and/or style(s).
:param text: The text to wrap (a string).
:param kw: Any keyword arguments are passed to :func:`ansi_style()`.
:returns: The result of this function depends on the keyword arguments:
- If :func:`ansi_style()` generates an ANSI escape sequence based
on the keyword arguments, the given text is prefixed with the
generated ANSI escape sequence and suffixed with
:data:`ANSI_RESET`.
- If :func:`ansi_style()` returns an empty string then the text
given by the caller is returned unchanged.
"""
start_sequence = ansi_style(**kw)
if start_sequence:
end_sequence = ANSI_RESET
if kw.get('readline_hints'):
end_sequence = readline_wrap(end_sequence)
return start_sequence + text + end_sequence
else:
return text
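# Illustrative example (editorial, not part of the original module):
#
#   >>> ansi_wrap('WARNING', color='yellow', bold=True)
#   '\x1b[1;33mWARNING\x1b[0m'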
def auto_encode(stream, text, *args, **kw):
"""
Reliably write Unicode strings to the terminal.
:param stream: The file-like object to write to (a value like
:data:`sys.stdout` or :data:`sys.stderr`).
:param text: The text to write to the stream (a string).
:param args: Refer to :func:`~humanfriendly.text.format()`.
:param kw: Refer to :func:`~humanfriendly.text.format()`.
Renders the text using :func:`~humanfriendly.text.format()` and writes it
to the given stream. If an :exc:`~exceptions.UnicodeEncodeError` is
encountered in doing so, the text is encoded using :data:`DEFAULT_ENCODING`
and the write is retried. The reasoning behind this rather blunt approach
is that it's preferable to get output on the command line in the wrong
encoding than to have the Python program blow up with a
:exc:`~exceptions.UnicodeEncodeError` exception.
"""
text = format(text, *args, **kw)
try:
stream.write(text)
except UnicodeEncodeError:
stream.write(codecs.encode(text, DEFAULT_ENCODING))
def clean_terminal_output(text):
"""
Clean up the terminal output of a command.
:param text: The raw text with special characters (a Unicode string).
:returns: A list of Unicode strings (one for each line).
This function emulates the effect of backspace (0x08), carriage return
(0x0D) and line feed (0x0A) characters and the ANSI 'erase line' escape
sequence on interactive terminals. It's intended to clean up command output
that was originally meant to be rendered on an interactive terminal and
that has been captured using e.g. the :man:`script` program [#]_ or the
:mod:`pty` module [#]_.
.. [#] My coloredlogs_ package supports the ``coloredlogs --to-html``
command which uses :man:`script` to fool a subprocess into thinking
that it's connected to an interactive terminal (in order to get it
to emit ANSI escape sequences).
.. [#] My capturer_ package uses the :mod:`pty` module to fool the current
process and subprocesses into thinking they are connected to an
interactive terminal (in order to get them to emit ANSI escape
sequences).
**Some caveats about the use of this function:**
- Strictly speaking the effect of carriage returns cannot be emulated
outside of an actual terminal due to the interaction between overlapping
output, terminal widths and line wrapping. The goal of this function is
to sanitize noise in terminal output while preserving useful output.
Think of it as a useful and pragmatic but possibly lossy conversion.
- The algorithm isn't smart enough to properly handle a pair of ANSI escape
sequences that open before a carriage return and close after the last
carriage return in a linefeed delimited string; the resulting string will
contain only the closing end of the ANSI escape sequence pair. Tracking
this kind of complexity requires a state machine and proper parsing.
.. _capturer: https://pypi.org/project/capturer
.. _coloredlogs: https://pypi.org/project/coloredlogs
"""
cleaned_lines = []
current_line = ''
current_position = 0
for token in CLEAN_OUTPUT_PATTERN.split(text):
if token == '\r':
# Seek back to the start of the current line.
current_position = 0
elif token == '\b':
# Seek back one character in the current line.
current_position = max(0, current_position - 1)
else:
if token == '\n':
# Capture the current line.
cleaned_lines.append(current_line)
if token in ('\n', ANSI_ERASE_LINE):
# Clear the current line.
current_line = ''
current_position = 0
elif token:
# Merge regular output into the current line.
new_position = current_position + len(token)
prefix = current_line[:current_position]
suffix = current_line[new_position:]
current_line = prefix + token + suffix
current_position = new_position
# Capture the last line (if any).
cleaned_lines.append(current_line)
# Remove any empty trailing lines.
while cleaned_lines and not cleaned_lines[-1]:
cleaned_lines.pop(-1)
return cleaned_lines
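# Illustrative example (editorial, not part of the original module):
# carriage returns overwrite the current line, so a redrawn progress
# indicator collapses to its final state:
#
#   >>> clean_terminal_output(u'progress: 1%\rprogress: 100%\ndone\n')
#   ['progress: 100%', 'done']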
def connected_to_terminal(stream=None):
"""
Check if a stream is connected to a terminal.
:param stream: The stream to check (a file-like object,
defaults to :data:`sys.stdout`).
:returns: :data:`True` if the stream is connected to a terminal,
:data:`False` otherwise.
See also :func:`terminal_supports_colors()`.
"""
stream = sys.stdout if stream is None else stream
try:
return stream.isatty()
except Exception:
return False
@cached
def enable_ansi_support():
"""
Try to enable support for ANSI escape sequences (required on Windows).
:returns: :data:`True` if ANSI is supported, :data:`False` otherwise.
This functions checks for the following supported configurations, in the
given order:
1. On Windows, if :func:`have_windows_native_ansi_support()` confirms
native support for ANSI escape sequences :mod:`ctypes` will be used to
enable this support.
2. On Windows, if the environment variable ``$ANSICON`` is set nothing is
done because it is assumed that support for ANSI escape sequences has
already been enabled via `ansicon <https://github.com/adoxa/ansicon>`_.
3. On Windows, an attempt is made to import and initialize the Python
package :pypi:`colorama` instead (of course for this to work
:pypi:`colorama` has to be installed).
4. On other platforms this function calls :func:`connected_to_terminal()`
to determine whether ANSI escape sequences are supported (that is to
say all platforms that are not Windows are assumed to support ANSI
escape sequences natively, without weird contortions like above).
This makes it possible to call :func:`enable_ansi_support()`
unconditionally without checking the current platform.
The :func:`~humanfriendly.decorators.cached` decorator is used to ensure
that this function is only executed once, but its return value remains
available on later calls.
"""
if have_windows_native_ansi_support():
import ctypes
ctypes.windll.kernel32.SetConsoleMode(ctypes.windll.kernel32.GetStdHandle(-11), 7)
ctypes.windll.kernel32.SetConsoleMode(ctypes.windll.kernel32.GetStdHandle(-12), 7)
return True
elif on_windows():
if 'ANSICON' in os.environ:
return True
try:
import colorama
colorama.init()
return True
except ImportError:
return False
else:
return connected_to_terminal()
def find_terminal_size():
"""
Determine the number of lines and columns visible in the terminal.
:returns: A tuple of two integers with the line and column count.
The result of this function is based on the first of the following three
methods that works:
1. First :func:`find_terminal_size_using_ioctl()` is tried,
2. then :func:`find_terminal_size_using_stty()` is tried,
3. finally :data:`DEFAULT_LINES` and :data:`DEFAULT_COLUMNS` are returned.
.. note:: The :func:`find_terminal_size()` function performs the steps
above every time it is called, the result is not cached. This is
because the size of a virtual terminal can change at any time and
the result of :func:`find_terminal_size()` should be correct.
`Pre-emptive snarky comment`_: It's possible to cache the result
of this function and use :mod:`signal.SIGWINCH <signal>` to
refresh the cached values!
Response: As a library I don't consider it the role of the
:mod:`humanfriendly.terminal` module to install a process wide
signal handler ...
.. _Pre-emptive snarky comment: http://blogs.msdn.com/b/oldnewthing/archive/2008/01/30/7315957.aspx
"""
# The first method. Any of the standard streams may have been redirected
# somewhere and there's no telling which, so we'll just try them all.
for stream in sys.stdin, sys.stdout, sys.stderr:
try:
result = find_terminal_size_using_ioctl(stream)
if min(result) >= 1:
return result
except Exception:
pass
# The second method.
try:
result = find_terminal_size_using_stty()
if min(result) >= 1:
return result
except Exception:
pass
# Fall back to conservative defaults.
return DEFAULT_LINES, DEFAULT_COLUMNS
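# Illustrative usage (editorial; the actual numbers depend on the terminal):
#
#   >>> lines, columns = find_terminal_size()
#   >>> (lines, columns)
#   (25, 80)  # the documented fallback when detection fails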
def find_terminal_size_using_ioctl(stream):
"""
Find the terminal size using :func:`fcntl.ioctl()`.
:param stream: A stream connected to the terminal (a file object with a
``fileno`` attribute).
:returns: A tuple of two integers with the line and column count.
:raises: This function can raise exceptions but I'm not going to document
them here, you should be using :func:`find_terminal_size()`.
Based on an `implementation found on StackOverflow <http://stackoverflow.com/a/3010495/788200>`_.
"""
if not HAVE_IOCTL:
raise NotImplementedError("It looks like the `fcntl' module is not available!")
h, w, hp, wp = struct.unpack('HHHH', fcntl.ioctl(stream, termios.TIOCGWINSZ, struct.pack('HHHH', 0, 0, 0, 0)))
return h, w
def find_terminal_size_using_stty():
"""
Find the terminal size using the external command ``stty size``.
:returns: A tuple of two integers with the line and column count.
:raises: This function can raise exceptions but I'm not going to document
them here, you should be using :func:`find_terminal_size()`.
"""
stty = subprocess.Popen(['stty', 'size'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, stderr = stty.communicate()
tokens = stdout.split()
if len(tokens) != 2:
raise Exception("Invalid output from `stty size'!")
return tuple(map(int, tokens))
def get_pager_command(text=None):
"""
Get the command to show a text on the terminal using a pager.
:param text: The text to print to the terminal (a string).
:returns: A list of strings with the pager command and arguments.
The use of a pager helps to avoid the wall of text effect where the user
has to scroll up to see where the output began (not very user friendly).
If the given text contains ANSI escape sequences the command ``less
--RAW-CONTROL-CHARS`` is used, otherwise the environment variable
``$PAGER`` is used (if ``$PAGER`` isn't set :man:`less` is used).
When the selected pager is :man:`less`, the following options are used to
make the experience more user friendly:
- ``--quit-if-one-screen`` causes :man:`less` to automatically exit if the
entire text can be displayed on the first screen. This makes the use of a
pager transparent for smaller texts (because the operator doesn't have to
quit the pager).
- ``--no-init`` prevents :man:`less` from clearing the screen when it
exits. This ensures that the operator gets a chance to review the text
(for example a usage message) after quitting the pager, while composing
the next command.
"""
# Compose the pager command.
if text and ANSI_CSI in text:
command_line = ['less', '--RAW-CONTROL-CHARS']
else:
command_line = [os.environ.get('PAGER', 'less')]
# Pass some additional options to `less' (to make it more
# user friendly) without breaking support for other pagers.
if os.path.basename(command_line[0]) == 'less':
command_line.append('--no-init')
command_line.append('--quit-if-one-screen')
return command_line
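# Illustrative examples (editorial; assumes $PAGER is not set):
#
#   >>> get_pager_command('plain text')
#   ['less', '--no-init', '--quit-if-one-screen']
#   >>> get_pager_command('\x1b[1mbold\x1b[0m text')
#   ['less', '--RAW-CONTROL-CHARS', '--no-init', '--quit-if-one-screen']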
@cached
def have_windows_native_ansi_support():
"""
Check if we're running on a Windows 10 release with native support for ANSI escape sequences.
:returns: :data:`True` if so, :data:`False` otherwise.
The :func:`~humanfriendly.decorators.cached` decorator is used as a minor
performance optimization. Semantically this should have zero impact because
the answer doesn't change in the lifetime of a computer process.
"""
if on_windows():
try:
# I can't be 100% sure this will never break and I'm not in a
# position to test it thoroughly either, so I decided that paying
# the price of one additional try / except statement is worth the
# additional peace of mind :-).
components = tuple(int(c) for c in platform.version().split('.'))
return components >= (10, 0, 14393)
except Exception:
pass
return False
def message(text, *args, **kw):
"""
Print a formatted message to the standard error stream.
For details about argument handling please refer to
:func:`~humanfriendly.text.format()`.
Renders the message using :func:`~humanfriendly.text.format()` and writes
the resulting string (followed by a newline) to :data:`sys.stderr` using
:func:`auto_encode()`.
"""
auto_encode(sys.stderr, coerce_string(text) + '\n', *args, **kw)
def output(text, *args, **kw):
"""
Print a formatted message to the standard output stream.
For details about argument handling please refer to
:func:`~humanfriendly.text.format()`.
Renders the message using :func:`~humanfriendly.text.format()` and writes
the resulting string (followed by a newline) to :data:`sys.stdout` using
:func:`auto_encode()`.
"""
auto_encode(sys.stdout, coerce_string(text) + '\n', *args, **kw)
def readline_strip(expr):
"""
Remove `readline hints`_ from a string.
:param expr: The text to strip (a string).
:returns: The stripped text.
"""
return expr.replace('\001', '').replace('\002', '')
def readline_wrap(expr):
"""
Wrap an ANSI escape sequence in `readline hints`_.
:param expr: The text with the escape sequence to wrap (a string).
:returns: The wrapped text.
.. _readline hints: http://superuser.com/a/301355
"""
return '\001' + expr + '\002'
def show_pager(formatted_text, encoding=DEFAULT_ENCODING):
"""
Print a large text to the terminal using a pager.
:param formatted_text: The text to print to the terminal (a string).
:param encoding: The name of the text encoding used to encode the formatted
text if the formatted text is a Unicode string (a string,
defaults to :data:`DEFAULT_ENCODING`).
When :func:`connected_to_terminal()` returns :data:`True` a pager is used
to show the text on the terminal, otherwise the text is printed directly
without invoking a pager.
The use of a pager helps to avoid the wall of text effect where the user
has to scroll up to see where the output began (not very user friendly).
Refer to :func:`get_pager_command()` for details about the command line
that's used to invoke the pager.
"""
if connected_to_terminal():
# Make sure the selected pager command is available.
command_line = get_pager_command(formatted_text)
if which(command_line[0]):
pager = subprocess.Popen(command_line, stdin=subprocess.PIPE)
if is_unicode(formatted_text):
formatted_text = formatted_text.encode(encoding)
pager.communicate(input=formatted_text)
return
output(formatted_text)
def terminal_supports_colors(stream=None):
"""
Check if a stream is connected to a terminal that supports ANSI escape sequences.
:param stream: The stream to check (a file-like object,
defaults to :data:`sys.stdout`).
:returns: :data:`True` if the terminal supports ANSI escape sequences,
:data:`False` otherwise.
This function was originally inspired by the implementation of
`django.core.management.color.supports_color()
<https://github.com/django/django/blob/master/django/core/management/color.py>`_
but has since evolved significantly.
"""
if on_windows():
# On Windows support for ANSI escape sequences is not a given.
have_ansicon = 'ANSICON' in os.environ
have_colorama = 'colorama' in sys.modules
have_native_support = have_windows_native_ansi_support()
if not (have_ansicon or have_colorama or have_native_support):
return False
return connected_to_terminal(stream)
def usage(usage_text):
"""
Print a human friendly usage message to the terminal.
:param usage_text: The usage message to print (a string).
This function does two things:
1. If :data:`sys.stdout` is connected to a terminal (see
:func:`connected_to_terminal()`) then the usage message is formatted
using :func:`.format_usage()`.
2. The usage message is shown using a pager (see :func:`show_pager()`).
"""
if terminal_supports_colors(sys.stdout):
usage_text = format_usage(usage_text)
show_pager(usage_text)
def warning(text, *args, **kw):
"""
Show a warning message on the terminal.
For details about argument handling please refer to
:func:`~humanfriendly.text.format()`.
Renders the message using :func:`~humanfriendly.text.format()` and writes
the resulting string (followed by a newline) to :data:`sys.stderr` using
:func:`auto_encode()`.
If :data:`sys.stderr` is connected to a terminal that supports colors,
:func:`ansi_wrap()` is used to color the message in a red font (to make
the warning stand out from surrounding text).
"""
text = coerce_string(text)
if terminal_supports_colors(sys.stderr):
text = ansi_wrap(text, color='red')
auto_encode(sys.stderr, text + '\n', *args, **kw)
# Define aliases for backwards compatibility.
define_aliases(
module_name=__name__,
# In humanfriendly 1.31 the find_meta_variables() and format_usage()
# functions were extracted to the new module humanfriendly.usage.
find_meta_variables='humanfriendly.usage.find_meta_variables',
format_usage='humanfriendly.usage.format_usage',
# In humanfriendly 8.0 the html_to_ansi() function and HTMLConverter
# class were extracted to the new module humanfriendly.terminal.html.
html_to_ansi='humanfriendly.terminal.html.html_to_ansi',
HTMLConverter='humanfriendly.terminal.html.HTMLConverter',
)
| []
| []
| [
"PAGER",
"HUMANFRIENDLY_HIGHLIGHT_COLOR"
]
| [] | ["PAGER", "HUMANFRIENDLY_HIGHLIGHT_COLOR"] | python | 2 | 0 |