filename
stringlengths
4
198
content
stringlengths
25
939k
environment
list
variablearg
list
constarg
list
variableargjson
stringclasses
1 value
constargjson
stringlengths
2
3.9k
lang
stringclasses
3 values
constargcount
float64
0
129
variableargcount
float64
0
0
sentence
stringclasses
1 value
manage.py
#!/usr/bin/env python import os import sys if __name__ == "__main__": os.environ.setdefault("DJANGO_SETTINGS_MODULE", "ultralists.settings") from django.core.management import execute_from_command_line execute_from_command_line(sys.argv)
[]
[]
[]
[]
[]
python
0
0
setup.py
#!/usr/bin/env python # -*- coding: utf-8 -*- """ This file is used to create the package we'll publish to PyPI. .. currentmodule:: setup.py .. moduleauthor:: Jev Kuznetsov <[email protected]> """ import importlib.util import os from pathlib import Path from setuptools import setup, find_packages from codecs import open # Use a consistent encoding. from os import path here = path.abspath(path.dirname(__file__)) # Get the long description from the relevant file with open(path.join(here, "README.rst"), encoding="utf-8") as f: long_description = f.read() # Get the base version from the library. (We'll find it in the `version.py` # file in the src directory, but we'll bypass actually loading up the library.) vspec = importlib.util.spec_from_file_location( "version", str(Path(__file__).resolve().parent / "chopit"/"version.py") ) vmod = importlib.util.module_from_spec(vspec) vspec.loader.exec_module(vmod) version = getattr(vmod, "__version__") # If the environment has a build number set... if os.getenv("buildnum") is not None: # ...append it to the version. version = f"{version}.{os.getenv('buildnum')}" setup( name='chopit', description="Static site builder based on JINJA2 templates and markdown", long_description=long_description, packages=find_packages(exclude=["*.tests", "*.tests.*", "tests.*", "tests"]), version=version, install_requires=[ # Include dependencies here "click>=7.0,<8" ], entry_points=""" [console_scripts] chopit=chopit.cli:cli """, python_requires=">=0.0.1", license='MIT', # noqa author='Jev Kuznetsov', author_email='[email protected]', # Use the URL to the github repo. url= 'https://github.com/sjev/chopit', download_url=( f'https://github.com/sjev/' f'chopit/archive/{version}.tar.gz' ), keywords=[ # Add package keywords here. ], # See https://PyPI.python.org/PyPI?%3Aaction=list_classifiers classifiers=[ # How mature is this project? 
Common values are # 3 - Alpha # 4 - Beta # 5 - Production/Stable "Development Status :: 3 - Alpha", # Indicate who your project is intended for. "Intended Audience :: Developers", "Topic :: Software Development :: Libraries", # Pick your license. (It should match "license" above.) # noqa """License :: OSI Approved :: MIT License""", # noqa # Specify the Python versions you support here. In particular, ensure # that you indicate whether you support Python 2, Python 3 or both. "Programming Language :: Python :: 3.8", ], include_package_data=True )
[]
[]
[ "buildnum" ]
[]
["buildnum"]
python
1
0
link/solve/spamp.py
""" Path Assignment Mixed-integer Program (PAMP) Assumes you have a assignment path graph """ import os import itertools import copy from datetime import datetime import networkx as nx from docplex.mp.model import Model def solve(graph, ass_to_path, arcs_to_parents, demand_to_parents): """ Solve the assignment network problem """ s_graph = nx.Graph() key = os.environ.get('DO_API_KEY', 'local') url = os.environ.get('DO_URL', 'local') mdl = Model("assi") mdl.set_time_limit(1800) edge_assi, c_nodes = formulation( mdl, graph, ass_to_path, arcs_to_parents, demand_to_parents ) s_edges = run_mdl(mdl, edge_assi, c_nodes, url, key) for (s, e), v in s_edges.items(): if v > 0: s_graph.add_edge(s, e) return s_graph def formulation(mdl, graph, ass_to_path, arcs_to_parents, demand_to_parents): tasks = {j: f'{j}' for j in graph.nodes() if graph.out_degree[j] == 0} cands = {i: f'{i}' for i in graph.nodes() if graph.in_degree[i] == 0} edges = {(s, e):f"{s}_{e}" for s, e in list(graph.edges())} edge_assi = mdl.integer_var_dict(edges, lb=0, ub=mdl.infinity, name='x') c_nodes = mdl.integer_var_dict(cands, lb=0, ub=1, name='z') # Creating task assignment contraints # logging.info("Creating contraint 2") for j in tasks: mdl.add_constraint( mdl.sum(edge_assi[i[0], j] for i in graph.in_edges(j)) == 1 ) # Creating idicator to push cost to use minimal amount of hubs # logging.info("Creating constraint 3") for i in cands: mdl.add_constraint( mdl.sum(edge_assi[i, j[1]] for j in graph.out_edges(i)) - graph.node[i]['capacity'] * c_nodes[i] <= 0 ) mdl.add_constraint( mdl.sum(c_nodes[i] for i in cands) == 1 ) mdl.minimize( mdl.sum(d['cost'] * edge_assi[i, j] for i, j, d in graph.edges(data=True)) + mdl.sum(graph.node[i]['cost'] * c_nodes[i] for i in cands) ) return edge_assi, c_nodes def run_mdl(mdl, p_arcs, c_nodes, url, key, gap=0.00): mdl.parameters.mip.tolerances.mipgap = gap if url == 'local': sol = mdl.solve() else: sol = mdl.solve(url=url, key=key) solve_gap = 
sol.solve_details.mip_relative_gap solve_time = sol.solve_details.time date_time = datetime.now() with open(f'sassi_gap_time.{date_time}.txt', 'w') as out: out.write(f'{solve_gap}_{solve_time}') return sol.get_value_dict(p_arcs)
[]
[]
[ "DO_API_KEY", "DO_URL" ]
[]
["DO_API_KEY", "DO_URL"]
python
2
0
pkg/cleanoldtags/cleanOldTags.go
/* Copyright [email protected] Licensed under the Apache License, Version 2.0 (the "License") you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package cleanoldtags import ( "context" "fmt" "io/ioutil" "os" "regexp" "strings" "time" "github.com/heroku/docker-registry-client/registry" "github.com/maksim-paskal/kubernetes-manager/pkg/api" "github.com/maksim-paskal/kubernetes-manager/pkg/config" logrushookopentracing "github.com/maksim-paskal/logrus-hook-opentracing" utilsgo "github.com/maksim-paskal/utils-go" opentracing "github.com/opentracing/opentracing-go" log "github.com/sirupsen/logrus" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) const deletePrefix = "rm -rf " type RegistryData struct { ProjectID string DockerTag string TagsNotDelete []string } var exceptions []string func Execute(rootSpan opentracing.Span) { tracer := opentracing.GlobalTracer() span := tracer.StartSpan("cleanOldTagsBy", opentracing.ChildOf(rootSpan.Context())) defer span.Finish() projectIDs := []string{} projectOrigins := []string{} exceptions = getExceptions(span) opt := metav1.ListOptions{ LabelSelector: *config.Get().IngressFilter, } ingresss, _ := api.Clientset.ExtensionsV1beta1().Ingresses("").List(context.TODO(), opt) for _, ingress := range ingresss.Items { log := log.WithFields(log.Fields{ "name": ingress.Name, "namespace": ingress.Namespace, }) projectID := ingress.Annotations[config.LabelGitProjectID] projectOrigin := ingress.Annotations[config.LabelGitProjectOrigin] if utilsgo.StringInSlice(projectID, projectIDs) { log.Warnf("projectId=%s already in array", projectID) continue } 
if len(projectOrigin) == 0 { log.Warnf("%s is empty", config.LabelGitProjectOrigin) continue } projectIDs = append(projectIDs, ingress.Annotations[config.LabelGitProjectID]) projectOrigins = append(projectOrigins, ingress.Annotations[config.LabelGitProjectOrigin]) } items := []RegistryData{} for i, projectID := range projectIDs { dockerTag := strings.Split(projectOrigins[i], ":")[1] dockerTag = strings.TrimSuffix(dockerTag, ".git") item := RegistryData{ ProjectID: projectID, DockerTag: dockerTag, TagsNotDelete: cleanOldTagsByProject(rootSpan, projectID), } items = append(items, item) } hub, err := registry.New(*config.Get().RegistryURL, *config.Get().RegistryUser, *config.Get().RegistryPassword) if err != nil { log. WithError(err). WithField(logrushookopentracing.SpanKey, span). Fatal() } hub.Logf = registry.Quiet var deleteCommand strings.Builder deleteCommand.WriteString("set -ex\n") for _, item := range items { for _, command := range exec(span, hub, fmt.Sprintf("%s/", item.DockerTag), item.TagsNotDelete) { deleteCommand.WriteString(command) } } const ( resultFile = "cleanOldTags.sh" resultFilePermission = 0o744 ) err = ioutil.WriteFile(resultFile, []byte(deleteCommand.String()), resultFilePermission) if err != nil { log. WithError(err). WithField(logrushookopentracing.SpanKey, span). Fatal() } log.Infof("%s created", resultFile) } func getExceptions(rootSpan opentracing.Span) []string { tracer := opentracing.GlobalTracer() span := tracer.StartSpan("getExceptions", opentracing.ChildOf(rootSpan.Context())) defer span.Finish() allExceptions := []string{} opt := metav1.ListOptions{ LabelSelector: "app=cleanoldtags", } cms, err := api.Clientset.CoreV1().ConfigMaps(os.Getenv("POD_NAMESPACE")).List(context.TODO(), opt) if err != nil { log. WithError(err). WithField(logrushookopentracing.SpanKey, span). 
Fatal() } log.Infof("found exception configmaps=%d", len(cms.Items)) for _, cm := range cms.Items { podNamespace := os.Getenv("POD_NAMESPACE") cleanoldtags, err := api.Clientset. CoreV1(). ConfigMaps(podNamespace). Get(context.TODO(), cm.Name, metav1.GetOptions{}) if err != nil { log. WithError(err). WithField(logrushookopentracing.SpanKey, span). Fatal() } data := cleanoldtags.Data["exceptions"] for _, row := range strings.Split(data, "\n") { data := strings.Split(row, ":") if len(data) == config.KeyValueLength { if !utilsgo.StringInSlice(row, allExceptions) { allExceptions = append(allExceptions, row) } } } } return allExceptions } func cleanOldTagsByProject(rootSpan opentracing.Span, projectID string) []string { tracer := opentracing.GlobalTracer() span := tracer.StartSpan("cleanOldTagsByProject", opentracing.ChildOf(rootSpan.Context())) defer span.Finish() nonDelete := []string{} for _, exc := range exceptions { data := strings.Split(exc, ":") if data[0] == projectID { nonDelete = append(nonDelete, data[1]) } } opt := metav1.ListOptions{ LabelSelector: *config.Get().IngressFilter, } ingresss, _ := api.Clientset.ExtensionsV1beta1().Ingresses("").List(context.TODO(), opt) for _, ingress := range ingresss.Items { if ingress.Annotations[config.LabelGitProjectID] == projectID { tag := ingress.Annotations[config.LabelRegistryTag] if !utilsgo.StringInSlice(tag, nonDelete) { nonDelete = append(nonDelete, tag) } } } log.Infof("projectID=%s, tags to not delete=%s", projectID, nonDelete) return nonDelete } func exec( rootSpan opentracing.Span, hub *registry.Registry, checkRepository string, tagsToLeaveArray []string, ) []string { tracer := opentracing.GlobalTracer() span := tracer.StartSpan("exec", opentracing.ChildOf(rootSpan.Context())) defer span.Finish() var ( deleteTags []string errorTags []string err error ) repositories, err := hub.Repositories() if err != nil { log. WithError(err). WithField(logrushookopentracing.SpanKey, span). 
Fatal() } releasePattern, err := regexp.Compile(*config.Get().ReleasePatern) releaseMaxDate := time.Time{} if err != nil { log. WithError(err). WithField(logrushookopentracing.SpanKey, span). Fatal() } log.Debug("start list") for _, repository := range repositories { log.Debugf("repository=%s", repository) if strings.HasPrefix(repository, checkRepository) { tags, err := hub.Tags(repository) if err != nil { log. WithError(err). WithField(logrushookopentracing.SpanKey, span). Error() } for _, tag := range tags { log.Debugf("repository=%s,tag=%s", repository, tag) digest, err := hub.ManifestDigest(repository, tag) /* find max release date */ if releasePattern.MatchString(tag) { releaseDate, err := time.Parse("20060102", releasePattern.FindStringSubmatch(tag)[1]) if err != nil { log. WithError(err). WithField(logrushookopentracing.SpanKey, span). Error() } else if releaseDate.After(releaseMaxDate) { releaseMaxDate = releaseDate } } if err != nil { log. WithError(err). WithField(logrushookopentracing.SpanKey, span). Error() errorTags = append(errorTags, fmt.Sprintf("%s:%s", repository, tag)) } else { log.Debugf("%s:%s,%s", repository, tag, digest) if !utilsgo.StringInSlice(tag, tagsToLeaveArray) { deleteTags = append(deleteTags, fmt.Sprintf("%s:%s", repository, tag)) } } } } } log.Debugf("finished") var releaseNotDelete []string if (releaseMaxDate != time.Time{}) { for _, tagToDelete := range deleteTags { tag := strings.Split(tagToDelete, ":") /* find releases in range */ if releasePattern.MatchString(tag[1]) { releaseDate, err := time.Parse("20060102", releasePattern.FindStringSubmatch(tag[1])[1]) if err != nil { log. WithError(err). WithField(logrushookopentracing.SpanKey, span). 
Error() } else { releaseDateDiffDays := releaseMaxDate.Sub(releaseDate).Hours() / config.HoursInDay if releaseDateDiffDays < float64(*config.Get().ReleaseNotDeleteDays) { log.Debugf("image %s date in notDeleteDays", tagToDelete) releaseNotDelete = append(releaseNotDelete, tagToDelete) } } } } } log.Infof( "checkRepository=%s,errorTags=%d,deleteTags=%d,releaseNotDelete=%d", checkRepository, len(errorTags), len(deleteTags), len(releaseNotDelete), ) deleteCommand := make([]string, 0) for _, errorTag := range errorTags { image := strings.Split(errorTag, ":") deleteCommand = append( deleteCommand, fmt.Sprintf( "%s%sdocker/registry/v2/repositories/%s/_manifests/tags/%s\n", deletePrefix, *config.Get().RegistryDirectory, image[0], image[1], ), ) } for _, tagToDelete := range deleteTags { if !utilsgo.StringInSlice(tagToDelete, releaseNotDelete) { image := strings.Split(tagToDelete, ":") deleteCommand = append( deleteCommand, fmt.Sprintf( "%s%sdocker/registry/v2/repositories/%s/_manifests/tags/%s\n", deletePrefix, *config.Get().RegistryDirectory, image[0], image[1], ), ) } } return deleteCommand }
[ "\"POD_NAMESPACE\"", "\"POD_NAMESPACE\"" ]
[]
[ "POD_NAMESPACE" ]
[]
["POD_NAMESPACE"]
go
1
0
haproxy/tests/test_haproxy.py
# (C) Datadog, Inc. 2012-present # All rights reserved # Licensed under Simplified BSD License (see LICENSE) import copy import os import pytest from datadog_checks.haproxy import HAProxy from .common import ( BACKEND_AGGREGATE_ONLY_CHECK, BACKEND_CHECK, BACKEND_HOSTS_METRIC, BACKEND_LIST, BACKEND_SERVICES, BACKEND_STATUS_METRIC, BACKEND_TO_ADDR, CHECK_CONFIG_OPEN, CONFIG_TCPSOCKET, CONFIG_UNIXSOCKET, FRONTEND_CHECK, SERVICE_CHECK_NAME, STATS_SOCKET, STATS_URL, STATS_URL_OPEN, haproxy_less_than_1_7, platform_supports_sockets, requires_shareable_unix_socket, requires_socket_support, ) def _test_frontend_metrics(aggregator, shared_tag, count=1): haproxy_version = os.environ.get('HAPROXY_VERSION', '1.5.11').split('.')[:2] frontend_tags = shared_tag + ['type:FRONTEND', 'service:public', 'haproxy_service:public'] for metric_name, min_version in FRONTEND_CHECK: if haproxy_version >= min_version: aggregator.assert_metric(metric_name, tags=frontend_tags, count=count) def _test_backend_metrics(aggregator, shared_tag, services=None, add_addr_tag=False, check_aggregates=False, count=1): backend_tags = shared_tag + ['type:BACKEND'] haproxy_version = os.environ.get('HAPROXY_VERSION', '1.5.11').split('.')[:2] if not services: services = BACKEND_SERVICES for service in services: tags = backend_tags + ['service:' + service, 'haproxy_service:' + service, 'backend:BACKEND'] if check_aggregates: for metric_name, min_version in BACKEND_AGGREGATE_ONLY_CHECK: if haproxy_version >= min_version: aggregator.assert_metric(metric_name, tags=tags, count=count) for backend in BACKEND_LIST: tags = backend_tags + ['service:' + service, 'haproxy_service:' + service, 'backend:' + backend] if add_addr_tag and haproxy_version >= ['1', '7']: tags.append('server_address:{}'.format(BACKEND_TO_ADDR[backend])) for metric_name, min_version in BACKEND_CHECK: if haproxy_version >= min_version: aggregator.assert_metric(metric_name, tags=tags, count=count) def _test_backend_hosts(aggregator, count=1): for 
service in BACKEND_SERVICES: available_tag = ['available:true', 'service:' + service, 'haproxy_service:' + service] unavailable_tag = ['available:false', 'service:' + service, 'haproxy_service:' + service] aggregator.assert_metric(BACKEND_HOSTS_METRIC, tags=available_tag, count=count) aggregator.assert_metric(BACKEND_HOSTS_METRIC, tags=unavailable_tag, count=count) status_no_check_tags = ['service:' + service, 'haproxy_service:' + service, 'status:no_check'] aggregator.assert_metric(BACKEND_STATUS_METRIC, tags=status_no_check_tags, count=count) def _test_service_checks(aggregator, services=None, count=1): if not services: services = BACKEND_SERVICES for service in services: for backend in BACKEND_LIST: tags = ['service:' + service, 'haproxy_service:' + service, 'backend:' + backend] aggregator.assert_service_check(SERVICE_CHECK_NAME, status=HAProxy.UNKNOWN, count=count, tags=tags) tags = ['service:' + service, 'haproxy_service:' + service, 'backend:BACKEND'] aggregator.assert_service_check(SERVICE_CHECK_NAME, status=HAProxy.OK, count=count, tags=tags) @requires_socket_support @pytest.mark.usefixtures('dd_environment') @pytest.mark.integration def test_check(aggregator, check, instance): check = check(instance) check.check(instance) shared_tag = ["instance_url:{0}".format(STATS_URL)] _test_frontend_metrics(aggregator, shared_tag + ['active:false']) _test_backend_metrics(aggregator, shared_tag + ['active:false'], check_aggregates=True) _test_service_checks(aggregator, count=0) aggregator.assert_all_metrics_covered() @requires_socket_support @pytest.mark.usefixtures('dd_environment') @pytest.mark.integration def test_check_service_check(aggregator, check, instance): # Add the enable service check instance.update({"enable_service_check": True}) check = check(instance) check.check(instance) shared_tag = ["instance_url:{0}".format(STATS_URL)] _test_frontend_metrics(aggregator, shared_tag + ['active:false']) _test_backend_metrics(aggregator, shared_tag + ['active:false'], 
check_aggregates=True) # check was run 2 times # - FRONTEND is reporting OPEN that we ignore # - only the BACKEND aggregate is reporting UP -> OK # - The 3 individual servers are returning no check -> UNKNOWN _test_service_checks(aggregator) aggregator.assert_all_metrics_covered() @requires_socket_support @pytest.mark.usefixtures('dd_environment') @pytest.mark.integration def test_check_service_filter(aggregator, check, instance): instance['services_include'] = ['datadog'] instance['services_exclude'] = ['.*'] check = check(instance) check.check(instance) shared_tag = ["instance_url:{0}".format(STATS_URL)] _test_backend_metrics(aggregator, shared_tag + ['active:false'], ['datadog'], check_aggregates=True) aggregator.assert_all_metrics_covered() @requires_socket_support @pytest.mark.usefixtures('dd_environment') @pytest.mark.integration def test_wrong_config(aggregator, check, instance): instance['username'] = 'fake_username' with pytest.raises(Exception): check = check(instance) check.check(instance) aggregator.assert_all_metrics_covered() @pytest.mark.usefixtures('dd_environment') @pytest.mark.integration def test_open_config(aggregator, check): check = check(CHECK_CONFIG_OPEN) check.check(CHECK_CONFIG_OPEN) shared_tag = ["instance_url:{0}".format(STATS_URL_OPEN)] _test_frontend_metrics(aggregator, shared_tag) _test_backend_metrics(aggregator, shared_tag) _test_backend_hosts(aggregator) aggregator.assert_all_metrics_covered() @pytest.mark.usefixtures('dd_environment') @pytest.mark.integration @pytest.mark.skipif( haproxy_less_than_1_7 or not platform_supports_sockets, reason='Sockets with operator level are only available with haproxy 1.7', ) def test_tcp_socket(aggregator, check): config = copy.deepcopy(CONFIG_TCPSOCKET) check = check(config) check.check(config) shared_tag = ["instance_url:{0}".format(STATS_SOCKET)] _test_frontend_metrics(aggregator, shared_tag) _test_backend_metrics(aggregator, shared_tag, add_addr_tag=True) 
aggregator.assert_all_metrics_covered() @requires_shareable_unix_socket @pytest.mark.usefixtures('dd_environment') @pytest.mark.integration def test_unixsocket_config(aggregator, check, dd_environment): config = copy.deepcopy(CONFIG_UNIXSOCKET) unixsocket_url = dd_environment["unixsocket_url"] config['url'] = unixsocket_url check = check(config) check.check(config) shared_tag = ["instance_url:{0}".format(unixsocket_url)] _test_frontend_metrics(aggregator, shared_tag) _test_backend_metrics(aggregator, shared_tag, add_addr_tag=True) aggregator.assert_all_metrics_covered() @pytest.mark.usefixtures('dd_environment') @pytest.mark.integration def test_version_metadata_http(check, datadog_agent, version_metadata): config = copy.deepcopy(CHECK_CONFIG_OPEN) check = check(config) check.check_id = 'test:123' check.check(config) datadog_agent.assert_metadata('test:123', version_metadata) # some version contains release information which is not in the test env var metadata_count = ( len(version_metadata) + 1 if ('test:123', 'version.release') in datadog_agent._metadata else len(version_metadata) ) datadog_agent.assert_metadata_count(metadata_count) @pytest.mark.usefixtures('dd_environment') @pytest.mark.integration def test_uptime_skip_http(check, aggregator): config = copy.deepcopy(CHECK_CONFIG_OPEN) config['startup_grace_seconds'] = 20 check = check(config) check.check(config) aggregator.assert_all_metrics_covered() @requires_shareable_unix_socket @pytest.mark.usefixtures('dd_environment') @pytest.mark.integration def test_version_metadata_unix_socket(check, version_metadata, dd_environment, datadog_agent): config = copy.deepcopy(CONFIG_UNIXSOCKET) unixsocket_url = dd_environment["unixsocket_url"] config['url'] = unixsocket_url check = check(config) check.check_id = 'test:123' check.check(config) datadog_agent.assert_metadata('test:123', version_metadata) # some version contains release information which is not in the test env var metadata_count = ( len(version_metadata) + 1 
if ('test:123', 'version.release') in datadog_agent._metadata else len(version_metadata) ) datadog_agent.assert_metadata_count(metadata_count) @pytest.mark.usefixtures('dd_environment') @pytest.mark.integration @pytest.mark.skipif( haproxy_less_than_1_7 or not platform_supports_sockets, reason='Sockets with operator level are only available with haproxy 1.7', ) def test_version_metadata_tcp_socket(check, version_metadata, datadog_agent): config = copy.deepcopy(CONFIG_TCPSOCKET) check = check(config) check.check_id = 'test:123' check.check(config) datadog_agent.assert_metadata('test:123', version_metadata) # some version contains release information which is not in the test env var metadata_count = ( len(version_metadata) + 1 if ('test:123', 'version.release') in datadog_agent._metadata else len(version_metadata) ) datadog_agent.assert_metadata_count(metadata_count) @pytest.mark.usefixtures('dd_environment') @pytest.mark.integration @pytest.mark.skipif( haproxy_less_than_1_7 or not platform_supports_sockets, reason='Uptime is only reported on the stats socket in v1.7+', ) def test_uptime_skip_tcp(aggregator, check, dd_environment): config = copy.deepcopy(CONFIG_TCPSOCKET) config['startup_grace_seconds'] = 20 check = check(config) check.check(config) aggregator.assert_all_metrics_covered() @pytest.mark.e2e def test_e2e(dd_agent_check, instance): aggregator = dd_agent_check(CHECK_CONFIG_OPEN, rate=True) shared_tag = ["instance_url:{0}".format(STATS_URL_OPEN)] _test_frontend_metrics(aggregator, shared_tag, count=None) _test_backend_metrics(aggregator, shared_tag, count=None) _test_backend_hosts(aggregator, count=2) aggregator.assert_all_metrics_covered()
[]
[]
[ "HAPROXY_VERSION" ]
[]
["HAPROXY_VERSION"]
python
1
0
integration-cli/docker_cli_run_test.go
package main import ( "bufio" "bytes" "encoding/json" "fmt" "io" "io/ioutil" "net" "os" "os/exec" "path" "path/filepath" "reflect" "regexp" "runtime" "sort" "strconv" "strings" "sync" "time" "github.com/docker/docker/pkg/integration/checker" icmd "github.com/docker/docker/pkg/integration/cmd" "github.com/docker/docker/pkg/mount" "github.com/docker/docker/pkg/stringid" "github.com/docker/docker/pkg/stringutils" "github.com/docker/docker/runconfig" "github.com/docker/go-connections/nat" "github.com/docker/libnetwork/resolvconf" "github.com/docker/libnetwork/types" "github.com/go-check/check" libcontainerUser "github.com/opencontainers/runc/libcontainer/user" ) // "test123" should be printed by docker run func (s *DockerSuite) TestRunEchoStdout(c *check.C) { out, _ := dockerCmd(c, "run", "busybox", "echo", "test123") if out != "test123\n" { c.Fatalf("container should've printed 'test123', got '%s'", out) } } // "test" should be printed func (s *DockerSuite) TestRunEchoNamedContainer(c *check.C) { out, _ := dockerCmd(c, "run", "--name", "testfoonamedcontainer", "busybox", "echo", "test") if out != "test\n" { c.Errorf("container should've printed 'test'") } } // docker run should not leak file descriptors. This test relies on Unix // specific functionality and cannot run on Windows. func (s *DockerSuite) TestRunLeakyFileDescriptors(c *check.C) { testRequires(c, DaemonIsLinux) out, _ := dockerCmd(c, "run", "busybox", "ls", "-C", "/proc/self/fd") // normally, we should only get 0, 1, and 2, but 3 gets created by "ls" when it does "opendir" on the "fd" directory if out != "0 1 2 3\n" { c.Errorf("container should've printed '0 1 2 3', not: %s", out) } } // it should be possible to lookup Google DNS // this will fail when Internet access is unavailable func (s *DockerSuite) TestRunLookupGoogleDNS(c *check.C) { testRequires(c, Network, NotArm) if daemonPlatform == "windows" { // nslookup isn't present in Windows busybox. Is built-in. 
Further, // nslookup isn't present in nanoserver. Hence just use PowerShell... dockerCmd(c, "run", WindowsBaseImage, "powershell", "Resolve-DNSName", "google.com") } else { dockerCmd(c, "run", DefaultImage, "nslookup", "google.com") } } // the exit code should be 0 func (s *DockerSuite) TestRunExitCodeZero(c *check.C) { dockerCmd(c, "run", "busybox", "true") } // the exit code should be 1 func (s *DockerSuite) TestRunExitCodeOne(c *check.C) { _, exitCode, err := dockerCmdWithError("run", "busybox", "false") c.Assert(err, checker.NotNil) c.Assert(exitCode, checker.Equals, 1) } // it should be possible to pipe in data via stdin to a process running in a container func (s *DockerSuite) TestRunStdinPipe(c *check.C) { // TODO Windows: This needs some work to make compatible. testRequires(c, DaemonIsLinux) runCmd := exec.Command(dockerBinary, "run", "-i", "-a", "stdin", "busybox", "cat") runCmd.Stdin = strings.NewReader("blahblah") out, _, _, err := runCommandWithStdoutStderr(runCmd) if err != nil { c.Fatalf("failed to run container: %v, output: %q", err, out) } out = strings.TrimSpace(out) dockerCmd(c, "wait", out) logsOut, _ := dockerCmd(c, "logs", out) containerLogs := strings.TrimSpace(logsOut) if containerLogs != "blahblah" { c.Errorf("logs didn't print the container's logs %s", containerLogs) } dockerCmd(c, "rm", out) } // the container's ID should be printed when starting a container in detached mode func (s *DockerSuite) TestRunDetachedContainerIDPrinting(c *check.C) { out, _ := dockerCmd(c, "run", "-d", "busybox", "true") out = strings.TrimSpace(out) dockerCmd(c, "wait", out) rmOut, _ := dockerCmd(c, "rm", out) rmOut = strings.TrimSpace(rmOut) if rmOut != out { c.Errorf("rm didn't print the container ID %s %s", out, rmOut) } } // the working directory should be set correctly func (s *DockerSuite) TestRunWorkingDirectory(c *check.C) { dir := "/root" image := "busybox" if daemonPlatform == "windows" { dir = `C:/Windows` } // First with -w out, _ := dockerCmd(c, 
"run", "-w", dir, image, "pwd") out = strings.TrimSpace(out) if out != dir { c.Errorf("-w failed to set working directory") } // Then with --workdir out, _ = dockerCmd(c, "run", "--workdir", dir, image, "pwd") out = strings.TrimSpace(out) if out != dir { c.Errorf("--workdir failed to set working directory") } } // pinging Google's DNS resolver should fail when we disable the networking func (s *DockerSuite) TestRunWithoutNetworking(c *check.C) { count := "-c" image := "busybox" if daemonPlatform == "windows" { count = "-n" image = WindowsBaseImage } // First using the long form --net out, exitCode, err := dockerCmdWithError("run", "--net=none", image, "ping", count, "1", "8.8.8.8") if err != nil && exitCode != 1 { c.Fatal(out, err) } if exitCode != 1 { c.Errorf("--net=none should've disabled the network; the container shouldn't have been able to ping 8.8.8.8") } } //test --link use container name to link target func (s *DockerSuite) TestRunLinksContainerWithContainerName(c *check.C) { // TODO Windows: This test cannot run on a Windows daemon as the networking // settings are not populated back yet on inspect. testRequires(c, DaemonIsLinux) dockerCmd(c, "run", "-i", "-t", "-d", "--name", "parent", "busybox") ip := inspectField(c, "parent", "NetworkSettings.Networks.bridge.IPAddress") out, _ := dockerCmd(c, "run", "--link", "parent:test", "busybox", "/bin/cat", "/etc/hosts") if !strings.Contains(out, ip+" test") { c.Fatalf("use a container name to link target failed") } } //test --link use container id to link target func (s *DockerSuite) TestRunLinksContainerWithContainerID(c *check.C) { // TODO Windows: This test cannot run on a Windows daemon as the networking // settings are not populated back yet on inspect. 
testRequires(c, DaemonIsLinux) cID, _ := dockerCmd(c, "run", "-i", "-t", "-d", "busybox") cID = strings.TrimSpace(cID) ip := inspectField(c, cID, "NetworkSettings.Networks.bridge.IPAddress") out, _ := dockerCmd(c, "run", "--link", cID+":test", "busybox", "/bin/cat", "/etc/hosts") if !strings.Contains(out, ip+" test") { c.Fatalf("use a container id to link target failed") } } func (s *DockerSuite) TestUserDefinedNetworkLinks(c *check.C) { testRequires(c, DaemonIsLinux, NotUserNamespace, NotArm) dockerCmd(c, "network", "create", "-d", "bridge", "udlinkNet") dockerCmd(c, "run", "-d", "--net=udlinkNet", "--name=first", "busybox", "top") c.Assert(waitRun("first"), check.IsNil) // run a container in user-defined network udlinkNet with a link for an existing container // and a link for a container that doesn't exist dockerCmd(c, "run", "-d", "--net=udlinkNet", "--name=second", "--link=first:foo", "--link=third:bar", "busybox", "top") c.Assert(waitRun("second"), check.IsNil) // ping to first and its alias foo must succeed _, _, err := dockerCmdWithError("exec", "second", "ping", "-c", "1", "first") c.Assert(err, check.IsNil) _, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "foo") c.Assert(err, check.IsNil) // ping to third and its alias must fail _, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "third") c.Assert(err, check.NotNil) _, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "bar") c.Assert(err, check.NotNil) // start third container now dockerCmd(c, "run", "-d", "--net=udlinkNet", "--name=third", "busybox", "top") c.Assert(waitRun("third"), check.IsNil) // ping to third and its alias must succeed now _, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "third") c.Assert(err, check.IsNil) _, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "bar") c.Assert(err, check.IsNil) } func (s *DockerSuite) TestUserDefinedNetworkLinksWithRestart(c *check.C) { testRequires(c, DaemonIsLinux, 
NotUserNamespace, NotArm) dockerCmd(c, "network", "create", "-d", "bridge", "udlinkNet") dockerCmd(c, "run", "-d", "--net=udlinkNet", "--name=first", "busybox", "top") c.Assert(waitRun("first"), check.IsNil) dockerCmd(c, "run", "-d", "--net=udlinkNet", "--name=second", "--link=first:foo", "busybox", "top") c.Assert(waitRun("second"), check.IsNil) // ping to first and its alias foo must succeed _, _, err := dockerCmdWithError("exec", "second", "ping", "-c", "1", "first") c.Assert(err, check.IsNil) _, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "foo") c.Assert(err, check.IsNil) // Restart first container dockerCmd(c, "restart", "first") c.Assert(waitRun("first"), check.IsNil) // ping to first and its alias foo must still succeed _, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "first") c.Assert(err, check.IsNil) _, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "foo") c.Assert(err, check.IsNil) // Restart second container dockerCmd(c, "restart", "second") c.Assert(waitRun("second"), check.IsNil) // ping to first and its alias foo must still succeed _, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "first") c.Assert(err, check.IsNil) _, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "foo") c.Assert(err, check.IsNil) } func (s *DockerSuite) TestRunWithNetAliasOnDefaultNetworks(c *check.C) { testRequires(c, DaemonIsLinux, NotUserNamespace, NotArm) defaults := []string{"bridge", "host", "none"} for _, net := range defaults { out, _, err := dockerCmdWithError("run", "-d", "--net", net, "--net-alias", "alias_"+net, "busybox", "top") c.Assert(err, checker.NotNil) c.Assert(out, checker.Contains, runconfig.ErrUnsupportedNetworkAndAlias.Error()) } } func (s *DockerSuite) TestUserDefinedNetworkAlias(c *check.C) { testRequires(c, DaemonIsLinux, NotUserNamespace, NotArm) dockerCmd(c, "network", "create", "-d", "bridge", "net1") cid1, _ := dockerCmd(c, "run", "-d", "--net=net1", 
"--name=first", "--net-alias=foo1", "--net-alias=foo2", "busybox", "top") c.Assert(waitRun("first"), check.IsNil) // Check if default short-id alias is added automatically id := strings.TrimSpace(cid1) aliases := inspectField(c, id, "NetworkSettings.Networks.net1.Aliases") c.Assert(aliases, checker.Contains, stringid.TruncateID(id)) cid2, _ := dockerCmd(c, "run", "-d", "--net=net1", "--name=second", "busybox", "top") c.Assert(waitRun("second"), check.IsNil) // Check if default short-id alias is added automatically id = strings.TrimSpace(cid2) aliases = inspectField(c, id, "NetworkSettings.Networks.net1.Aliases") c.Assert(aliases, checker.Contains, stringid.TruncateID(id)) // ping to first and its network-scoped aliases _, _, err := dockerCmdWithError("exec", "second", "ping", "-c", "1", "first") c.Assert(err, check.IsNil) _, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "foo1") c.Assert(err, check.IsNil) _, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "foo2") c.Assert(err, check.IsNil) // ping first container's short-id alias _, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", stringid.TruncateID(cid1)) c.Assert(err, check.IsNil) // Restart first container dockerCmd(c, "restart", "first") c.Assert(waitRun("first"), check.IsNil) // ping to first and its network-scoped aliases must succeed _, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "first") c.Assert(err, check.IsNil) _, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "foo1") c.Assert(err, check.IsNil) _, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "foo2") c.Assert(err, check.IsNil) // ping first container's short-id alias _, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", stringid.TruncateID(cid1)) c.Assert(err, check.IsNil) } // Issue 9677. 
func (s *DockerSuite) TestRunWithDaemonFlags(c *check.C) { out, _, err := dockerCmdWithError("--exec-opt", "foo=bar", "run", "-i", "busybox", "true") c.Assert(err, checker.NotNil) c.Assert(out, checker.Contains, "unknown flag: --exec-opt") } // Regression test for #4979 func (s *DockerSuite) TestRunWithVolumesFromExited(c *check.C) { var ( out string exitCode int ) // Create a file in a volume if daemonPlatform == "windows" { out, exitCode = dockerCmd(c, "run", "--name", "test-data", "--volume", `c:\some\dir`, WindowsBaseImage, "cmd", "/c", `echo hello > c:\some\dir\file`) } else { out, exitCode = dockerCmd(c, "run", "--name", "test-data", "--volume", "/some/dir", "busybox", "touch", "/some/dir/file") } if exitCode != 0 { c.Fatal("1", out, exitCode) } // Read the file from another container using --volumes-from to access the volume in the second container if daemonPlatform == "windows" { out, exitCode = dockerCmd(c, "run", "--volumes-from", "test-data", WindowsBaseImage, "cmd", "/c", `type c:\some\dir\file`) } else { out, exitCode = dockerCmd(c, "run", "--volumes-from", "test-data", "busybox", "cat", "/some/dir/file") } if exitCode != 0 { c.Fatal("2", out, exitCode) } } // Volume path is a symlink which also exists on the host, and the host side is a file not a dir // But the volume call is just a normal volume, not a bind mount func (s *DockerSuite) TestRunCreateVolumesInSymlinkDir(c *check.C) { var ( dockerFile string containerPath string cmd string ) // This test cannot run on a Windows daemon as // Windows does not support symlinks inside a volume path testRequires(c, SameHostDaemon, DaemonIsLinux) name := "test-volume-symlink" dir, err := ioutil.TempDir("", name) if err != nil { c.Fatal(err) } defer os.RemoveAll(dir) // In the case of Windows to Windows CI, if the machine is setup so that // the temp directory is not the C: drive, this test is invalid and will // not work. 
if daemonPlatform == "windows" && strings.ToLower(dir[:1]) != "c" { c.Skip("Requires TEMP to point to C: drive") } f, err := os.OpenFile(filepath.Join(dir, "test"), os.O_CREATE, 0700) if err != nil { c.Fatal(err) } f.Close() if daemonPlatform == "windows" { dockerFile = fmt.Sprintf("FROM %s\nRUN mkdir %s\nRUN mklink /D c:\\test %s", WindowsBaseImage, dir, dir) containerPath = `c:\test\test` cmd = "tasklist" } else { dockerFile = fmt.Sprintf("FROM busybox\nRUN mkdir -p %s\nRUN ln -s %s /test", dir, dir) containerPath = "/test/test" cmd = "true" } if _, err := buildImage(name, dockerFile, false); err != nil { c.Fatal(err) } dockerCmd(c, "run", "-v", containerPath, name, cmd) } // Volume path is a symlink in the container func (s *DockerSuite) TestRunCreateVolumesInSymlinkDir2(c *check.C) { var ( dockerFile string containerPath string cmd string ) // This test cannot run on a Windows daemon as // Windows does not support symlinks inside a volume path testRequires(c, SameHostDaemon, DaemonIsLinux) name := "test-volume-symlink2" if daemonPlatform == "windows" { dockerFile = fmt.Sprintf("FROM %s\nRUN mkdir c:\\%s\nRUN mklink /D c:\\test c:\\%s", WindowsBaseImage, name, name) containerPath = `c:\test\test` cmd = "tasklist" } else { dockerFile = fmt.Sprintf("FROM busybox\nRUN mkdir -p /%s\nRUN ln -s /%s /test", name, name) containerPath = "/test/test" cmd = "true" } if _, err := buildImage(name, dockerFile, false); err != nil { c.Fatal(err) } dockerCmd(c, "run", "-v", containerPath, name, cmd) } func (s *DockerSuite) TestRunVolumesMountedAsReadonly(c *check.C) { if _, code, err := dockerCmdWithError("run", "-v", "/test:/test:ro", "busybox", "touch", "/test/somefile"); err == nil || code == 0 { c.Fatalf("run should fail because volume is ro: exit code %d", code) } } func (s *DockerSuite) TestRunVolumesFromInReadonlyModeFails(c *check.C) { var ( volumeDir string fileInVol string ) if daemonPlatform == "windows" { volumeDir = `c:/test` // Forward-slash as using busybox 
fileInVol = `c:/test/file` } else { testRequires(c, DaemonIsLinux) volumeDir = "/test" fileInVol = `/test/file` } dockerCmd(c, "run", "--name", "parent", "-v", volumeDir, "busybox", "true") if _, code, err := dockerCmdWithError("run", "--volumes-from", "parent:ro", "busybox", "touch", fileInVol); err == nil || code == 0 { c.Fatalf("run should fail because volume is ro: exit code %d", code) } } // Regression test for #1201 func (s *DockerSuite) TestRunVolumesFromInReadWriteMode(c *check.C) { var ( volumeDir string fileInVol string ) if daemonPlatform == "windows" { volumeDir = `c:/test` // Forward-slash as using busybox fileInVol = `c:/test/file` } else { volumeDir = "/test" fileInVol = "/test/file" } dockerCmd(c, "run", "--name", "parent", "-v", volumeDir, "busybox", "true") dockerCmd(c, "run", "--volumes-from", "parent:rw", "busybox", "touch", fileInVol) if out, _, err := dockerCmdWithError("run", "--volumes-from", "parent:bar", "busybox", "touch", fileInVol); err == nil || !strings.Contains(out, `invalid mode: bar`) { c.Fatalf("running --volumes-from parent:bar should have failed with invalid mode: %q", out) } dockerCmd(c, "run", "--volumes-from", "parent", "busybox", "touch", fileInVol) } func (s *DockerSuite) TestVolumesFromGetsProperMode(c *check.C) { testRequires(c, SameHostDaemon) prefix, slash := getPrefixAndSlashFromDaemonPlatform() hostpath := randomTmpDirPath("test", daemonPlatform) if err := os.MkdirAll(hostpath, 0755); err != nil { c.Fatalf("Failed to create %s: %q", hostpath, err) } defer os.RemoveAll(hostpath) dockerCmd(c, "run", "--name", "parent", "-v", hostpath+":"+prefix+slash+"test:ro", "busybox", "true") // Expect this "rw" mode to be be ignored since the inherited volume is "ro" if _, _, err := dockerCmdWithError("run", "--volumes-from", "parent:rw", "busybox", "touch", prefix+slash+"test"+slash+"file"); err == nil { c.Fatal("Expected volumes-from to inherit read-only volume even when passing in `rw`") } dockerCmd(c, "run", "--name", 
"parent2", "-v", hostpath+":"+prefix+slash+"test:ro", "busybox", "true") // Expect this to be read-only since both are "ro" if _, _, err := dockerCmdWithError("run", "--volumes-from", "parent2:ro", "busybox", "touch", prefix+slash+"test"+slash+"file"); err == nil { c.Fatal("Expected volumes-from to inherit read-only volume even when passing in `ro`") } } // Test for GH#10618 func (s *DockerSuite) TestRunNoDupVolumes(c *check.C) { path1 := randomTmpDirPath("test1", daemonPlatform) path2 := randomTmpDirPath("test2", daemonPlatform) someplace := ":/someplace" if daemonPlatform == "windows" { // Windows requires that the source directory exists before calling HCS testRequires(c, SameHostDaemon) someplace = `:c:\someplace` if err := os.MkdirAll(path1, 0755); err != nil { c.Fatalf("Failed to create %s: %q", path1, err) } defer os.RemoveAll(path1) if err := os.MkdirAll(path2, 0755); err != nil { c.Fatalf("Failed to create %s: %q", path1, err) } defer os.RemoveAll(path2) } mountstr1 := path1 + someplace mountstr2 := path2 + someplace if out, _, err := dockerCmdWithError("run", "-v", mountstr1, "-v", mountstr2, "busybox", "true"); err == nil { c.Fatal("Expected error about duplicate mount definitions") } else { if !strings.Contains(out, "Duplicate mount point") { c.Fatalf("Expected 'duplicate mount point' error, got %v", out) } } // Test for https://github.com/docker/docker/issues/22093 volumename1 := "test1" volumename2 := "test2" volume1 := volumename1 + someplace volume2 := volumename2 + someplace if out, _, err := dockerCmdWithError("run", "-v", volume1, "-v", volume2, "busybox", "true"); err == nil { c.Fatal("Expected error about duplicate mount definitions") } else { if !strings.Contains(out, "Duplicate mount point") { c.Fatalf("Expected 'duplicate mount point' error, got %v", out) } } // create failed should have create volume volumename1 or volumename2 // we should remove volumename2 or volumename2 successfully out, _ := dockerCmd(c, "volume", "ls") if 
strings.Contains(out, volumename1) { dockerCmd(c, "volume", "rm", volumename1) } else { dockerCmd(c, "volume", "rm", volumename2) } } // Test for #1351 func (s *DockerSuite) TestRunApplyVolumesFromBeforeVolumes(c *check.C) { prefix := "" if daemonPlatform == "windows" { prefix = `c:` } dockerCmd(c, "run", "--name", "parent", "-v", prefix+"/test", "busybox", "touch", prefix+"/test/foo") dockerCmd(c, "run", "--volumes-from", "parent", "-v", prefix+"/test", "busybox", "cat", prefix+"/test/foo") } func (s *DockerSuite) TestRunMultipleVolumesFrom(c *check.C) { prefix := "" if daemonPlatform == "windows" { prefix = `c:` } dockerCmd(c, "run", "--name", "parent1", "-v", prefix+"/test", "busybox", "touch", prefix+"/test/foo") dockerCmd(c, "run", "--name", "parent2", "-v", prefix+"/other", "busybox", "touch", prefix+"/other/bar") dockerCmd(c, "run", "--volumes-from", "parent1", "--volumes-from", "parent2", "busybox", "sh", "-c", "cat /test/foo && cat /other/bar") } // this tests verifies the ID format for the container func (s *DockerSuite) TestRunVerifyContainerID(c *check.C) { out, exit, err := dockerCmdWithError("run", "-d", "busybox", "true") if err != nil { c.Fatal(err) } if exit != 0 { c.Fatalf("expected exit code 0 received %d", exit) } match, err := regexp.MatchString("^[0-9a-f]{64}$", strings.TrimSuffix(out, "\n")) if err != nil { c.Fatal(err) } if !match { c.Fatalf("Invalid container ID: %s", out) } } // Test that creating a container with a volume doesn't crash. Regression test for #995. func (s *DockerSuite) TestRunCreateVolume(c *check.C) { prefix := "" if daemonPlatform == "windows" { prefix = `c:` } dockerCmd(c, "run", "-v", prefix+"/var/lib/data", "busybox", "true") } // Test that creating a volume with a symlink in its path works correctly. Test for #5152. // Note that this bug happens only with symlinks with a target that starts with '/'. 
func (s *DockerSuite) TestRunCreateVolumeWithSymlink(c *check.C) { // Cannot run on Windows as relies on Linux-specific functionality (sh -c mount...) testRequires(c, DaemonIsLinux) image := "docker-test-createvolumewithsymlink" buildCmd := exec.Command(dockerBinary, "build", "-t", image, "-") buildCmd.Stdin = strings.NewReader(`FROM busybox RUN ln -s home /bar`) buildCmd.Dir = workingDirectory err := buildCmd.Run() if err != nil { c.Fatalf("could not build '%s': %v", image, err) } _, exitCode, err := dockerCmdWithError("run", "-v", "/bar/foo", "--name", "test-createvolumewithsymlink", image, "sh", "-c", "mount | grep -q /home/foo") if err != nil || exitCode != 0 { c.Fatalf("[run] err: %v, exitcode: %d", err, exitCode) } volPath, err := inspectMountSourceField("test-createvolumewithsymlink", "/bar/foo") c.Assert(err, checker.IsNil) _, exitCode, err = dockerCmdWithError("rm", "-v", "test-createvolumewithsymlink") if err != nil || exitCode != 0 { c.Fatalf("[rm] err: %v, exitcode: %d", err, exitCode) } _, err = os.Stat(volPath) if !os.IsNotExist(err) { c.Fatalf("[open] (expecting 'file does not exist' error) err: %v, volPath: %s", err, volPath) } } // Tests that a volume path that has a symlink exists in a container mounting it with `--volumes-from`. 
func (s *DockerSuite) TestRunVolumesFromSymlinkPath(c *check.C) { // This test cannot run on a Windows daemon as // Windows does not support symlinks inside a volume path testRequires(c, DaemonIsLinux) name := "docker-test-volumesfromsymlinkpath" prefix := "" dfContents := `FROM busybox RUN ln -s home /foo VOLUME ["/foo/bar"]` if daemonPlatform == "windows" { prefix = `c:` dfContents = `FROM ` + WindowsBaseImage + ` RUN mkdir c:\home RUN mklink /D c:\foo c:\home VOLUME ["c:/foo/bar"] ENTRYPOINT c:\windows\system32\cmd.exe` } buildCmd := exec.Command(dockerBinary, "build", "-t", name, "-") buildCmd.Stdin = strings.NewReader(dfContents) buildCmd.Dir = workingDirectory err := buildCmd.Run() if err != nil { c.Fatalf("could not build 'docker-test-volumesfromsymlinkpath': %v", err) } out, exitCode, err := dockerCmdWithError("run", "--name", "test-volumesfromsymlinkpath", name) if err != nil || exitCode != 0 { c.Fatalf("[run] (volume) err: %v, exitcode: %d, out: %s", err, exitCode, out) } _, exitCode, err = dockerCmdWithError("run", "--volumes-from", "test-volumesfromsymlinkpath", "busybox", "sh", "-c", "ls "+prefix+"/foo | grep -q bar") if err != nil || exitCode != 0 { c.Fatalf("[run] err: %v, exitcode: %d", err, exitCode) } } func (s *DockerSuite) TestRunExitCode(c *check.C) { var ( exit int err error ) _, exit, err = dockerCmdWithError("run", "busybox", "/bin/sh", "-c", "exit 72") if err == nil { c.Fatal("should not have a non nil error") } if exit != 72 { c.Fatalf("expected exit code 72 received %d", exit) } } func (s *DockerSuite) TestRunUserDefaults(c *check.C) { expected := "uid=0(root) gid=0(root)" if daemonPlatform == "windows" { expected = "uid=1000(ContainerAdministrator) gid=1000(ContainerAdministrator)" } out, _ := dockerCmd(c, "run", "busybox", "id") if !strings.Contains(out, expected) { c.Fatalf("expected '%s' got %s", expected, out) } } func (s *DockerSuite) TestRunUserByName(c *check.C) { // TODO Windows: This test cannot run on a Windows daemon as 
Windows does // not support the use of -u testRequires(c, DaemonIsLinux) out, _ := dockerCmd(c, "run", "-u", "root", "busybox", "id") if !strings.Contains(out, "uid=0(root) gid=0(root)") { c.Fatalf("expected root user got %s", out) } } func (s *DockerSuite) TestRunUserByID(c *check.C) { // TODO Windows: This test cannot run on a Windows daemon as Windows does // not support the use of -u testRequires(c, DaemonIsLinux) out, _ := dockerCmd(c, "run", "-u", "1", "busybox", "id") if !strings.Contains(out, "uid=1(daemon) gid=1(daemon)") { c.Fatalf("expected daemon user got %s", out) } } func (s *DockerSuite) TestRunUserByIDBig(c *check.C) { // TODO Windows: This test cannot run on a Windows daemon as Windows does // not support the use of -u testRequires(c, DaemonIsLinux, NotArm) out, _, err := dockerCmdWithError("run", "-u", "2147483648", "busybox", "id") if err == nil { c.Fatal("No error, but must be.", out) } if !strings.Contains(strings.ToUpper(out), strings.ToUpper(libcontainerUser.ErrRange.Error())) { c.Fatalf("expected error about uids range, got %s", out) } } func (s *DockerSuite) TestRunUserByIDNegative(c *check.C) { // TODO Windows: This test cannot run on a Windows daemon as Windows does // not support the use of -u testRequires(c, DaemonIsLinux) out, _, err := dockerCmdWithError("run", "-u", "-1", "busybox", "id") if err == nil { c.Fatal("No error, but must be.", out) } if !strings.Contains(strings.ToUpper(out), strings.ToUpper(libcontainerUser.ErrRange.Error())) { c.Fatalf("expected error about uids range, got %s", out) } } func (s *DockerSuite) TestRunUserByIDZero(c *check.C) { // TODO Windows: This test cannot run on a Windows daemon as Windows does // not support the use of -u testRequires(c, DaemonIsLinux) out, _, err := dockerCmdWithError("run", "-u", "0", "busybox", "id") if err != nil { c.Fatal(err, out) } if !strings.Contains(out, "uid=0(root) gid=0(root) groups=10(wheel)") { c.Fatalf("expected daemon user got %s", out) } } func (s *DockerSuite) 
TestRunUserNotFound(c *check.C) { // TODO Windows: This test cannot run on a Windows daemon as Windows does // not support the use of -u testRequires(c, DaemonIsLinux) _, _, err := dockerCmdWithError("run", "-u", "notme", "busybox", "id") if err == nil { c.Fatal("unknown user should cause container to fail") } } func (s *DockerSuite) TestRunTwoConcurrentContainers(c *check.C) { sleepTime := "2" group := sync.WaitGroup{} group.Add(2) errChan := make(chan error, 2) for i := 0; i < 2; i++ { go func() { defer group.Done() _, _, err := dockerCmdWithError("run", "busybox", "sleep", sleepTime) errChan <- err }() } group.Wait() close(errChan) for err := range errChan { c.Assert(err, check.IsNil) } } func (s *DockerSuite) TestRunEnvironment(c *check.C) { // TODO Windows: Environment handling is different between Linux and // Windows and this test relies currently on unix functionality. testRequires(c, DaemonIsLinux) cmd := exec.Command(dockerBinary, "run", "-h", "testing", "-e=FALSE=true", "-e=TRUE", "-e=TRICKY", "-e=HOME=", "busybox", "env") cmd.Env = append(os.Environ(), "TRUE=false", "TRICKY=tri\ncky\n", ) out, _, err := runCommandWithOutput(cmd) if err != nil { c.Fatal(err, out) } actualEnv := strings.Split(strings.TrimSpace(out), "\n") sort.Strings(actualEnv) goodEnv := []string{ "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", "HOSTNAME=testing", "FALSE=true", "TRUE=false", "TRICKY=tri", "cky", "", "HOME=/root", } sort.Strings(goodEnv) if len(goodEnv) != len(actualEnv) { c.Fatalf("Wrong environment: should be %d variables, not %d: %q", len(goodEnv), len(actualEnv), strings.Join(actualEnv, ", ")) } for i := range goodEnv { if actualEnv[i] != goodEnv[i] { c.Fatalf("Wrong environment variable: should be %s, not %s", goodEnv[i], actualEnv[i]) } } } func (s *DockerSuite) TestRunEnvironmentErase(c *check.C) { // TODO Windows: Environment handling is different between Linux and // Windows and this test relies currently on unix functionality. 
testRequires(c, DaemonIsLinux) // Test to make sure that when we use -e on env vars that are // not set in our local env that they're removed (if present) in // the container cmd := exec.Command(dockerBinary, "run", "-e", "FOO", "-e", "HOSTNAME", "busybox", "env") cmd.Env = appendBaseEnv(true) out, _, err := runCommandWithOutput(cmd) if err != nil { c.Fatal(err, out) } actualEnv := strings.Split(strings.TrimSpace(out), "\n") sort.Strings(actualEnv) goodEnv := []string{ "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", "HOME=/root", } sort.Strings(goodEnv) if len(goodEnv) != len(actualEnv) { c.Fatalf("Wrong environment: should be %d variables, not %d: %q", len(goodEnv), len(actualEnv), strings.Join(actualEnv, ", ")) } for i := range goodEnv { if actualEnv[i] != goodEnv[i] { c.Fatalf("Wrong environment variable: should be %s, not %s", goodEnv[i], actualEnv[i]) } } } func (s *DockerSuite) TestRunEnvironmentOverride(c *check.C) { // TODO Windows: Environment handling is different between Linux and // Windows and this test relies currently on unix functionality. 
testRequires(c, DaemonIsLinux) // Test to make sure that when we use -e on env vars that are // already in the env that we're overriding them cmd := exec.Command(dockerBinary, "run", "-e", "HOSTNAME", "-e", "HOME=/root2", "busybox", "env") cmd.Env = appendBaseEnv(true, "HOSTNAME=bar") out, _, err := runCommandWithOutput(cmd) if err != nil { c.Fatal(err, out) } actualEnv := strings.Split(strings.TrimSpace(out), "\n") sort.Strings(actualEnv) goodEnv := []string{ "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", "HOME=/root2", "HOSTNAME=bar", } sort.Strings(goodEnv) if len(goodEnv) != len(actualEnv) { c.Fatalf("Wrong environment: should be %d variables, not %d: %q", len(goodEnv), len(actualEnv), strings.Join(actualEnv, ", ")) } for i := range goodEnv { if actualEnv[i] != goodEnv[i] { c.Fatalf("Wrong environment variable: should be %s, not %s", goodEnv[i], actualEnv[i]) } } } func (s *DockerSuite) TestRunContainerNetwork(c *check.C) { if daemonPlatform == "windows" { // Windows busybox does not have ping. Use built in ping instead. dockerCmd(c, "run", WindowsBaseImage, "ping", "-n", "1", "127.0.0.1") } else { dockerCmd(c, "run", "busybox", "ping", "-c", "1", "127.0.0.1") } } func (s *DockerSuite) TestRunNetHostNotAllowedWithLinks(c *check.C) { // TODO Windows: This is Linux specific as --link is not supported and // this will be deprecated in favor of container networking model. testRequires(c, DaemonIsLinux, NotUserNamespace) dockerCmd(c, "run", "--name", "linked", "busybox", "true") _, _, err := dockerCmdWithError("run", "--net=host", "--link", "linked:linked", "busybox", "true") if err == nil { c.Fatal("Expected error") } } // #7851 hostname outside container shows FQDN, inside only shortname // For testing purposes it is not required to set host's hostname directly // and use "--net=host" (as the original issue submitter did), as the same // codepath is executed with "docker run -h <hostname>". 
Both were manually // tested, but this testcase takes the simpler path of using "run -h .." func (s *DockerSuite) TestRunFullHostnameSet(c *check.C) { // TODO Windows: -h is not yet functional. testRequires(c, DaemonIsLinux) out, _ := dockerCmd(c, "run", "-h", "foo.bar.baz", "busybox", "hostname") if actual := strings.Trim(out, "\r\n"); actual != "foo.bar.baz" { c.Fatalf("expected hostname 'foo.bar.baz', received %s", actual) } } func (s *DockerSuite) TestRunPrivilegedCanMknod(c *check.C) { // Not applicable for Windows as Windows daemon does not support // the concept of --privileged, and mknod is a Unix concept. testRequires(c, DaemonIsLinux, NotUserNamespace) out, _ := dockerCmd(c, "run", "--privileged", "busybox", "sh", "-c", "mknod /tmp/sda b 8 0 && echo ok") if actual := strings.Trim(out, "\r\n"); actual != "ok" { c.Fatalf("expected output ok received %s", actual) } } func (s *DockerSuite) TestRunUnprivilegedCanMknod(c *check.C) { // Not applicable for Windows as Windows daemon does not support // the concept of --privileged, and mknod is a Unix concept. 
testRequires(c, DaemonIsLinux, NotUserNamespace) out, _ := dockerCmd(c, "run", "busybox", "sh", "-c", "mknod /tmp/sda b 8 0 && echo ok") if actual := strings.Trim(out, "\r\n"); actual != "ok" { c.Fatalf("expected output ok received %s", actual) } } func (s *DockerSuite) TestRunCapDropInvalid(c *check.C) { // Not applicable for Windows as there is no concept of --cap-drop testRequires(c, DaemonIsLinux) out, _, err := dockerCmdWithError("run", "--cap-drop=CHPASS", "busybox", "ls") if err == nil { c.Fatal(err, out) } } func (s *DockerSuite) TestRunCapDropCannotMknod(c *check.C) { // Not applicable for Windows as there is no concept of --cap-drop or mknod testRequires(c, DaemonIsLinux) out, _, err := dockerCmdWithError("run", "--cap-drop=MKNOD", "busybox", "sh", "-c", "mknod /tmp/sda b 8 0 && echo ok") if err == nil { c.Fatal(err, out) } if actual := strings.Trim(out, "\r\n"); actual == "ok" { c.Fatalf("expected output not ok received %s", actual) } } func (s *DockerSuite) TestRunCapDropCannotMknodLowerCase(c *check.C) { // Not applicable for Windows as there is no concept of --cap-drop or mknod testRequires(c, DaemonIsLinux) out, _, err := dockerCmdWithError("run", "--cap-drop=mknod", "busybox", "sh", "-c", "mknod /tmp/sda b 8 0 && echo ok") if err == nil { c.Fatal(err, out) } if actual := strings.Trim(out, "\r\n"); actual == "ok" { c.Fatalf("expected output not ok received %s", actual) } } func (s *DockerSuite) TestRunCapDropALLCannotMknod(c *check.C) { // Not applicable for Windows as there is no concept of --cap-drop or mknod testRequires(c, DaemonIsLinux) out, _, err := dockerCmdWithError("run", "--cap-drop=ALL", "--cap-add=SETGID", "busybox", "sh", "-c", "mknod /tmp/sda b 8 0 && echo ok") if err == nil { c.Fatal(err, out) } if actual := strings.Trim(out, "\r\n"); actual == "ok" { c.Fatalf("expected output not ok received %s", actual) } } func (s *DockerSuite) TestRunCapDropALLAddMknodCanMknod(c *check.C) { // Not applicable for Windows as there is no concept of 
--cap-drop or mknod testRequires(c, DaemonIsLinux, NotUserNamespace) out, _ := dockerCmd(c, "run", "--cap-drop=ALL", "--cap-add=MKNOD", "--cap-add=SETGID", "busybox", "sh", "-c", "mknod /tmp/sda b 8 0 && echo ok") if actual := strings.Trim(out, "\r\n"); actual != "ok" { c.Fatalf("expected output ok received %s", actual) } } func (s *DockerSuite) TestRunCapAddInvalid(c *check.C) { // Not applicable for Windows as there is no concept of --cap-add testRequires(c, DaemonIsLinux) out, _, err := dockerCmdWithError("run", "--cap-add=CHPASS", "busybox", "ls") if err == nil { c.Fatal(err, out) } } func (s *DockerSuite) TestRunCapAddCanDownInterface(c *check.C) { // Not applicable for Windows as there is no concept of --cap-add testRequires(c, DaemonIsLinux) out, _ := dockerCmd(c, "run", "--cap-add=NET_ADMIN", "busybox", "sh", "-c", "ip link set eth0 down && echo ok") if actual := strings.Trim(out, "\r\n"); actual != "ok" { c.Fatalf("expected output ok received %s", actual) } } func (s *DockerSuite) TestRunCapAddALLCanDownInterface(c *check.C) { // Not applicable for Windows as there is no concept of --cap-add testRequires(c, DaemonIsLinux) out, _ := dockerCmd(c, "run", "--cap-add=ALL", "busybox", "sh", "-c", "ip link set eth0 down && echo ok") if actual := strings.Trim(out, "\r\n"); actual != "ok" { c.Fatalf("expected output ok received %s", actual) } } func (s *DockerSuite) TestRunCapAddALLDropNetAdminCanDownInterface(c *check.C) { // Not applicable for Windows as there is no concept of --cap-add testRequires(c, DaemonIsLinux) out, _, err := dockerCmdWithError("run", "--cap-add=ALL", "--cap-drop=NET_ADMIN", "busybox", "sh", "-c", "ip link set eth0 down && echo ok") if err == nil { c.Fatal(err, out) } if actual := strings.Trim(out, "\r\n"); actual == "ok" { c.Fatalf("expected output not ok received %s", actual) } } func (s *DockerSuite) TestRunGroupAdd(c *check.C) { // Not applicable for Windows as there is no concept of --group-add testRequires(c, DaemonIsLinux) out, _ := 
dockerCmd(c, "run", "--group-add=audio", "--group-add=staff", "--group-add=777", "busybox", "sh", "-c", "id") groupsList := "uid=0(root) gid=0(root) groups=10(wheel),29(audio),50(staff),777" if actual := strings.Trim(out, "\r\n"); actual != groupsList { c.Fatalf("expected output %s received %s", groupsList, actual) } } func (s *DockerSuite) TestRunPrivilegedCanMount(c *check.C) { // Not applicable for Windows as there is no concept of --privileged testRequires(c, DaemonIsLinux, NotUserNamespace) out, _ := dockerCmd(c, "run", "--privileged", "busybox", "sh", "-c", "mount -t tmpfs none /tmp && echo ok") if actual := strings.Trim(out, "\r\n"); actual != "ok" { c.Fatalf("expected output ok received %s", actual) } } func (s *DockerSuite) TestRunUnprivilegedCannotMount(c *check.C) { // Not applicable for Windows as there is no concept of unprivileged testRequires(c, DaemonIsLinux) out, _, err := dockerCmdWithError("run", "busybox", "sh", "-c", "mount -t tmpfs none /tmp && echo ok") if err == nil { c.Fatal(err, out) } if actual := strings.Trim(out, "\r\n"); actual == "ok" { c.Fatalf("expected output not ok received %s", actual) } } func (s *DockerSuite) TestRunSysNotWritableInNonPrivilegedContainers(c *check.C) { // Not applicable for Windows as there is no concept of unprivileged testRequires(c, DaemonIsLinux, NotArm) if _, code, err := dockerCmdWithError("run", "busybox", "touch", "/sys/kernel/profiling"); err == nil || code == 0 { c.Fatal("sys should not be writable in a non privileged container") } } func (s *DockerSuite) TestRunSysWritableInPrivilegedContainers(c *check.C) { // Not applicable for Windows as there is no concept of unprivileged testRequires(c, DaemonIsLinux, NotUserNamespace, NotArm) if _, code, err := dockerCmdWithError("run", "--privileged", "busybox", "touch", "/sys/kernel/profiling"); err != nil || code != 0 { c.Fatalf("sys should be writable in privileged container") } } func (s *DockerSuite) TestRunProcNotWritableInNonPrivilegedContainers(c 
*check.C) { // Not applicable for Windows as there is no concept of unprivileged testRequires(c, DaemonIsLinux) if _, code, err := dockerCmdWithError("run", "busybox", "touch", "/proc/sysrq-trigger"); err == nil || code == 0 { c.Fatal("proc should not be writable in a non privileged container") } } func (s *DockerSuite) TestRunProcWritableInPrivilegedContainers(c *check.C) { // Not applicable for Windows as there is no concept of --privileged testRequires(c, DaemonIsLinux, NotUserNamespace) if _, code := dockerCmd(c, "run", "--privileged", "busybox", "sh", "-c", "touch /proc/sysrq-trigger"); code != 0 { c.Fatalf("proc should be writable in privileged container") } } func (s *DockerSuite) TestRunDeviceNumbers(c *check.C) { // Not applicable on Windows as /dev/ is a Unix specific concept // TODO: NotUserNamespace could be removed here if "root" "root" is replaced w user testRequires(c, DaemonIsLinux, NotUserNamespace) out, _ := dockerCmd(c, "run", "busybox", "sh", "-c", "ls -l /dev/null") deviceLineFields := strings.Fields(out) deviceLineFields[6] = "" deviceLineFields[7] = "" deviceLineFields[8] = "" expected := []string{"crw-rw-rw-", "1", "root", "root", "1,", "3", "", "", "", "/dev/null"} if !(reflect.DeepEqual(deviceLineFields, expected)) { c.Fatalf("expected output\ncrw-rw-rw- 1 root root 1, 3 May 24 13:29 /dev/null\n received\n %s\n", out) } } func (s *DockerSuite) TestRunThatCharacterDevicesActLikeCharacterDevices(c *check.C) { // Not applicable on Windows as /dev/ is a Unix specific concept testRequires(c, DaemonIsLinux) out, _ := dockerCmd(c, "run", "busybox", "sh", "-c", "dd if=/dev/zero of=/zero bs=1k count=5 2> /dev/null ; du -h /zero") if actual := strings.Trim(out, "\r\n"); actual[0] == '0' { c.Fatalf("expected a new file called /zero to be create that is greater than 0 bytes long, but du says: %s", actual) } } func (s *DockerSuite) TestRunUnprivilegedWithChroot(c *check.C) { // Not applicable on Windows as it does not support chroot testRequires(c, 
DaemonIsLinux) dockerCmd(c, "run", "busybox", "chroot", "/", "true") } func (s *DockerSuite) TestRunAddingOptionalDevices(c *check.C) { // Not applicable on Windows as Windows does not support --device testRequires(c, DaemonIsLinux, NotUserNamespace) out, _ := dockerCmd(c, "run", "--device", "/dev/zero:/dev/nulo", "busybox", "sh", "-c", "ls /dev/nulo") if actual := strings.Trim(out, "\r\n"); actual != "/dev/nulo" { c.Fatalf("expected output /dev/nulo, received %s", actual) } } func (s *DockerSuite) TestRunAddingOptionalDevicesNoSrc(c *check.C) { // Not applicable on Windows as Windows does not support --device testRequires(c, DaemonIsLinux, NotUserNamespace) out, _ := dockerCmd(c, "run", "--device", "/dev/zero:rw", "busybox", "sh", "-c", "ls /dev/zero") if actual := strings.Trim(out, "\r\n"); actual != "/dev/zero" { c.Fatalf("expected output /dev/zero, received %s", actual) } } func (s *DockerSuite) TestRunAddingOptionalDevicesInvalidMode(c *check.C) { // Not applicable on Windows as Windows does not support --device testRequires(c, DaemonIsLinux, NotUserNamespace) _, _, err := dockerCmdWithError("run", "--device", "/dev/zero:ro", "busybox", "sh", "-c", "ls /dev/zero") if err == nil { c.Fatalf("run container with device mode ro should fail") } } func (s *DockerSuite) TestRunModeHostname(c *check.C) { // Not applicable on Windows as Windows does not support -h testRequires(c, SameHostDaemon, DaemonIsLinux, NotUserNamespace) out, _ := dockerCmd(c, "run", "-h=testhostname", "busybox", "cat", "/etc/hostname") if actual := strings.Trim(out, "\r\n"); actual != "testhostname" { c.Fatalf("expected 'testhostname', but says: %q", actual) } out, _ = dockerCmd(c, "run", "--net=host", "busybox", "cat", "/etc/hostname") hostname, err := os.Hostname() if err != nil { c.Fatal(err) } if actual := strings.Trim(out, "\r\n"); actual != hostname { c.Fatalf("expected %q, but says: %q", hostname, actual) } } func (s *DockerSuite) TestRunRootWorkdir(c *check.C) { out, _ := dockerCmd(c, 
"run", "--workdir", "/", "busybox", "pwd") expected := "/\n" if daemonPlatform == "windows" { expected = "C:" + expected } if out != expected { c.Fatalf("pwd returned %q (expected %s)", s, expected) } } func (s *DockerSuite) TestRunAllowBindMountingRoot(c *check.C) { if daemonPlatform == "windows" { // Windows busybox will fail with Permission Denied on items such as pagefile.sys dockerCmd(c, "run", "-v", `c:\:c:\host`, WindowsBaseImage, "cmd", "-c", "dir", `c:\host`) } else { dockerCmd(c, "run", "-v", "/:/host", "busybox", "ls", "/host") } } func (s *DockerSuite) TestRunDisallowBindMountingRootToRoot(c *check.C) { mount := "/:/" targetDir := "/host" if daemonPlatform == "windows" { mount = `c:\:c\` targetDir = "c:/host" // Forward slash as using busybox } out, _, err := dockerCmdWithError("run", "-v", mount, "busybox", "ls", targetDir) if err == nil { c.Fatal(out, err) } } // Verify that a container gets default DNS when only localhost resolvers exist func (s *DockerSuite) TestRunDNSDefaultOptions(c *check.C) { // Not applicable on Windows as this is testing Unix specific functionality testRequires(c, SameHostDaemon, DaemonIsLinux) // preserve original resolv.conf for restoring after test origResolvConf, err := ioutil.ReadFile("/etc/resolv.conf") if os.IsNotExist(err) { c.Fatalf("/etc/resolv.conf does not exist") } // defer restored original conf defer func() { if err := ioutil.WriteFile("/etc/resolv.conf", origResolvConf, 0644); err != nil { c.Fatal(err) } }() // test 3 cases: standard IPv4 localhost, commented out localhost, and IPv6 localhost // 2 are removed from the file at container start, and the 3rd (commented out) one is ignored by // GetNameservers(), leading to a replacement of nameservers with the default set tmpResolvConf := []byte("nameserver 127.0.0.1\n#nameserver 127.0.2.1\nnameserver ::1") if err := ioutil.WriteFile("/etc/resolv.conf", tmpResolvConf, 0644); err != nil { c.Fatal(err) } actual, _ := dockerCmd(c, "run", "busybox", "cat", 
"/etc/resolv.conf") // check that the actual defaults are appended to the commented out // localhost resolver (which should be preserved) // NOTE: if we ever change the defaults from google dns, this will break expected := "#nameserver 127.0.2.1\n\nnameserver 8.8.8.8\nnameserver 8.8.4.4\n" if actual != expected { c.Fatalf("expected resolv.conf be: %q, but was: %q", expected, actual) } } func (s *DockerSuite) TestRunDNSOptions(c *check.C) { // Not applicable on Windows as Windows does not support --dns*, or // the Unix-specific functionality of resolv.conf. testRequires(c, DaemonIsLinux) out, stderr, _ := dockerCmdWithStdoutStderr(c, "run", "--dns=127.0.0.1", "--dns-search=mydomain", "--dns-opt=ndots:9", "busybox", "cat", "/etc/resolv.conf") // The client will get a warning on stderr when setting DNS to a localhost address; verify this: if !strings.Contains(stderr, "Localhost DNS setting") { c.Fatalf("Expected warning on stderr about localhost resolver, but got %q", stderr) } actual := strings.Replace(strings.Trim(out, "\r\n"), "\n", " ", -1) if actual != "search mydomain nameserver 127.0.0.1 options ndots:9" { c.Fatalf("expected 'search mydomain nameserver 127.0.0.1 options ndots:9', but says: %q", actual) } out, stderr, _ = dockerCmdWithStdoutStderr(c, "run", "--dns=127.0.0.1", "--dns-search=.", "--dns-opt=ndots:3", "busybox", "cat", "/etc/resolv.conf") actual = strings.Replace(strings.Trim(strings.Trim(out, "\r\n"), " "), "\n", " ", -1) if actual != "nameserver 127.0.0.1 options ndots:3" { c.Fatalf("expected 'nameserver 127.0.0.1 options ndots:3', but says: %q", actual) } } func (s *DockerSuite) TestRunDNSRepeatOptions(c *check.C) { testRequires(c, DaemonIsLinux) out, _, _ := dockerCmdWithStdoutStderr(c, "run", "--dns=1.1.1.1", "--dns=2.2.2.2", "--dns-search=mydomain", "--dns-search=mydomain2", "--dns-opt=ndots:9", "--dns-opt=timeout:3", "busybox", "cat", "/etc/resolv.conf") actual := strings.Replace(strings.Trim(out, "\r\n"), "\n", " ", -1) if actual != "search 
mydomain mydomain2 nameserver 1.1.1.1 nameserver 2.2.2.2 options ndots:9 timeout:3" { c.Fatalf("expected 'search mydomain mydomain2 nameserver 1.1.1.1 nameserver 2.2.2.2 options ndots:9 timeout:3', but says: %q", actual) } } func (s *DockerSuite) TestRunDNSOptionsBasedOnHostResolvConf(c *check.C) { // Not applicable on Windows as testing Unix specific functionality testRequires(c, SameHostDaemon, DaemonIsLinux) origResolvConf, err := ioutil.ReadFile("/etc/resolv.conf") if os.IsNotExist(err) { c.Fatalf("/etc/resolv.conf does not exist") } hostNameservers := resolvconf.GetNameservers(origResolvConf, types.IP) hostSearch := resolvconf.GetSearchDomains(origResolvConf) var out string out, _ = dockerCmd(c, "run", "--dns=127.0.0.1", "busybox", "cat", "/etc/resolv.conf") if actualNameservers := resolvconf.GetNameservers([]byte(out), types.IP); string(actualNameservers[0]) != "127.0.0.1" { c.Fatalf("expected '127.0.0.1', but says: %q", string(actualNameservers[0])) } actualSearch := resolvconf.GetSearchDomains([]byte(out)) if len(actualSearch) != len(hostSearch) { c.Fatalf("expected %q search domain(s), but it has: %q", len(hostSearch), len(actualSearch)) } for i := range actualSearch { if actualSearch[i] != hostSearch[i] { c.Fatalf("expected %q domain, but says: %q", actualSearch[i], hostSearch[i]) } } out, _ = dockerCmd(c, "run", "--dns-search=mydomain", "busybox", "cat", "/etc/resolv.conf") actualNameservers := resolvconf.GetNameservers([]byte(out), types.IP) if len(actualNameservers) != len(hostNameservers) { c.Fatalf("expected %q nameserver(s), but it has: %q", len(hostNameservers), len(actualNameservers)) } for i := range actualNameservers { if actualNameservers[i] != hostNameservers[i] { c.Fatalf("expected %q nameserver, but says: %q", actualNameservers[i], hostNameservers[i]) } } if actualSearch = resolvconf.GetSearchDomains([]byte(out)); string(actualSearch[0]) != "mydomain" { c.Fatalf("expected 'mydomain', but says: %q", string(actualSearch[0])) } // test with 
file tmpResolvConf := []byte("search example.com\nnameserver 12.34.56.78\nnameserver 127.0.0.1") if err := ioutil.WriteFile("/etc/resolv.conf", tmpResolvConf, 0644); err != nil { c.Fatal(err) } // put the old resolvconf back defer func() { if err := ioutil.WriteFile("/etc/resolv.conf", origResolvConf, 0644); err != nil { c.Fatal(err) } }() resolvConf, err := ioutil.ReadFile("/etc/resolv.conf") if os.IsNotExist(err) { c.Fatalf("/etc/resolv.conf does not exist") } hostNameservers = resolvconf.GetNameservers(resolvConf, types.IP) hostSearch = resolvconf.GetSearchDomains(resolvConf) out, _ = dockerCmd(c, "run", "busybox", "cat", "/etc/resolv.conf") if actualNameservers = resolvconf.GetNameservers([]byte(out), types.IP); string(actualNameservers[0]) != "12.34.56.78" || len(actualNameservers) != 1 { c.Fatalf("expected '12.34.56.78', but has: %v", actualNameservers) } actualSearch = resolvconf.GetSearchDomains([]byte(out)) if len(actualSearch) != len(hostSearch) { c.Fatalf("expected %q search domain(s), but it has: %q", len(hostSearch), len(actualSearch)) } for i := range actualSearch { if actualSearch[i] != hostSearch[i] { c.Fatalf("expected %q domain, but says: %q", actualSearch[i], hostSearch[i]) } } } // Test to see if a non-root user can resolve a DNS name. Also // check if the container resolv.conf file has at least 0644 perm. 
func (s *DockerSuite) TestRunNonRootUserResolvName(c *check.C) { // Not applicable on Windows as Windows does not support --user testRequires(c, SameHostDaemon, Network, DaemonIsLinux, NotArm) dockerCmd(c, "run", "--name=testperm", "--user=nobody", "busybox", "nslookup", "apt.dockerproject.org") cID, err := getIDByName("testperm") if err != nil { c.Fatal(err) } fmode := (os.FileMode)(0644) finfo, err := os.Stat(containerStorageFile(cID, "resolv.conf")) if err != nil { c.Fatal(err) } if (finfo.Mode() & fmode) != fmode { c.Fatalf("Expected container resolv.conf mode to be at least %s, instead got %s", fmode.String(), finfo.Mode().String()) } } // Test if container resolv.conf gets updated the next time it restarts // if host /etc/resolv.conf has changed. This only applies if the container // uses the host's /etc/resolv.conf and does not have any dns options provided. func (s *DockerSuite) TestRunResolvconfUpdate(c *check.C) { // Not applicable on Windows as testing unix specific functionality testRequires(c, SameHostDaemon, DaemonIsLinux) c.Skip("Unstable test, to be re-activated once #19937 is resolved") tmpResolvConf := []byte("search pommesfrites.fr\nnameserver 12.34.56.78\n") tmpLocalhostResolvConf := []byte("nameserver 127.0.0.1") //take a copy of resolv.conf for restoring after test completes resolvConfSystem, err := ioutil.ReadFile("/etc/resolv.conf") if err != nil { c.Fatal(err) } // This test case is meant to test monitoring resolv.conf when it is // a regular file not a bind mounc. So we unmount resolv.conf and replace // it with a file containing the original settings. mounted, err := mount.Mounted("/etc/resolv.conf") if err != nil { c.Fatal(err) } if mounted { cmd := exec.Command("umount", "/etc/resolv.conf") if _, err = runCommand(cmd); err != nil { c.Fatal(err) } } //cleanup defer func() { if err := ioutil.WriteFile("/etc/resolv.conf", resolvConfSystem, 0644); err != nil { c.Fatal(err) } }() //1. 
test that a restarting container gets an updated resolv.conf dockerCmd(c, "run", "--name=first", "busybox", "true") containerID1, err := getIDByName("first") if err != nil { c.Fatal(err) } // replace resolv.conf with our temporary copy bytesResolvConf := []byte(tmpResolvConf) if err := ioutil.WriteFile("/etc/resolv.conf", bytesResolvConf, 0644); err != nil { c.Fatal(err) } // start the container again to pickup changes dockerCmd(c, "start", "first") // check for update in container containerResolv, err := readContainerFile(containerID1, "resolv.conf") if err != nil { c.Fatal(err) } if !bytes.Equal(containerResolv, bytesResolvConf) { c.Fatalf("Restarted container does not have updated resolv.conf; expected %q, got %q", tmpResolvConf, string(containerResolv)) } /* //make a change to resolv.conf (in this case replacing our tmp copy with orig copy) if err := ioutil.WriteFile("/etc/resolv.conf", resolvConfSystem, 0644); err != nil { c.Fatal(err) } */ //2. test that a restarting container does not receive resolv.conf updates // if it modified the container copy of the starting point resolv.conf dockerCmd(c, "run", "--name=second", "busybox", "sh", "-c", "echo 'search mylittlepony.com' >>/etc/resolv.conf") containerID2, err := getIDByName("second") if err != nil { c.Fatal(err) } //make a change to resolv.conf (in this case replacing our tmp copy with orig copy) if err := ioutil.WriteFile("/etc/resolv.conf", resolvConfSystem, 0644); err != nil { c.Fatal(err) } // start the container again dockerCmd(c, "start", "second") // check for update in container containerResolv, err = readContainerFile(containerID2, "resolv.conf") if err != nil { c.Fatal(err) } if bytes.Equal(containerResolv, resolvConfSystem) { c.Fatalf("Container's resolv.conf should not have been updated with host resolv.conf: %q", string(containerResolv)) } //3. 
test that a running container's resolv.conf is not modified while running out, _ := dockerCmd(c, "run", "-d", "busybox", "top") runningContainerID := strings.TrimSpace(out) // replace resolv.conf if err := ioutil.WriteFile("/etc/resolv.conf", bytesResolvConf, 0644); err != nil { c.Fatal(err) } // check for update in container containerResolv, err = readContainerFile(runningContainerID, "resolv.conf") if err != nil { c.Fatal(err) } if bytes.Equal(containerResolv, bytesResolvConf) { c.Fatalf("Running container should not have updated resolv.conf; expected %q, got %q", string(resolvConfSystem), string(containerResolv)) } //4. test that a running container's resolv.conf is updated upon restart // (the above container is still running..) dockerCmd(c, "restart", runningContainerID) // check for update in container containerResolv, err = readContainerFile(runningContainerID, "resolv.conf") if err != nil { c.Fatal(err) } if !bytes.Equal(containerResolv, bytesResolvConf) { c.Fatalf("Restarted container should have updated resolv.conf; expected %q, got %q", string(bytesResolvConf), string(containerResolv)) } //5. 
test that additions of a localhost resolver are cleaned from // host resolv.conf before updating container's resolv.conf copies // replace resolv.conf with a localhost-only nameserver copy bytesResolvConf = []byte(tmpLocalhostResolvConf) if err = ioutil.WriteFile("/etc/resolv.conf", bytesResolvConf, 0644); err != nil { c.Fatal(err) } // start the container again to pickup changes dockerCmd(c, "start", "first") // our first exited container ID should have been updated, but with default DNS // after the cleanup of resolv.conf found only a localhost nameserver: containerResolv, err = readContainerFile(containerID1, "resolv.conf") if err != nil { c.Fatal(err) } expected := "\nnameserver 8.8.8.8\nnameserver 8.8.4.4\n" if !bytes.Equal(containerResolv, []byte(expected)) { c.Fatalf("Container does not have cleaned/replaced DNS in resolv.conf; expected %q, got %q", expected, string(containerResolv)) } //6. Test that replacing (as opposed to modifying) resolv.conf triggers an update // of containers' resolv.conf. 
// Restore the original resolv.conf if err := ioutil.WriteFile("/etc/resolv.conf", resolvConfSystem, 0644); err != nil { c.Fatal(err) } // Run the container so it picks up the old settings dockerCmd(c, "run", "--name=third", "busybox", "true") containerID3, err := getIDByName("third") if err != nil { c.Fatal(err) } // Create a modified resolv.conf.aside and override resolv.conf with it bytesResolvConf = []byte(tmpResolvConf) if err := ioutil.WriteFile("/etc/resolv.conf.aside", bytesResolvConf, 0644); err != nil { c.Fatal(err) } err = os.Rename("/etc/resolv.conf.aside", "/etc/resolv.conf") if err != nil { c.Fatal(err) } // start the container again to pickup changes dockerCmd(c, "start", "third") // check for update in container containerResolv, err = readContainerFile(containerID3, "resolv.conf") if err != nil { c.Fatal(err) } if !bytes.Equal(containerResolv, bytesResolvConf) { c.Fatalf("Stopped container does not have updated resolv.conf; expected\n%q\n got\n%q", tmpResolvConf, string(containerResolv)) } //cleanup, restore original resolv.conf happens in defer func() } func (s *DockerSuite) TestRunAddHost(c *check.C) { // Not applicable on Windows as it does not support --add-host testRequires(c, DaemonIsLinux) out, _ := dockerCmd(c, "run", "--add-host=extra:86.75.30.9", "busybox", "grep", "extra", "/etc/hosts") actual := strings.Trim(out, "\r\n") if actual != "86.75.30.9\textra" { c.Fatalf("expected '86.75.30.9\textra', but says: %q", actual) } } // Regression test for #6983 func (s *DockerSuite) TestRunAttachStdErrOnlyTTYMode(c *check.C) { _, exitCode := dockerCmd(c, "run", "-t", "-a", "stderr", "busybox", "true") if exitCode != 0 { c.Fatalf("Container should have exited with error code 0") } } // Regression test for #6983 func (s *DockerSuite) TestRunAttachStdOutOnlyTTYMode(c *check.C) { _, exitCode := dockerCmd(c, "run", "-t", "-a", "stdout", "busybox", "true") if exitCode != 0 { c.Fatalf("Container should have exited with error code 0") } } // Regression test 
for #6983
func (s *DockerSuite) TestRunAttachStdOutAndErrTTYMode(c *check.C) {
	_, exitCode := dockerCmd(c, "run", "-t", "-a", "stdout", "-a", "stderr", "busybox", "true")
	if exitCode != 0 {
		c.Fatalf("Container should have exited with error code 0")
	}
}

// Test for #10388 - this will run the same test as TestRunAttachStdOutAndErrTTYMode
// but using --attach instead of -a to make sure we read the flag correctly
func (s *DockerSuite) TestRunAttachWithDetach(c *check.C) {
	cmd := exec.Command(dockerBinary, "run", "-d", "--attach", "stdout", "busybox", "true")
	_, stderr, _, err := runCommandWithStdoutStderr(cmd)
	if err == nil {
		c.Fatal("Container should have exited with error code different than 0")
	} else if !strings.Contains(stderr, "Conflicting options: -a and -d") {
		c.Fatal("Should have been returned an error with conflicting options -a and -d")
	}
}

func (s *DockerSuite) TestRunState(c *check.C) {
	// TODO Windows: This needs some rework as Windows busybox does not support top
	testRequires(c, DaemonIsLinux)
	out, _ := dockerCmd(c, "run", "-d", "busybox", "top")

	id := strings.TrimSpace(out)
	state := inspectField(c, id, "State.Running")
	if state != "true" {
		c.Fatal("Container state is 'not running'")
	}
	pid1 := inspectField(c, id, "State.Pid")
	if pid1 == "0" {
		c.Fatal("Container state Pid 0")
	}

	dockerCmd(c, "stop", id)
	state = inspectField(c, id, "State.Running")
	if state != "false" {
		c.Fatal("Container state is 'running'")
	}
	pid2 := inspectField(c, id, "State.Pid")
	if pid2 == pid1 {
		c.Fatalf("Container state Pid %s, but expected %s", pid2, pid1)
	}

	dockerCmd(c, "start", id)
	state = inspectField(c, id, "State.Running")
	if state != "true" {
		c.Fatal("Container state is 'not running'")
	}
	pid3 := inspectField(c, id, "State.Pid")
	if pid3 == pid1 {
		// BUG FIX: report the value actually under test (pid3); the message
		// previously printed pid2, which is misleading when this check fires.
		c.Fatalf("Container state Pid %s, but expected %s", pid3, pid1)
	}
}

// Test for #1737
func (s *DockerSuite) TestRunCopyVolumeUIDGID(c *check.C) {
	// Not applicable on Windows as it does not support uid or gid in this way
testRequires(c, DaemonIsLinux) name := "testrunvolumesuidgid" _, err := buildImage(name, `FROM busybox RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd RUN echo 'dockerio:x:1001:' >> /etc/group RUN mkdir -p /hello && touch /hello/test && chown dockerio.dockerio /hello`, true) if err != nil { c.Fatal(err) } // Test that the uid and gid is copied from the image to the volume out, _ := dockerCmd(c, "run", "--rm", "-v", "/hello", name, "sh", "-c", "ls -l / | grep hello | awk '{print $3\":\"$4}'") out = strings.TrimSpace(out) if out != "dockerio:dockerio" { c.Fatalf("Wrong /hello ownership: %s, expected dockerio:dockerio", out) } } // Test for #1582 func (s *DockerSuite) TestRunCopyVolumeContent(c *check.C) { // TODO Windows, post RS1. Windows does not yet support volume functionality // that copies from the image to the volume. testRequires(c, DaemonIsLinux) name := "testruncopyvolumecontent" _, err := buildImage(name, `FROM busybox RUN mkdir -p /hello/local && echo hello > /hello/local/world`, true) if err != nil { c.Fatal(err) } // Test that the content is copied from the image to the volume out, _ := dockerCmd(c, "run", "--rm", "-v", "/hello", name, "find", "/hello") if !(strings.Contains(out, "/hello/local/world") && strings.Contains(out, "/hello/local")) { c.Fatal("Container failed to transfer content to volume") } } func (s *DockerSuite) TestRunCleanupCmdOnEntrypoint(c *check.C) { name := "testrunmdcleanuponentrypoint" if _, err := buildImage(name, `FROM busybox ENTRYPOINT ["echo"] CMD ["testingpoint"]`, true); err != nil { c.Fatal(err) } out, exit := dockerCmd(c, "run", "--entrypoint", "whoami", name) if exit != 0 { c.Fatalf("expected exit code 0 received %d, out: %q", exit, out) } out = strings.TrimSpace(out) expected := "root" if daemonPlatform == "windows" { if strings.Contains(WindowsBaseImage, "windowsservercore") { expected = `user manager\containeradministrator` } else { expected = `ContainerAdministrator` // nanoserver } } if out != 
expected { c.Fatalf("Expected output %s, got %q. %s", expected, out, WindowsBaseImage) } } // TestRunWorkdirExistsAndIsFile checks that if 'docker run -w' with existing file can be detected func (s *DockerSuite) TestRunWorkdirExistsAndIsFile(c *check.C) { existingFile := "/bin/cat" expected := "not a directory" if daemonPlatform == "windows" { existingFile = `\windows\system32\ntdll.dll` expected = `Cannot mkdir: \windows\system32\ntdll.dll is not a directory.` } out, exitCode, err := dockerCmdWithError("run", "-w", existingFile, "busybox") if !(err != nil && exitCode == 125 && strings.Contains(out, expected)) { c.Fatalf("Existing binary as a directory should error out with exitCode 125; we got: %s, exitCode: %d", out, exitCode) } } func (s *DockerSuite) TestRunExitOnStdinClose(c *check.C) { name := "testrunexitonstdinclose" meow := "/bin/cat" delay := 60 if daemonPlatform == "windows" { meow = "cat" } runCmd := exec.Command(dockerBinary, "run", "--name", name, "-i", "busybox", meow) stdin, err := runCmd.StdinPipe() if err != nil { c.Fatal(err) } stdout, err := runCmd.StdoutPipe() if err != nil { c.Fatal(err) } if err := runCmd.Start(); err != nil { c.Fatal(err) } if _, err := stdin.Write([]byte("hello\n")); err != nil { c.Fatal(err) } r := bufio.NewReader(stdout) line, err := r.ReadString('\n') if err != nil { c.Fatal(err) } line = strings.TrimSpace(line) if line != "hello" { c.Fatalf("Output should be 'hello', got '%q'", line) } if err := stdin.Close(); err != nil { c.Fatal(err) } finish := make(chan error) go func() { finish <- runCmd.Wait() close(finish) }() select { case err := <-finish: c.Assert(err, check.IsNil) case <-time.After(time.Duration(delay) * time.Second): c.Fatal("docker run failed to exit on stdin close") } state := inspectField(c, name, "State.Running") if state != "false" { c.Fatal("Container must be stopped after stdin closing") } } // Test run -i --restart xxx doesn't hang func (s *DockerSuite) TestRunInteractiveWithRestartPolicy(c *check.C) 
{ name := "test-inter-restart" result := icmd.StartCmd(icmd.Cmd{ Command: []string{dockerBinary, "run", "-i", "--name", name, "--restart=always", "busybox", "sh"}, Stdin: bytes.NewBufferString("exit 11"), }) c.Assert(result.Error, checker.IsNil) defer func() { dockerCmdWithResult("stop", name).Assert(c, icmd.Success) }() result = icmd.WaitOnCmd(60*time.Second, result) c.Assert(result, icmd.Matches, icmd.Expected{ExitCode: 11}) } // Test for #2267 func (s *DockerSuite) TestRunWriteHostsFileAndNotCommit(c *check.C) { // Cannot run on Windows as Windows does not support diff. testRequires(c, DaemonIsLinux) name := "writehosts" out, _ := dockerCmd(c, "run", "--name", name, "busybox", "sh", "-c", "echo test2267 >> /etc/hosts && cat /etc/hosts") if !strings.Contains(out, "test2267") { c.Fatal("/etc/hosts should contain 'test2267'") } out, _ = dockerCmd(c, "diff", name) if len(strings.Trim(out, "\r\n")) != 0 && !eqToBaseDiff(out, c) { c.Fatal("diff should be empty") } } func eqToBaseDiff(out string, c *check.C) bool { name := "eqToBaseDiff" + stringutils.GenerateRandomAlphaOnlyString(32) dockerCmd(c, "run", "--name", name, "busybox", "echo", "hello") cID, err := getIDByName(name) c.Assert(err, check.IsNil) baseDiff, _ := dockerCmd(c, "diff", cID) baseArr := strings.Split(baseDiff, "\n") sort.Strings(baseArr) outArr := strings.Split(out, "\n") sort.Strings(outArr) return sliceEq(baseArr, outArr) } func sliceEq(a, b []string) bool { if len(a) != len(b) { return false } for i := range a { if a[i] != b[i] { return false } } return true } // Test for #2267 func (s *DockerSuite) TestRunWriteHostnameFileAndNotCommit(c *check.C) { // Cannot run on Windows as Windows does not support diff. 
testRequires(c, DaemonIsLinux) name := "writehostname" out, _ := dockerCmd(c, "run", "--name", name, "busybox", "sh", "-c", "echo test2267 >> /etc/hostname && cat /etc/hostname") if !strings.Contains(out, "test2267") { c.Fatal("/etc/hostname should contain 'test2267'") } out, _ = dockerCmd(c, "diff", name) if len(strings.Trim(out, "\r\n")) != 0 && !eqToBaseDiff(out, c) { c.Fatal("diff should be empty") } } // Test for #2267 func (s *DockerSuite) TestRunWriteResolvFileAndNotCommit(c *check.C) { // Cannot run on Windows as Windows does not support diff. testRequires(c, DaemonIsLinux) name := "writeresolv" out, _ := dockerCmd(c, "run", "--name", name, "busybox", "sh", "-c", "echo test2267 >> /etc/resolv.conf && cat /etc/resolv.conf") if !strings.Contains(out, "test2267") { c.Fatal("/etc/resolv.conf should contain 'test2267'") } out, _ = dockerCmd(c, "diff", name) if len(strings.Trim(out, "\r\n")) != 0 && !eqToBaseDiff(out, c) { c.Fatal("diff should be empty") } } func (s *DockerSuite) TestRunWithBadDevice(c *check.C) { // Cannot run on Windows as Windows does not support --device testRequires(c, DaemonIsLinux) name := "baddevice" out, _, err := dockerCmdWithError("run", "--name", name, "--device", "/etc", "busybox", "true") if err == nil { c.Fatal("Run should fail with bad device") } expected := `"/etc": not a device node` if !strings.Contains(out, expected) { c.Fatalf("Output should contain %q, actual out: %q", expected, out) } } func (s *DockerSuite) TestRunEntrypoint(c *check.C) { name := "entrypoint" out, _ := dockerCmd(c, "run", "--name", name, "--entrypoint", "echo", "busybox", "-n", "foobar") expected := "foobar" if out != expected { c.Fatalf("Output should be %q, actual out: %q", expected, out) } } func (s *DockerSuite) TestRunBindMounts(c *check.C) { testRequires(c, SameHostDaemon) if daemonPlatform == "linux" { testRequires(c, DaemonIsLinux, NotUserNamespace) } prefix, _ := getPrefixAndSlashFromDaemonPlatform() tmpDir, err := ioutil.TempDir("", 
"docker-test-container") if err != nil { c.Fatal(err) } defer os.RemoveAll(tmpDir) writeFile(path.Join(tmpDir, "touch-me"), "", c) // Test reading from a read-only bind mount out, _ := dockerCmd(c, "run", "-v", fmt.Sprintf("%s:%s/tmp:ro", tmpDir, prefix), "busybox", "ls", prefix+"/tmp") if !strings.Contains(out, "touch-me") { c.Fatal("Container failed to read from bind mount") } // test writing to bind mount if daemonPlatform == "windows" { dockerCmd(c, "run", "-v", fmt.Sprintf(`%s:c:\tmp:rw`, tmpDir), "busybox", "touch", "c:/tmp/holla") } else { dockerCmd(c, "run", "-v", fmt.Sprintf("%s:/tmp:rw", tmpDir), "busybox", "touch", "/tmp/holla") } readFile(path.Join(tmpDir, "holla"), c) // Will fail if the file doesn't exist // test mounting to an illegal destination directory _, _, err = dockerCmdWithError("run", "-v", fmt.Sprintf("%s:.", tmpDir), "busybox", "ls", ".") if err == nil { c.Fatal("Container bind mounted illegal directory") } // Windows does not (and likely never will) support mounting a single file if daemonPlatform != "windows" { // test mount a file dockerCmd(c, "run", "-v", fmt.Sprintf("%s/holla:/tmp/holla:rw", tmpDir), "busybox", "sh", "-c", "echo -n 'yotta' > /tmp/holla") content := readFile(path.Join(tmpDir, "holla"), c) // Will fail if the file doesn't exist expected := "yotta" if content != expected { c.Fatalf("Output should be %q, actual out: %q", expected, content) } } } // Ensure that CIDFile gets deleted if it's empty // Perform this test by making `docker run` fail func (s *DockerSuite) TestRunCidFileCleanupIfEmpty(c *check.C) { // Skip on Windows. Base image on Windows has a CMD set in the image. testRequires(c, DaemonIsLinux) tmpDir, err := ioutil.TempDir("", "TestRunCidFile") if err != nil { c.Fatal(err) } defer os.RemoveAll(tmpDir) tmpCidFile := path.Join(tmpDir, "cid") image := "emptyfs" if daemonPlatform == "windows" { // Windows can't support an emptyfs image. 
Just use the regular Windows image image = WindowsBaseImage } out, _, err := dockerCmdWithError("run", "--cidfile", tmpCidFile, image) if err == nil { c.Fatalf("Run without command must fail. out=%s", out) } else if !strings.Contains(out, "No command specified") { c.Fatalf("Run without command failed with wrong output. out=%s\nerr=%v", out, err) } if _, err := os.Stat(tmpCidFile); err == nil { c.Fatalf("empty CIDFile %q should've been deleted", tmpCidFile) } } // #2098 - Docker cidFiles only contain short version of the containerId //sudo docker run --cidfile /tmp/docker_tesc.cid ubuntu echo "test" // TestRunCidFile tests that run --cidfile returns the longid func (s *DockerSuite) TestRunCidFileCheckIDLength(c *check.C) { tmpDir, err := ioutil.TempDir("", "TestRunCidFile") if err != nil { c.Fatal(err) } tmpCidFile := path.Join(tmpDir, "cid") defer os.RemoveAll(tmpDir) out, _ := dockerCmd(c, "run", "-d", "--cidfile", tmpCidFile, "busybox", "true") id := strings.TrimSpace(out) buffer, err := ioutil.ReadFile(tmpCidFile) if err != nil { c.Fatal(err) } cid := string(buffer) if len(cid) != 64 { c.Fatalf("--cidfile should be a long id, not %q", id) } if cid != id { c.Fatalf("cid must be equal to %s, got %s", id, cid) } } func (s *DockerSuite) TestRunSetMacAddress(c *check.C) { mac := "12:34:56:78:9a:bc" var out string if daemonPlatform == "windows" { out, _ = dockerCmd(c, "run", "-i", "--rm", fmt.Sprintf("--mac-address=%s", mac), "busybox", "sh", "-c", "ipconfig /all | grep 'Physical Address' | awk '{print $12}'") mac = strings.Replace(strings.ToUpper(mac), ":", "-", -1) // To Windows-style MACs } else { out, _ = dockerCmd(c, "run", "-i", "--rm", fmt.Sprintf("--mac-address=%s", mac), "busybox", "/bin/sh", "-c", "ip link show eth0 | tail -1 | awk '{print $2}'") } actualMac := strings.TrimSpace(out) if actualMac != mac { c.Fatalf("Set MAC address with --mac-address failed. 
The container has an incorrect MAC address: %q, expected: %q", actualMac, mac) } } func (s *DockerSuite) TestRunInspectMacAddress(c *check.C) { // TODO Windows. Network settings are not propagated back to inspect. testRequires(c, DaemonIsLinux) mac := "12:34:56:78:9a:bc" out, _ := dockerCmd(c, "run", "-d", "--mac-address="+mac, "busybox", "top") id := strings.TrimSpace(out) inspectedMac := inspectField(c, id, "NetworkSettings.Networks.bridge.MacAddress") if inspectedMac != mac { c.Fatalf("docker inspect outputs wrong MAC address: %q, should be: %q", inspectedMac, mac) } } // test docker run use an invalid mac address func (s *DockerSuite) TestRunWithInvalidMacAddress(c *check.C) { out, _, err := dockerCmdWithError("run", "--mac-address", "92:d0:c6:0a:29", "busybox") //use an invalid mac address should with an error out if err == nil || !strings.Contains(out, "is not a valid mac address") { c.Fatalf("run with an invalid --mac-address should with error out") } } func (s *DockerSuite) TestRunDeallocatePortOnMissingIptablesRule(c *check.C) { // TODO Windows. Network settings are not propagated back to inspect. testRequires(c, SameHostDaemon, DaemonIsLinux) out, _ := dockerCmd(c, "run", "-d", "-p", "23:23", "busybox", "top") id := strings.TrimSpace(out) ip := inspectField(c, id, "NetworkSettings.Networks.bridge.IPAddress") iptCmd := exec.Command("iptables", "-D", "DOCKER", "-d", fmt.Sprintf("%s/32", ip), "!", "-i", "docker0", "-o", "docker0", "-p", "tcp", "-m", "tcp", "--dport", "23", "-j", "ACCEPT") out, _, err := runCommandWithOutput(iptCmd) if err != nil { c.Fatal(err, out) } if err := deleteContainer(id); err != nil { c.Fatal(err) } dockerCmd(c, "run", "-d", "-p", "23:23", "busybox", "top") } func (s *DockerSuite) TestRunPortInUse(c *check.C) { // TODO Windows. The duplicate NAT message returned by Windows will be // changing as is currently completely undecipherable. Does need modifying // to run sh rather than top though as top isn't in Windows busybox. 
	testRequires(c, SameHostDaemon, DaemonIsLinux)
	port := "1234"
	dockerCmd(c, "run", "-d", "-p", port+":80", "busybox", "top")

	// A second bind on the same host port must be rejected by the daemon.
	out, _, err := dockerCmdWithError("run", "-d", "-p", port+":80", "busybox", "top")
	if err == nil {
		c.Fatalf("Binding on used port must fail")
	}
	if !strings.Contains(out, "port is already allocated") {
		c.Fatalf("Out must be about \"port is already allocated\", got %s", out)
	}
}

// https://github.com/docker/docker/issues/12148
func (s *DockerSuite) TestRunAllocatePortInReservedRange(c *check.C) {
	// TODO Windows. -P is not yet supported
	testRequires(c, DaemonIsLinux)
	// allocate a dynamic port to get the most recent
	out, _ := dockerCmd(c, "run", "-d", "-P", "-p", "80", "busybox", "top")

	id := strings.TrimSpace(out)
	out, _ = dockerCmd(c, "port", id, "80")

	strPort := strings.Split(strings.TrimSpace(out), ":")[1]
	port, err := strconv.ParseInt(strPort, 10, 64)
	if err != nil {
		c.Fatalf("invalid port, got: %s, error: %s", strPort, err)
	}

	// allocate a static port and a dynamic port together, with static port
	// takes the next recent port in dynamic port range.
	dockerCmd(c, "run", "-d", "-P", "-p", "80", "-p", fmt.Sprintf("%d:8080", port+1), "busybox", "top")
}

// Regression test for #7792
func (s *DockerSuite) TestRunMountOrdering(c *check.C) {
	// TODO Windows: Post RS1. Windows does not support nested mounts.
	testRequires(c, SameHostDaemon, DaemonIsLinux, NotUserNamespace)
	prefix, _ := getPrefixAndSlashFromDaemonPlatform()

	tmpDir, err := ioutil.TempDir("", "docker_nested_mount_test")
	if err != nil {
		c.Fatal(err)
	}
	defer os.RemoveAll(tmpDir)

	tmpDir2, err := ioutil.TempDir("", "docker_nested_mount_test2")
	if err != nil {
		c.Fatal(err)
	}
	defer os.RemoveAll(tmpDir2)

	// Create a temporary tmpfs mount.
	fooDir := filepath.Join(tmpDir, "foo")
	if err := os.MkdirAll(filepath.Join(tmpDir, "foo"), 0755); err != nil {
		c.Fatalf("failed to mkdir at %s - %s", fooDir, err)
	}

	if err := ioutil.WriteFile(fmt.Sprintf("%s/touch-me", fooDir), []byte{}, 0644); err != nil {
		c.Fatal(err)
	}

	if err := ioutil.WriteFile(fmt.Sprintf("%s/touch-me", tmpDir), []byte{}, 0644); err != nil {
		c.Fatal(err)
	}

	if err := ioutil.WriteFile(fmt.Sprintf("%s/touch-me", tmpDir2), []byte{}, 0644); err != nil {
		c.Fatal(err)
	}

	// Mount the nested volumes and check every marker file is visible,
	// regardless of the order the binds were declared in.
	dockerCmd(c, "run",
		"-v", fmt.Sprintf("%s:"+prefix+"/tmp", tmpDir),
		"-v", fmt.Sprintf("%s:"+prefix+"/tmp/foo", fooDir),
		"-v", fmt.Sprintf("%s:"+prefix+"/tmp/tmp2", tmpDir2),
		"-v", fmt.Sprintf("%s:"+prefix+"/tmp/tmp2/foo", fooDir),
		"busybox:latest", "sh", "-c",
		"ls "+prefix+"/tmp/touch-me && ls "+prefix+"/tmp/foo/touch-me && ls "+prefix+"/tmp/tmp2/touch-me && ls "+prefix+"/tmp/tmp2/foo/touch-me")
}

// Regression test for https://github.com/docker/docker/issues/8259
func (s *DockerSuite) TestRunReuseBindVolumeThatIsSymlink(c *check.C) {
	// Not applicable on Windows as Windows does not support volumes
	testRequires(c, SameHostDaemon, DaemonIsLinux, NotUserNamespace)
	prefix, _ := getPrefixAndSlashFromDaemonPlatform()

	tmpDir, err := ioutil.TempDir(os.TempDir(), "testlink")
	if err != nil {
		c.Fatal(err)
	}
	defer os.RemoveAll(tmpDir)

	linkPath := os.TempDir() + "/testlink2"
	if err := os.Symlink(tmpDir, linkPath); err != nil {
		c.Fatal(err)
	}
	defer os.RemoveAll(linkPath)

	// Create first container
	dockerCmd(c, "run", "-v", fmt.Sprintf("%s:"+prefix+"/tmp/test", linkPath), "busybox", "ls", prefix+"/tmp/test")

	// Create second container with same symlinked path
	// This will fail if the referenced issue is hit with a "Volume exists" error
	dockerCmd(c, "run", "-v", fmt.Sprintf("%s:"+prefix+"/tmp/test", linkPath), "busybox", "ls", prefix+"/tmp/test")
}

//GH#10604: Test an "/etc" volume doesn't overlay special bind mounts in container
func (s *DockerSuite) TestRunCreateVolumeEtc(c *check.C) {
	// While Windows supports volumes, it does not support --add-host hence
	// this test is not applicable on Windows.
	testRequires(c, DaemonIsLinux)
	out, _ := dockerCmd(c, "run", "--dns=127.0.0.1", "-v", "/etc", "busybox", "cat", "/etc/resolv.conf")
	if !strings.Contains(out, "nameserver 127.0.0.1") {
		c.Fatal("/etc volume mount hides /etc/resolv.conf")
	}

	out, _ = dockerCmd(c, "run", "-h=test123", "-v", "/etc", "busybox", "cat", "/etc/hostname")
	if !strings.Contains(out, "test123") {
		c.Fatal("/etc volume mount hides /etc/hostname")
	}

	out, _ = dockerCmd(c, "run", "--add-host=test:192.168.0.1", "-v", "/etc", "busybox", "cat", "/etc/hosts")
	out = strings.Replace(out, "\n", " ", -1)
	if !strings.Contains(out, "192.168.0.1\ttest") || !strings.Contains(out, "127.0.0.1\tlocalhost") {
		c.Fatal("/etc volume mount hides /etc/hosts")
	}
}

func (s *DockerSuite) TestVolumesNoCopyData(c *check.C) {
	// TODO Windows (Post RS1). Windows does not support volumes which
	// are pre-populated such as is built in the dockerfile used in this test.
	testRequires(c, DaemonIsLinux)
	prefix, slash := getPrefixAndSlashFromDaemonPlatform()
	// Build an image whose /foo/bar exists in the image layer; the test then
	// asserts that neither volumes-from nor bind mounts copy that data in.
	if _, err := buildImage("dataimage",
		`FROM busybox
		RUN ["mkdir", "-p", "/foo"]
		RUN ["touch", "/foo/bar"]`,
		true); err != nil {
		c.Fatal(err)
	}
	dockerCmd(c, "run", "--name", "test", "-v", prefix+slash+"foo", "busybox")

	if out, _, err := dockerCmdWithError("run", "--volumes-from", "test", "dataimage", "ls", "-lh", "/foo/bar"); err == nil || !strings.Contains(out, "No such file or directory") {
		c.Fatalf("Data was copied on volumes-from but shouldn't be:\n%q", out)
	}

	tmpDir := randomTmpDirPath("docker_test_bind_mount_copy_data", daemonPlatform)
	if out, _, err := dockerCmdWithError("run", "-v", tmpDir+":/foo", "dataimage", "ls", "-lh", "/foo/bar"); err == nil || !strings.Contains(out, "No such file or directory") {
		c.Fatalf("Data was copied on bind-mount but shouldn't be:\n%q", out)
	}
}

func (s *DockerSuite) TestRunNoOutputFromPullInStdout(c *check.C) {
	// just run with unknown image
	cmd := exec.Command(dockerBinary, "run", "asdfsg")
	stdout := bytes.NewBuffer(nil)
	cmd.Stdout = stdout
	if err := cmd.Run(); err == nil {
		c.Fatal("Run with unknown image should fail")
	}
	if stdout.Len() != 0 {
		c.Fatalf("Stdout contains output from pull: %s", stdout)
	}
}

func (s *DockerSuite) TestRunVolumesCleanPaths(c *check.C) {
	testRequires(c, SameHostDaemon)
	prefix, slash := getPrefixAndSlashFromDaemonPlatform()
	if _, err := buildImage("run_volumes_clean_paths",
		`FROM busybox
		VOLUME `+prefix+`/foo/`,
		true); err != nil {
		c.Fatal(err)
	}
	dockerCmd(c, "run", "-v", prefix+"/foo", "-v", prefix+"/bar/", "--name", "dark_helmet", "run_volumes_clean_paths")

	// Trailing-slash paths must have been normalized away: only the clean
	// form of each path should appear in the container's mount entries.
	out, err := inspectMountSourceField("dark_helmet", prefix+slash+"foo"+slash)
	if err != errMountNotFound {
		c.Fatalf("Found unexpected volume entry for '%s/foo/' in volumes\n%q", prefix, out)
	}

	out, err = inspectMountSourceField("dark_helmet", prefix+slash+`foo`)
	c.Assert(err, check.IsNil)
	if !strings.Contains(strings.ToLower(out), strings.ToLower(volumesConfigPath)) {
		c.Fatalf("Volume was not defined for %s/foo\n%q", prefix, out)
	}

	out, err = inspectMountSourceField("dark_helmet", prefix+slash+"bar"+slash)
	if err != errMountNotFound {
		c.Fatalf("Found unexpected volume entry for '%s/bar/' in volumes\n%q", prefix, out)
	}

	out, err = inspectMountSourceField("dark_helmet", prefix+slash+"bar")
	c.Assert(err, check.IsNil)
	if !strings.Contains(strings.ToLower(out), strings.ToLower(volumesConfigPath)) {
		c.Fatalf("Volume was not defined for %s/bar\n%q", prefix, out)
	}
}

// Regression test for #3631
func (s *DockerSuite) TestRunSlowStdoutConsumer(c *check.C) {
	// TODO Windows: This should be able to run on Windows if can find an
	// alternate to /dev/zero and /dev/stdout.
	testRequires(c, DaemonIsLinux)
	cont := exec.Command(dockerBinary, "run", "--rm", "busybox", "/bin/sh", "-c", "dd if=/dev/zero of=/dev/stdout bs=1024 count=2000 | catv")

	stdout, err := cont.StdoutPipe()
	if err != nil {
		c.Fatal(err)
	}

	if err := cont.Start(); err != nil {
		c.Fatal(err)
	}
	// Read the container's stdout deliberately slowly and verify no bytes
	// are lost (2000 blocks of 1024 bytes, doubled by catv's escaping).
	n, err := consumeWithSpeed(stdout, 10000, 5*time.Millisecond, nil)
	if err != nil {
		c.Fatal(err)
	}

	expected := 2 * 1024 * 2000
	if n != expected {
		c.Fatalf("Expected %d, got %d", expected, n)
	}
}

func (s *DockerSuite) TestRunAllowPortRangeThroughExpose(c *check.C) {
	// TODO Windows: -P is not currently supported. Also network
	// settings are not propagated back.
	testRequires(c, DaemonIsLinux)
	out, _ := dockerCmd(c, "run", "-d", "--expose", "3000-3003", "-P", "busybox", "top")

	id := strings.TrimSpace(out)
	portstr := inspectFieldJSON(c, id, "NetworkSettings.Ports")
	var ports nat.PortMap
	if err := json.Unmarshal([]byte(portstr), &ports); err != nil {
		c.Fatal(err)
	}
	// Every exposed port in 3000-3003 must have exactly one host binding.
	for port, binding := range ports {
		portnum, _ := strconv.Atoi(strings.Split(string(port), "/")[0])
		if portnum < 3000 || portnum > 3003 {
			c.Fatalf("Port %d is out of range ", portnum)
		}
		if binding == nil || len(binding) != 1 || len(binding[0].HostPort) == 0 {
			c.Fatalf("Port is not mapped for the port %s", port)
		}
	}
}

func (s *DockerSuite) TestRunExposePort(c *check.C) {
	out, _, err := dockerCmdWithError("run", "--expose", "80000", "busybox")
	c.Assert(err, checker.NotNil, check.Commentf("--expose with an invalid port should error out"))
	c.Assert(out, checker.Contains, "invalid range format for --expose")
}

func (s *DockerSuite) TestRunModeIpcHost(c *check.C) {
	// Not applicable on Windows as uses Unix-specific capabilities
	testRequires(c, SameHostDaemon, DaemonIsLinux, NotUserNamespace)

	hostIpc, err := os.Readlink("/proc/1/ns/ipc")
	if err != nil {
		c.Fatal(err)
	}

	out, _ := dockerCmd(c, "run", "--ipc=host", "busybox", "readlink", "/proc/self/ns/ipc")
	out = strings.Trim(out, "\n")
	if hostIpc != out {
		c.Fatalf("IPC different with --ipc=host %s != %s\n", hostIpc, out)
	}

	out, _ = dockerCmd(c, "run", "busybox", "readlink", "/proc/self/ns/ipc")
	out = strings.Trim(out, "\n")
	if hostIpc == out {
		c.Fatalf("IPC should be different without --ipc=host %s == %s\n", hostIpc, out)
	}
}

func (s *DockerSuite) TestRunModeIpcContainer(c *check.C) {
	// Not applicable on Windows as uses Unix-specific capabilities
	testRequires(c, SameHostDaemon, DaemonIsLinux)

	out, _ := dockerCmd(c, "run", "-d", "busybox", "sh", "-c", "echo -n test > /dev/shm/test && touch /dev/mqueue/toto && top")

	id := strings.TrimSpace(out)
	state := inspectField(c, id, "State.Running")
	if state != "true" {
		c.Fatal("Container state is 'not running'")
	}
	pid1 := inspectField(c, id, "State.Pid")

	parentContainerIpc, err := os.Readlink(fmt.Sprintf("/proc/%s/ns/ipc", pid1))
	if err != nil {
		c.Fatal(err)
	}

	// A container joined with --ipc=container:<id> must share the parent's
	// IPC namespace, its /dev/shm contents and its /dev/mqueue queues.
	out, _ = dockerCmd(c, "run", fmt.Sprintf("--ipc=container:%s", id), "busybox", "readlink", "/proc/self/ns/ipc")
	out = strings.Trim(out, "\n")
	if parentContainerIpc != out {
		c.Fatalf("IPC different with --ipc=container:%s %s != %s\n", id, parentContainerIpc, out)
	}

	catOutput, _ := dockerCmd(c, fmt.Sprintf("--ipc=container:%s", id), "busybox", "cat", "/dev/shm/test")
	if catOutput != "test" {
		c.Fatalf("Output of /dev/shm/test expected test but found: %s", catOutput)
	}

	// check that /dev/mqueue is actually of mqueue type
	grepOutput, _ := dockerCmd(c, "run", fmt.Sprintf("--ipc=container:%s", id), "busybox", "grep", "/dev/mqueue", "/proc/mounts")
	if !strings.HasPrefix(grepOutput, "mqueue /dev/mqueue mqueue rw") {
		c.Fatalf("Output of 'grep /proc/mounts' expected 'mqueue /dev/mqueue mqueue rw' but found: %s", grepOutput)
	}

	lsOutput, _ := dockerCmd(c, "run", fmt.Sprintf("--ipc=container:%s", id), "busybox", "ls", "/dev/mqueue")
	lsOutput = strings.Trim(lsOutput, "\n")
	if lsOutput != "toto" {
		c.Fatalf("Output of 'ls /dev/mqueue' expected 'toto' but found: %s", lsOutput)
	}
}

func (s *DockerSuite) TestRunModeIpcContainerNotExists(c *check.C) {
	// Not applicable on Windows as uses Unix-specific capabilities
	testRequires(c, DaemonIsLinux)
	out, _, err := dockerCmdWithError("run", "-d", "--ipc", "container:abcd1234", "busybox", "top")
	if !strings.Contains(out, "abcd1234") || err == nil {
		c.Fatalf("run IPC from a non exists container should with correct error out")
	}
}

func (s *DockerSuite) TestRunModeIpcContainerNotRunning(c *check.C) {
	// Not applicable on Windows as uses Unix-specific capabilities
	testRequires(c, SameHostDaemon, DaemonIsLinux)
	out, _ := dockerCmd(c, "create", "busybox")

	id := strings.TrimSpace(out)
	out, _, err := dockerCmdWithError("run", fmt.Sprintf("--ipc=container:%s", id), "busybox")
	if err == nil {
		c.Fatalf("Run container with ipc mode container should fail with non running container: %s\n%s", out, err)
	}
}

func (s *DockerSuite) TestRunModePIDContainer(c *check.C) {
	// Not applicable on Windows as uses Unix-specific capabilities
	testRequires(c, SameHostDaemon, DaemonIsLinux)
	out, _ := dockerCmd(c, "run", "-d", "busybox", "sh", "-c", "top")

	id := strings.TrimSpace(out)
	state := inspectField(c, id, "State.Running")
	if state != "true" {
		c.Fatal("Container state is 'not running'")
	}
	pid1 := inspectField(c, id, "State.Pid")

	parentContainerPid, err := os.Readlink(fmt.Sprintf("/proc/%s/ns/pid", pid1))
	if err != nil {
		c.Fatal(err)
	}

	out, _ = dockerCmd(c, "run", fmt.Sprintf("--pid=container:%s", id), "busybox", "readlink", "/proc/self/ns/pid")
	out = strings.Trim(out, "\n")
	if parentContainerPid != out {
		c.Fatalf("PID different with --pid=container:%s %s != %s\n", id, parentContainerPid, out)
	}
}

func (s *DockerSuite) TestRunModePIDContainerNotExists(c *check.C) {
	// Not applicable on Windows as uses Unix-specific capabilities
	testRequires(c, DaemonIsLinux)
	out, _, err := dockerCmdWithError("run", "-d", "--pid", "container:abcd1234", "busybox", "top")
	if !strings.Contains(out, "abcd1234") || err == nil {
		c.Fatalf("run PID from a non exists container should with correct error out")
	}
}

func (s *DockerSuite) TestRunModePIDContainerNotRunning(c *check.C) {
	// Not applicable on Windows as uses Unix-specific capabilities
	testRequires(c, SameHostDaemon, DaemonIsLinux)
	out, _ := dockerCmd(c, "create", "busybox")

	id := strings.TrimSpace(out)
	out, _, err := dockerCmdWithError("run", fmt.Sprintf("--pid=container:%s", id), "busybox")
	if err == nil {
		c.Fatalf("Run container with pid mode container should fail with non running container: %s\n%s", out, err)
	}
}

func (s *DockerSuite) TestRunMountShmMqueueFromHost(c *check.C) {
	// Not applicable on Windows as uses Unix-specific capabilities
	testRequires(c, SameHostDaemon, DaemonIsLinux, NotUserNamespace)

	dockerCmd(c, "run", "-d", "--name", "shmfromhost", "-v", "/dev/shm:/dev/shm", "-v", "/dev/mqueue:/dev/mqueue", "busybox", "sh", "-c", "echo -n test > /dev/shm/test && touch /dev/mqueue/toto && top")
	defer os.Remove("/dev/mqueue/toto")
	defer os.Remove("/dev/shm/test")
	volPath, err := inspectMountSourceField("shmfromhost", "/dev/shm")
	c.Assert(err, checker.IsNil)
	if volPath != "/dev/shm" {
		c.Fatalf("volumePath should have been /dev/shm, was %s", volPath)
	}

	out, _ := dockerCmd(c, "run", "--name", "ipchost", "--ipc", "host", "busybox", "cat", "/dev/shm/test")
	if out != "test" {
		c.Fatalf("Output of /dev/shm/test expected test but found: %s", out)
	}

	// Check that the mq was created
	if _, err := os.Stat("/dev/mqueue/toto"); err != nil {
		c.Fatalf("Failed to confirm '/dev/mqueue/toto' presence on host: %s", err.Error())
	}
}

func (s *DockerSuite) TestContainerNetworkMode(c *check.C) {
	// Not applicable on Windows as uses Unix-specific capabilities
	testRequires(c, SameHostDaemon, DaemonIsLinux)
	out, _ := dockerCmd(c, "run", "-d", "busybox", "top")
	id := strings.TrimSpace(out)
	c.Assert(waitRun(id), check.IsNil)
	pid1 := inspectField(c, id, "State.Pid")

	parentContainerNet, err := os.Readlink(fmt.Sprintf("/proc/%s/ns/net", pid1))
	if err != nil {
		c.Fatal(err)
	}

	out, _ = dockerCmd(c, "run", fmt.Sprintf("--net=container:%s", id), "busybox", "readlink", "/proc/self/ns/net")
	out = strings.Trim(out, "\n")
	if parentContainerNet != out {
		c.Fatalf("NET different with --net=container:%s %s != %s\n", id, parentContainerNet, out)
	}
}

func (s *DockerSuite) TestRunModePIDHost(c *check.C) {
	// Not applicable on Windows as uses Unix-specific capabilities
	testRequires(c, SameHostDaemon, DaemonIsLinux, NotUserNamespace)

	hostPid, err := os.Readlink("/proc/1/ns/pid")
	if err != nil {
		c.Fatal(err)
	}

	out, _ := dockerCmd(c, "run", "--pid=host", "busybox", "readlink", "/proc/self/ns/pid")
	out = strings.Trim(out, "\n")
	if hostPid != out {
		c.Fatalf("PID different with --pid=host %s != %s\n", hostPid, out)
	}

	out, _ = dockerCmd(c, "run", "busybox", "readlink", "/proc/self/ns/pid")
	out = strings.Trim(out, "\n")
	if hostPid == out {
		c.Fatalf("PID should be different without --pid=host %s == %s\n", hostPid, out)
	}
}

func (s *DockerSuite) TestRunModeUTSHost(c *check.C) {
	// Not applicable on Windows as uses Unix-specific capabilities
	testRequires(c, SameHostDaemon, DaemonIsLinux)

	hostUTS, err := os.Readlink("/proc/1/ns/uts")
	if err != nil {
		c.Fatal(err)
	}

	out, _ := dockerCmd(c, "run", "--uts=host", "busybox", "readlink", "/proc/self/ns/uts")
	out = strings.Trim(out, "\n")
	if hostUTS != out {
		c.Fatalf("UTS different with --uts=host %s != %s\n", hostUTS, out)
	}

	out, _ = dockerCmd(c, "run", "busybox", "readlink", "/proc/self/ns/uts")
	out = strings.Trim(out, "\n")
	if hostUTS == out {
		c.Fatalf("UTS should be different without --uts=host %s == %s\n", hostUTS, out)
	}

	// --hostname conflicts with sharing the host's UTS namespace.
	out, _ = dockerCmdWithFail(c, "run", "-h=name", "--uts=host", "busybox", "ps")
	c.Assert(out, checker.Contains, runconfig.ErrConflictUTSHostname.Error())
}

func (s *DockerSuite) TestRunTLSVerify(c *check.C) {
	// Remote daemons use TLS and this test is not applicable when TLS is required.
	testRequires(c, SameHostDaemon)
	if out, code, err := dockerCmdWithError("ps"); err != nil || code != 0 {
		c.Fatalf("Should have worked: %v:\n%v", err, out)
	}

	// Regardless of whether we specify true or false we need to
	// test to make sure tls is turned on if --tlsverify is specified at all
	result := dockerCmdWithResult("--tlsverify=false", "ps")
	result.Assert(c, icmd.Expected{ExitCode: 1, Err: "error during connect"})

	result = dockerCmdWithResult("--tlsverify=true", "ps")
	result.Assert(c, icmd.Expected{ExitCode: 1, Err: "cert"})
}

func (s *DockerSuite) TestRunPortFromDockerRangeInUse(c *check.C) {
	// TODO Windows. Once moved to libnetwork/CNM, this may be able to be
	// re-instated.
	testRequires(c, DaemonIsLinux)
	// first find allocator current position
	out, _ := dockerCmd(c, "run", "-d", "-p", ":80", "busybox", "top")

	id := strings.TrimSpace(out)
	out, _ = dockerCmd(c, "port", id)

	out = strings.TrimSpace(out)
	if out == "" {
		c.Fatal("docker port command output is empty")
	}
	out = strings.Split(out, ":")[1]
	lastPort, err := strconv.Atoi(out)
	if err != nil {
		c.Fatal(err)
	}
	// Occupy the next port in the dynamic range ourselves; the allocator
	// must skip it and still start the second container successfully.
	port := lastPort + 1
	l, err := net.Listen("tcp", ":"+strconv.Itoa(port))
	if err != nil {
		c.Fatal(err)
	}
	defer l.Close()

	out, _ = dockerCmd(c, "run", "-d", "-p", ":80", "busybox", "top")

	id = strings.TrimSpace(out)
	dockerCmd(c, "port", id)
}

func (s *DockerSuite) TestRunTTYWithPipe(c *check.C) {
	errChan := make(chan error)
	go func() {
		defer close(errChan)

		cmd := exec.Command(dockerBinary, "run", "-ti", "busybox", "true")
		if _, err := cmd.StdinPipe(); err != nil {
			errChan <- err
			return
		}

		expected := "the input device is not a TTY"
		if runtime.GOOS == "windows" {
			expected += ". If you are using mintty, try prefixing the command with 'winpty'"
		}
		if out, _, err := runCommandWithOutput(cmd); err == nil {
			errChan <- fmt.Errorf("run should have failed")
			return
		} else if !strings.Contains(out, expected) {
			errChan <- fmt.Errorf("run failed with error %q: expected %q", out, expected)
			return
		}
	}()

	select {
	case err := <-errChan:
		c.Assert(err, check.IsNil)
	case <-time.After(30 * time.Second):
		c.Fatal("container is running but should have failed")
	}
}

func (s *DockerSuite) TestRunNonLocalMacAddress(c *check.C) {
	addr := "00:16:3E:08:00:50"
	args := []string{"run", "--mac-address", addr}
	expected := addr

	if daemonPlatform != "windows" {
		args = append(args, "busybox", "ifconfig")
	} else {
		args = append(args, WindowsBaseImage, "ipconfig", "/all")
		// Windows ipconfig reports MACs upper-case and dash-separated.
		expected = strings.Replace(strings.ToUpper(addr), ":", "-", -1)
	}

	if out, _ := dockerCmd(c, args...); !strings.Contains(out, expected) {
		c.Fatalf("Output should have contained %q: %s", expected, out)
	}
}

func (s *DockerSuite) TestRunNetHost(c *check.C) {
	// Not applicable on Windows as uses Unix-specific capabilities
	testRequires(c, SameHostDaemon, DaemonIsLinux, NotUserNamespace)

	hostNet, err := os.Readlink("/proc/1/ns/net")
	if err != nil {
		c.Fatal(err)
	}

	out, _ := dockerCmd(c, "run", "--net=host", "busybox", "readlink", "/proc/self/ns/net")
	out = strings.Trim(out, "\n")
	if hostNet != out {
		c.Fatalf("Net namespace different with --net=host %s != %s\n", hostNet, out)
	}

	out, _ = dockerCmd(c, "run", "busybox", "readlink", "/proc/self/ns/net")
	out = strings.Trim(out, "\n")
	if hostNet == out {
		c.Fatalf("Net namespace should be different without --net=host %s == %s\n", hostNet, out)
	}
}

func (s *DockerSuite) TestRunNetHostTwiceSameName(c *check.C) {
	// TODO Windows. As Windows networking evolves and converges towards
	// CNM, this test may be possible to enable on Windows.
	testRequires(c, SameHostDaemon, DaemonIsLinux, NotUserNamespace)

	dockerCmd(c, "run", "--rm", "--name=thost", "--net=host", "busybox", "true")
	dockerCmd(c, "run", "--rm", "--name=thost", "--net=host", "busybox", "true")
}

func (s *DockerSuite) TestRunNetContainerWhichHost(c *check.C) {
	// Not applicable on Windows as uses Unix-specific capabilities
	testRequires(c, SameHostDaemon, DaemonIsLinux, NotUserNamespace)

	hostNet, err := os.Readlink("/proc/1/ns/net")
	if err != nil {
		c.Fatal(err)
	}

	dockerCmd(c, "run", "-d", "--net=host", "--name=test", "busybox", "top")

	// Joining a container that itself uses the host network must land in
	// the host network namespace transitively.
	out, _ := dockerCmd(c, "run", "--net=container:test", "busybox", "readlink", "/proc/self/ns/net")
	out = strings.Trim(out, "\n")
	if hostNet != out {
		c.Fatalf("Container should have host network namespace")
	}
}

func (s *DockerSuite) TestRunAllowPortRangeThroughPublish(c *check.C) {
	// TODO Windows. This may be possible to enable in the future. However,
	// Windows does not currently support --expose, or populate the network
	// settings seen through inspect.
	testRequires(c, DaemonIsLinux)
	out, _ := dockerCmd(c, "run", "-d", "--expose", "3000-3003", "-p", "3000-3003", "busybox", "top")

	id := strings.TrimSpace(out)
	portstr := inspectFieldJSON(c, id, "NetworkSettings.Ports")

	var ports nat.PortMap
	err := json.Unmarshal([]byte(portstr), &ports)
	c.Assert(err, checker.IsNil, check.Commentf("failed to unmarshal: %v", portstr))
	for port, binding := range ports {
		portnum, _ := strconv.Atoi(strings.Split(string(port), "/")[0])
		if portnum < 3000 || portnum > 3003 {
			c.Fatalf("Port %d is out of range ", portnum)
		}
		if binding == nil || len(binding) != 1 || len(binding[0].HostPort) == 0 {
			c.Fatal("Port is not mapped for the port "+port, out)
		}
	}
}

func (s *DockerSuite) TestRunSetDefaultRestartPolicy(c *check.C) {
	runSleepingContainer(c, "--name=testrunsetdefaultrestartpolicy")
	out := inspectField(c, "testrunsetdefaultrestartpolicy", "HostConfig.RestartPolicy.Name")
	if out != "no" {
		c.Fatalf("Set default restart policy failed")
	}
}

func (s *DockerSuite) TestRunRestartMaxRetries(c *check.C) {
	out, _ := dockerCmd(c, "run", "-d", "--restart=on-failure:3", "busybox", "false")
	timeout := 10 * time.Second
	if daemonPlatform == "windows" {
		timeout = 120 * time.Second
	}

	id := strings.TrimSpace(string(out))
	if err := waitInspect(id, "{{ .State.Restarting }} {{ .State.Running }}", "false false", timeout); err != nil {
		c.Fatal(err)
	}

	count := inspectField(c, id, "RestartCount")
	if count != "3" {
		c.Fatalf("Container was restarted %s times, expected %d", count, 3)
	}

	MaximumRetryCount := inspectField(c, id, "HostConfig.RestartPolicy.MaximumRetryCount")
	if MaximumRetryCount != "3" {
		c.Fatalf("Container Maximum Retry Count is %s, expected %s", MaximumRetryCount, "3")
	}
}

func (s *DockerSuite) TestRunContainerWithWritableRootfs(c *check.C) {
	dockerCmd(c, "run", "--rm", "busybox", "touch", "/file")
}

func (s *DockerSuite) TestRunContainerWithReadonlyRootfs(c *check.C) {
	// Not applicable on Windows which does not support --read-only
	testRequires(c,
	DaemonIsLinux, UserNamespaceROMount)

	testPriv := true
	// don't test privileged mode subtest if user namespaces enabled
	if root := os.Getenv("DOCKER_REMAP_ROOT"); root != "" {
		testPriv = false
	}
	testReadOnlyFile(c, testPriv, "/file", "/etc/hosts", "/etc/resolv.conf", "/etc/hostname", "/sys/kernel", "/dev/.dont.touch.me")
}

func (s *DockerSuite) TestPermissionsPtsReadonlyRootfs(c *check.C) {
	// Not applicable on Windows due to use of Unix specific functionality, plus
	// the use of --read-only which is not supported.
	testRequires(c, DaemonIsLinux, UserNamespaceROMount)

	// Ensure we have not broken writing /dev/pts
	out, status := dockerCmd(c, "run", "--read-only", "--rm", "busybox", "mount")
	if status != 0 {
		c.Fatal("Could not obtain mounts when checking /dev/pts mntpnt.")
	}
	expected := "type devpts (rw,"
	if !strings.Contains(string(out), expected) {
		c.Fatalf("expected output to contain %s but contains %s", expected, out)
	}
}

// testReadOnlyFile asserts that each given path cannot be touched inside a
// --read-only container, and (optionally) not even in privileged mode.
func testReadOnlyFile(c *check.C, testPriv bool, filenames ...string) {
	touch := "touch " + strings.Join(filenames, " ")
	out, _, err := dockerCmdWithError("run", "--read-only", "--rm", "busybox", "sh", "-c", touch)
	c.Assert(err, checker.NotNil)
	for _, f := range filenames {
		expected := "touch: " + f + ": Read-only file system"
		c.Assert(out, checker.Contains, expected)
	}

	if !testPriv {
		return
	}

	out, _, err = dockerCmdWithError("run", "--read-only", "--privileged", "--rm", "busybox", "sh", "-c", touch)
	c.Assert(err, checker.NotNil)
	for _, f := range filenames {
		expected := "touch: " + f + ": Read-only file system"
		c.Assert(out, checker.Contains, expected)
	}
}

func (s *DockerSuite) TestRunContainerWithReadonlyEtcHostsAndLinkedContainer(c *check.C) {
	// Not applicable on Windows which does not support --link
	testRequires(c, DaemonIsLinux, UserNamespaceROMount)

	dockerCmd(c, "run", "-d", "--name", "test-etc-hosts-ro-linked", "busybox", "top")

	out, _ := dockerCmd(c, "run", "--read-only", "--link", "test-etc-hosts-ro-linked:testlinked", "busybox", "cat",
		"/etc/hosts")
	if !strings.Contains(string(out), "testlinked") {
		c.Fatal("Expected /etc/hosts to be updated even if --read-only enabled")
	}
}

func (s *DockerSuite) TestRunContainerWithReadonlyRootfsWithDNSFlag(c *check.C) {
	// Not applicable on Windows which does not support either --read-only or --dns.
	testRequires(c, DaemonIsLinux, UserNamespaceROMount)

	out, _ := dockerCmd(c, "run", "--read-only", "--dns", "1.1.1.1", "busybox", "/bin/cat", "/etc/resolv.conf")
	if !strings.Contains(string(out), "1.1.1.1") {
		c.Fatal("Expected /etc/resolv.conf to be updated even if --read-only enabled and --dns flag used")
	}
}

func (s *DockerSuite) TestRunContainerWithReadonlyRootfsWithAddHostFlag(c *check.C) {
	// Not applicable on Windows which does not support --read-only
	testRequires(c, DaemonIsLinux, UserNamespaceROMount)

	out, _ := dockerCmd(c, "run", "--read-only", "--add-host", "testreadonly:127.0.0.1", "busybox", "/bin/cat", "/etc/hosts")
	if !strings.Contains(string(out), "testreadonly") {
		c.Fatal("Expected /etc/hosts to be updated even if --read-only enabled and --add-host flag used")
	}
}

func (s *DockerSuite) TestRunVolumesFromRestartAfterRemoved(c *check.C) {
	prefix, _ := getPrefixAndSlashFromDaemonPlatform()
	runSleepingContainer(c, "--name=voltest", "-v", prefix+"/foo")
	runSleepingContainer(c, "--name=restarter", "--volumes-from", "voltest")

	// Remove the main volume container and restart the consuming container
	dockerCmd(c, "rm", "-f", "voltest")

	// This should not fail since the volumes-from were already applied
	dockerCmd(c, "restart", "restarter")
}

// run container with --rm should remove container if exit code != 0
func (s *DockerSuite) TestRunContainerWithRmFlagExitCodeNotEqualToZero(c *check.C) {
	name := "flowers"
	out, _, err := dockerCmdWithError("run", "--name", name, "--rm", "busybox", "ls", "/notexists")
	if err == nil {
		c.Fatal("Expected docker run to fail", out, err)
	}

	out, err = getAllContainers()
	if err != nil {
		c.Fatal(out, err)
	}

	if out != "" {
		c.Fatal("Expected not to have containers", out)
	}
}

func (s *DockerSuite) TestRunContainerWithRmFlagCannotStartContainer(c *check.C) {
	name := "sparkles"
	out, _, err := dockerCmdWithError("run", "--name", name, "--rm", "busybox", "commandNotFound")
	if err == nil {
		c.Fatal("Expected docker run to fail", out, err)
	}

	out, err = getAllContainers()
	if err != nil {
		c.Fatal(out, err)
	}

	if out != "" {
		c.Fatal("Expected not to have containers", out)
	}
}

func (s *DockerSuite) TestRunPIDHostWithChildIsKillable(c *check.C) {
	// Not applicable on Windows as uses Unix specific functionality
	testRequires(c, DaemonIsLinux, NotUserNamespace)
	name := "ibuildthecloud"
	dockerCmd(c, "run", "-d", "--pid=host", "--name", name, "busybox", "sh", "-c", "sleep 30; echo hi")

	c.Assert(waitRun(name), check.IsNil)

	errchan := make(chan error)
	go func() {
		if out, _, err := dockerCmdWithError("kill", name); err != nil {
			errchan <- fmt.Errorf("%v:\n%s", err, out)
		}
		close(errchan)
	}()
	select {
	case err := <-errchan:
		c.Assert(err, check.IsNil)
	case <-time.After(5 * time.Second):
		c.Fatal("Kill container timed out")
	}
}

func (s *DockerSuite) TestRunWithTooSmallMemoryLimit(c *check.C) {
	// TODO Windows. This may be possible to enable once Windows supports
	// memory limits on containers
	testRequires(c, DaemonIsLinux)
	// this memory limit is 1 byte less than the min, which is 4MB
	// https://github.com/docker/docker/blob/v1.5.0/daemon/create.go#L22
	out, _, err := dockerCmdWithError("run", "-m", "4194303", "busybox")
	if err == nil || !strings.Contains(out, "Minimum memory limit allowed is 4MB") {
		c.Fatalf("expected run to fail when using too low a memory limit: %q", out)
	}
}

func (s *DockerSuite) TestRunWriteToProcAsound(c *check.C) {
	// Not applicable on Windows as uses Unix specific functionality
	testRequires(c, DaemonIsLinux)
	_, code, err := dockerCmdWithError("run", "busybox", "sh", "-c", "echo 111 >> /proc/asound/version")
	if err == nil || code == 0 {
		c.Fatal("standard container should not be able to write to /proc/asound")
	}
}

func (s *DockerSuite) TestRunReadProcTimer(c *check.C) {
	// Not applicable on Windows as uses Unix specific functionality
	testRequires(c, DaemonIsLinux)
	out, code, err := dockerCmdWithError("run", "busybox", "cat", "/proc/timer_stats")
	if code != 0 {
		return
	}
	if err != nil {
		c.Fatal(err)
	}
	if strings.Trim(out, "\n ") != "" {
		c.Fatalf("expected to receive no output from /proc/timer_stats but received %q", out)
	}
}

func (s *DockerSuite) TestRunReadProcLatency(c *check.C) {
	// Not applicable on Windows as uses Unix specific functionality
	testRequires(c, DaemonIsLinux)
	// some kernels don't have this configured so skip the test if this file is not found
	// on the host running the tests.
if _, err := os.Stat("/proc/latency_stats"); err != nil { c.Skip("kernel doesn't have latency_stats configured") return } out, code, err := dockerCmdWithError("run", "busybox", "cat", "/proc/latency_stats") if code != 0 { return } if err != nil { c.Fatal(err) } if strings.Trim(out, "\n ") != "" { c.Fatalf("expected to receive no output from /proc/latency_stats but received %q", out) } } func (s *DockerSuite) TestRunReadFilteredProc(c *check.C) { // Not applicable on Windows as uses Unix specific functionality testRequires(c, Apparmor, DaemonIsLinux, NotUserNamespace) testReadPaths := []string{ "/proc/latency_stats", "/proc/timer_stats", "/proc/kcore", } for i, filePath := range testReadPaths { name := fmt.Sprintf("procsieve-%d", i) shellCmd := fmt.Sprintf("exec 3<%s", filePath) out, exitCode, err := dockerCmdWithError("run", "--privileged", "--security-opt", "apparmor=docker-default", "--name", name, "busybox", "sh", "-c", shellCmd) if exitCode != 0 { return } if err != nil { c.Fatalf("Open FD for read should have failed with permission denied, got: %s, %v", out, err) } } } func (s *DockerSuite) TestMountIntoProc(c *check.C) { // Not applicable on Windows as uses Unix specific functionality testRequires(c, DaemonIsLinux) _, code, err := dockerCmdWithError("run", "-v", "/proc//sys", "busybox", "true") if err == nil || code == 0 { c.Fatal("container should not be able to mount into /proc") } } func (s *DockerSuite) TestMountIntoSys(c *check.C) { // Not applicable on Windows as uses Unix specific functionality testRequires(c, DaemonIsLinux) testRequires(c, NotUserNamespace) dockerCmd(c, "run", "-v", "/sys/fs/cgroup", "busybox", "true") } func (s *DockerSuite) TestRunUnshareProc(c *check.C) { // Not applicable on Windows as uses Unix specific functionality testRequires(c, Apparmor, DaemonIsLinux, NotUserNamespace) // In this test goroutines are used to run test cases in parallel to prevent the test from taking a long time to run. 
errChan := make(chan error) go func() { name := "acidburn" out, _, err := dockerCmdWithError("run", "--name", name, "--security-opt", "seccomp=unconfined", "debian:jessie", "unshare", "-p", "-m", "-f", "-r", "--mount-proc=/proc", "mount") if err == nil || !(strings.Contains(strings.ToLower(out), "permission denied") || strings.Contains(strings.ToLower(out), "operation not permitted")) { errChan <- fmt.Errorf("unshare with --mount-proc should have failed with 'permission denied' or 'operation not permitted', got: %s, %v", out, err) } else { errChan <- nil } }() go func() { name := "cereal" out, _, err := dockerCmdWithError("run", "--name", name, "--security-opt", "seccomp=unconfined", "debian:jessie", "unshare", "-p", "-m", "-f", "-r", "mount", "-t", "proc", "none", "/proc") if err == nil || !(strings.Contains(strings.ToLower(out), "mount: cannot mount none") || strings.Contains(strings.ToLower(out), "permission denied") || strings.Contains(strings.ToLower(out), "operation not permitted")) { errChan <- fmt.Errorf("unshare and mount of /proc should have failed with 'mount: cannot mount none' or 'permission denied', got: %s, %v", out, err) } else { errChan <- nil } }() /* Ensure still fails if running privileged with the default policy */ go func() { name := "crashoverride" out, _, err := dockerCmdWithError("run", "--privileged", "--security-opt", "seccomp=unconfined", "--security-opt", "apparmor=docker-default", "--name", name, "debian:jessie", "unshare", "-p", "-m", "-f", "-r", "mount", "-t", "proc", "none", "/proc") if err == nil || !(strings.Contains(strings.ToLower(out), "mount: cannot mount none") || strings.Contains(strings.ToLower(out), "permission denied") || strings.Contains(strings.ToLower(out), "operation not permitted")) { errChan <- fmt.Errorf("privileged unshare with apparmor should have failed with 'mount: cannot mount none' or 'permission denied', got: %s, %v", out, err) } else { errChan <- nil } }() for i := 0; i < 3; i++ { err := <-errChan if err != 
nil { c.Fatal(err) } } } func (s *DockerSuite) TestRunPublishPort(c *check.C) { // TODO Windows: This may be possible once Windows moves to libnetwork and CNM testRequires(c, DaemonIsLinux) dockerCmd(c, "run", "-d", "--name", "test", "--expose", "8080", "busybox", "top") out, _ := dockerCmd(c, "port", "test") out = strings.Trim(out, "\r\n") if out != "" { c.Fatalf("run without --publish-all should not publish port, out should be nil, but got: %s", out) } } // Issue #10184. func (s *DockerSuite) TestDevicePermissions(c *check.C) { // Not applicable on Windows as uses Unix specific functionality testRequires(c, DaemonIsLinux) const permissions = "crw-rw-rw-" out, status := dockerCmd(c, "run", "--device", "/dev/fuse:/dev/fuse:mrw", "busybox:latest", "ls", "-l", "/dev/fuse") if status != 0 { c.Fatalf("expected status 0, got %d", status) } if !strings.HasPrefix(out, permissions) { c.Fatalf("output should begin with %q, got %q", permissions, out) } } func (s *DockerSuite) TestRunCapAddCHOWN(c *check.C) { // Not applicable on Windows as uses Unix specific functionality testRequires(c, DaemonIsLinux) out, _ := dockerCmd(c, "run", "--cap-drop=ALL", "--cap-add=CHOWN", "busybox", "sh", "-c", "adduser -D -H newuser && chown newuser /home && echo ok") if actual := strings.Trim(out, "\r\n"); actual != "ok" { c.Fatalf("expected output ok received %s", actual) } } // https://github.com/docker/docker/pull/14498 func (s *DockerSuite) TestVolumeFromMixedRWOptions(c *check.C) { prefix, slash := getPrefixAndSlashFromDaemonPlatform() dockerCmd(c, "run", "--name", "parent", "-v", prefix+"/test", "busybox", "true") dockerCmd(c, "run", "--volumes-from", "parent:ro", "--name", "test-volumes-1", "busybox", "true") dockerCmd(c, "run", "--volumes-from", "parent:rw", "--name", "test-volumes-2", "busybox", "true") if daemonPlatform != "windows" { mRO, err := inspectMountPoint("test-volumes-1", prefix+slash+"test") c.Assert(err, checker.IsNil, check.Commentf("failed to inspect mount point")) if 
		mRO.RW {
			c.Fatalf("Expected RO volume was RW")
		}
	}

	mRW, err := inspectMountPoint("test-volumes-2", prefix+slash+"test")
	c.Assert(err, checker.IsNil, check.Commentf("failed to inspect mount point"))
	if !mRW.RW {
		c.Fatalf("Expected RW volume was RO")
	}
}

// TestRunWriteFilteredProc checks that sensitive /proc paths remain
// write-protected even for privileged containers under the docker-default
// AppArmor profile.
func (s *DockerSuite) TestRunWriteFilteredProc(c *check.C) {
	// Not applicable on Windows as uses Unix specific functionality
	testRequires(c, Apparmor, DaemonIsLinux, NotUserNamespace)

	testWritePaths := []string{
		/* modprobe and core_pattern should both be denied by generic
		 * policy of denials for /proc/sys/kernel. These files have been
		 * picked to be checked as they are particularly sensitive to writes */
		"/proc/sys/kernel/modprobe",
		"/proc/sys/kernel/core_pattern",
		"/proc/sysrq-trigger",
		"/proc/kcore",
	}
	for i, filePath := range testWritePaths {
		name := fmt.Sprintf("writeprocsieve-%d", i)

		// Open an FD for writing via shell redirection; expected to be denied.
		shellCmd := fmt.Sprintf("exec 3>%s", filePath)
		out, code, err := dockerCmdWithError("run", "--privileged", "--security-opt", "apparmor=docker-default", "--name", name, "busybox", "sh", "-c", shellCmd)
		// NOTE(review): a non-zero exit code returns from the whole test,
		// silently skipping the remaining paths — presumably deliberate
		// (denial already proven); confirm before changing.
		if code != 0 {
			return
		}
		if err != nil {
			c.Fatalf("Open FD for write should have failed with permission denied, got: %s, %v", out, err)
		}
	}
}

// TestRunNetworkFilesBindMount checks that /etc/resolv.conf, /etc/hosts and
// /etc/hostname can each be bind-mounted from a host file.
func (s *DockerSuite) TestRunNetworkFilesBindMount(c *check.C) {
	// Not applicable on Windows as uses Unix specific functionality
	testRequires(c, SameHostDaemon, DaemonIsLinux)

	expected := "test123"

	filename := createTmpFile(c, expected)
	defer os.Remove(filename)

	nwfiles := []string{"/etc/resolv.conf", "/etc/hosts", "/etc/hostname"}

	for i := range nwfiles {
		actual, _ := dockerCmd(c, "run", "-v", filename+":"+nwfiles[i], "busybox", "cat", nwfiles[i])
		if actual != expected {
			c.Fatalf("expected %s be: %q, but was: %q", nwfiles[i], expected, actual)
		}
	}
}

// TestRunNetworkFilesBindMountRO checks that read-only bind mounts of the
// network files reject writes.
func (s *DockerSuite) TestRunNetworkFilesBindMountRO(c *check.C) {
	// Not applicable on Windows as uses Unix specific functionality
	testRequires(c, SameHostDaemon, DaemonIsLinux)

	filename := createTmpFile(c, "test123")
	defer os.Remove(filename)

	nwfiles := []string{"/etc/resolv.conf", "/etc/hosts", "/etc/hostname"}

	for i := range nwfiles {
		_, exitCode, err := dockerCmdWithError("run", "-v", filename+":"+nwfiles[i]+":ro", "busybox", "touch", nwfiles[i])
		if err == nil || exitCode == 0 {
			c.Fatalf("run should fail because bind mount of %s is ro: exit code %d", nwfiles[i], exitCode)
		}
	}
}

// TestRunNetworkFilesBindMountROFilesystem checks writability of the network
// files on a --read-only root filesystem: rw mounts must work, ro must not.
func (s *DockerSuite) TestRunNetworkFilesBindMountROFilesystem(c *check.C) {
	// Not applicable on Windows as uses Unix specific functionality
	testRequires(c, SameHostDaemon, DaemonIsLinux, UserNamespaceROMount)

	filename := createTmpFile(c, "test123")
	defer os.Remove(filename)

	nwfiles := []string{"/etc/resolv.conf", "/etc/hosts", "/etc/hostname"}

	for i := range nwfiles {
		_, exitCode := dockerCmd(c, "run", "-v", filename+":"+nwfiles[i], "--read-only", "busybox", "touch", nwfiles[i])
		if exitCode != 0 {
			c.Fatalf("run should not fail because %s is mounted writable on read-only root filesystem: exit code %d", nwfiles[i], exitCode)
		}
	}

	for i := range nwfiles {
		_, exitCode, err := dockerCmdWithError("run", "-v", filename+":"+nwfiles[i]+":ro", "--read-only", "busybox", "touch", nwfiles[i])
		if err == nil || exitCode == 0 {
			c.Fatalf("run should fail because %s is mounted read-only on read-only root filesystem: exit code %d", nwfiles[i], exitCode)
		}
	}
}

// TestTrustedRun checks that a signed image runs with content trust enabled,
// and can still be pulled untrusted after local removal.
func (s *DockerTrustSuite) TestTrustedRun(c *check.C) {
	// Windows does not support this functionality
	testRequires(c, DaemonIsLinux)
	repoName := s.setupTrustedImage(c, "trusted-run")

	// Try run
	runCmd := exec.Command(dockerBinary, "run", repoName)
	s.trustedCmd(runCmd)
	out, _, err := runCommandWithOutput(runCmd)
	if err != nil {
		c.Fatalf("Error running trusted run: %s\n%s\n", err, out)
	}

	if !strings.Contains(string(out), "Tagging") {
		c.Fatalf("Missing expected output on trusted push:\n%s", out)
	}

	dockerCmd(c, "rmi", repoName)

	// Try untrusted run to ensure we pushed the tag to the registry
	runCmd = exec.Command(dockerBinary, "run", "--disable-content-trust=true", repoName)
	s.trustedCmd(runCmd)
	out, _, err = runCommandWithOutput(runCmd)
	if err != nil {
		c.Fatalf("Error running trusted run: %s\n%s", err, out)
	}

	if !strings.Contains(string(out), "Status: Downloaded") {
		c.Fatalf("Missing expected output on trusted run with --disable-content-trust:\n%s", out)
	}
}

// TestUntrustedRun checks that a trusted run of an unsigned tag fails with a
// missing-trust-data error.
func (s *DockerTrustSuite) TestUntrustedRun(c *check.C) {
	// Windows does not support this functionality
	testRequires(c, DaemonIsLinux)
	repoName := fmt.Sprintf("%v/dockercliuntrusted/runtest:latest", privateRegistryURL)
	// tag the image and upload it to the private registry
	dockerCmd(c, "tag", "busybox", repoName)
	dockerCmd(c, "push", repoName)
	dockerCmd(c, "rmi", repoName)

	// Try trusted run on untrusted tag
	runCmd := exec.Command(dockerBinary, "run", repoName)
	s.trustedCmd(runCmd)
	out, _, err := runCommandWithOutput(runCmd)
	if err == nil {
		c.Fatalf("Error expected when running trusted run with:\n%s", out)
	}

	if !strings.Contains(string(out), "does not have trust data for") {
		c.Fatalf("Missing expected output on trusted run:\n%s", out)
	}
}

// TestRunWhenCertExpired checks trust behavior once the signing certificates
// have expired, by running with the system clock moved 11 years ahead.
// Currently skipped because it changes the system time.
func (s *DockerTrustSuite) TestRunWhenCertExpired(c *check.C) {
	// Windows does not support this functionality
	testRequires(c, DaemonIsLinux)
	c.Skip("Currently changes system time, causing instability")
	repoName := s.setupTrustedImage(c, "trusted-run-expired")

	// Certificates have 10 years of expiration
	elevenYearsFromNow := time.Now().Add(time.Hour * 24 * 365 * 11)

	runAtDifferentDate(elevenYearsFromNow, func() {
		// Try run
		runCmd := exec.Command(dockerBinary, "run", repoName)
		s.trustedCmd(runCmd)
		out, _, err := runCommandWithOutput(runCmd)
		// NOTE(review): err is necessarily nil in this branch, so the %s for
		// err in the message always prints empty — harmless but odd.
		if err == nil {
			c.Fatalf("Error running trusted run in the distant future: %s\n%s", err, out)
		}

		if !strings.Contains(string(out), "could not validate the path to a trusted root") {
			c.Fatalf("Missing expected output on trusted run in the distant future:\n%s", out)
		}
	})

	runAtDifferentDate(elevenYearsFromNow, func() {
		// Try run
		runCmd := exec.Command(dockerBinary, "run", "--disable-content-trust", repoName)
		s.trustedCmd(runCmd)
		out, _, err := runCommandWithOutput(runCmd)
		if err != nil {
			c.Fatalf("Error running untrusted run in the distant future: %s\n%s", err, out)
		}

		if !strings.Contains(string(out), "Status: Downloaded") {
			c.Fatalf("Missing expected output on untrusted run in the distant future:\n%s", out)
		}
	})
}

// TestTrustedRunFromBadTrustServer checks that the client refuses to accept
// an invalid root rotation from a replaced ("evil") notary server.
func (s *DockerTrustSuite) TestTrustedRunFromBadTrustServer(c *check.C) {
	// Windows does not support this functionality
	testRequires(c, DaemonIsLinux)
	repoName := fmt.Sprintf("%v/dockerclievilrun/trusted:latest", privateRegistryURL)
	evilLocalConfigDir, err := ioutil.TempDir("", "evilrun-local-config-dir")
	if err != nil {
		c.Fatalf("Failed to create local temp dir")
	}

	// tag the image and upload it to the private registry
	dockerCmd(c, "tag", "busybox", repoName)

	pushCmd := exec.Command(dockerBinary, "push", repoName)
	s.trustedCmd(pushCmd)
	out, _, err := runCommandWithOutput(pushCmd)
	if err != nil {
		c.Fatalf("Error running trusted push: %s\n%s", err, out)
	}
	if !strings.Contains(string(out), "Signing and pushing trust metadata") {
		c.Fatalf("Missing expected output on trusted push:\n%s", out)
	}

	dockerCmd(c, "rmi", repoName)

	// Try run
	runCmd := exec.Command(dockerBinary, "run", repoName)
	s.trustedCmd(runCmd)
	out, _, err = runCommandWithOutput(runCmd)
	if err != nil {
		c.Fatalf("Error running trusted run: %s\n%s", err, out)
	}

	if !strings.Contains(string(out), "Tagging") {
		c.Fatalf("Missing expected output on trusted push:\n%s", out)
	}

	dockerCmd(c, "rmi", repoName)

	// Kill the notary server, start a new "evil" one.
	s.not.Close()
	s.not, err = newTestNotary(c)
	if err != nil {
		c.Fatalf("Restarting notary server failed.")
	}

	// In order to make an evil server, lets re-init a client (with a different trust dir) and push new data.
	// tag an image and upload it to the private registry
	dockerCmd(c, "--config", evilLocalConfigDir, "tag", "busybox", repoName)

	// Push up to the new server
	pushCmd = exec.Command(dockerBinary, "--config", evilLocalConfigDir, "push", repoName)
	s.trustedCmd(pushCmd)
	out, _, err = runCommandWithOutput(pushCmd)
	if err != nil {
		c.Fatalf("Error running trusted push: %s\n%s", err, out)
	}
	if !strings.Contains(string(out), "Signing and pushing trust metadata") {
		c.Fatalf("Missing expected output on trusted push:\n%s", out)
	}

	// Now, try running with the original client from this new trust server. This should fail because the new root is invalid.
	runCmd = exec.Command(dockerBinary, "run", repoName)
	s.trustedCmd(runCmd)
	out, _, err = runCommandWithOutput(runCmd)

	if err == nil {
		c.Fatalf("Continuing with cached data even though it's an invalid root rotation: %s\n%s", err, out)
	}
	if !strings.Contains(out, "could not rotate trust to a new trusted root") {
		c.Fatalf("Missing expected output on trusted run:\n%s", out)
	}
}

// TestPtraceContainerProcsFromHost checks that the host (running the test as
// root) can read a container process's namespace links under /proc.
func (s *DockerSuite) TestPtraceContainerProcsFromHost(c *check.C) {
	// Not applicable on Windows as uses Unix specific functionality
	testRequires(c, DaemonIsLinux, SameHostDaemon)

	out, _ := dockerCmd(c, "run", "-d", "busybox", "top")
	id := strings.TrimSpace(out)
	c.Assert(waitRun(id), check.IsNil)
	pid1 := inspectField(c, id, "State.Pid")

	_, err := os.Readlink(fmt.Sprintf("/proc/%s/ns/net", pid1))
	if err != nil {
		c.Fatal(err)
	}
}

// TestAppArmorDeniesPtrace checks that AppArmor blocks a non-pid-1 process in
// the container from tracing pid 1.
func (s *DockerSuite) TestAppArmorDeniesPtrace(c *check.C) {
	// Not applicable on Windows as uses Unix specific functionality
	testRequires(c, SameHostDaemon, Apparmor, DaemonIsLinux)

	// Run through 'sh' so we are NOT pid 1. Pid 1 may be able to trace
	// itself, but pid>1 should not be able to trace pid1.
	_, exitCode, _ := dockerCmdWithError("run", "busybox", "sh", "-c", "sh -c readlink /proc/1/ns/net")
	if exitCode == 0 {
		c.Fatal("ptrace was not successfully restricted by AppArmor")
	}
}

// TestAppArmorTraceSelf checks that the container's pid 1 may still read its
// own namespace links.
func (s *DockerSuite) TestAppArmorTraceSelf(c *check.C) {
	// Not applicable on Windows as uses Unix specific functionality
	testRequires(c, DaemonIsLinux, SameHostDaemon, Apparmor)

	_, exitCode, _ := dockerCmdWithError("run", "busybox", "readlink", "/proc/1/ns/net")
	if exitCode != 0 {
		c.Fatal("ptrace of self failed.")
	}
}

// TestAppArmorDeniesChmodProc checks that chmod of /proc files is denied; if
// the first chmod unexpectedly succeeds, it attempts to restore the host's
// /proc/cpuinfo mode before failing the test.
func (s *DockerSuite) TestAppArmorDeniesChmodProc(c *check.C) {
	// Not applicable on Windows as uses Unix specific functionality
	testRequires(c, SameHostDaemon, Apparmor, DaemonIsLinux, NotUserNamespace)
	_, exitCode, _ := dockerCmdWithError("run", "busybox", "chmod", "744", "/proc/cpuinfo")
	if exitCode == 0 {
		// If our test failed, attempt to repair the host system...
		_, exitCode, _ := dockerCmdWithError("run", "busybox", "chmod", "444", "/proc/cpuinfo")
		if exitCode == 0 {
			c.Fatal("AppArmor was unsuccessful in prohibiting chmod of /proc/* files.")
		}
	}
}

// TestRunCapAddSYSTIME checks that adding only CAP_SYS_TIME yields exactly
// that bit (0x2000000) in the effective capability set.
func (s *DockerSuite) TestRunCapAddSYSTIME(c *check.C) {
	// Not applicable on Windows as uses Unix specific functionality
	testRequires(c, DaemonIsLinux)

	dockerCmd(c, "run", "--cap-drop=ALL", "--cap-add=SYS_TIME", "busybox",
		"sh", "-c", "grep ^CapEff /proc/self/status | sed 's/^CapEff:\t//' | grep ^0000000002000000$")
}

// run create container failed should clean up the container
func (s *DockerSuite) TestRunCreateContainerFailedCleanUp(c *check.C) {
	// TODO Windows.
	// This may be possible to enable once link is supported
	testRequires(c, DaemonIsLinux)
	name := "unique_name"
	_, _, err := dockerCmdWithError("run", "--name", name, "--link", "nothing:nothing", "busybox")
	c.Assert(err, check.NotNil, check.Commentf("Expected docker run to fail!"))

	// The failed create must not leave a container behind.
	containerID, err := inspectFieldWithError(name, "Id")
	c.Assert(err, checker.NotNil, check.Commentf("Expected not to have this container: %s!", containerID))
	c.Assert(containerID, check.Equals, "", check.Commentf("Expected not to have this container: %s!", containerID))
}

// TestRunNamedVolume checks that data written to a named volume is visible
// from other containers that mount it.
func (s *DockerSuite) TestRunNamedVolume(c *check.C) {
	prefix, _ := getPrefixAndSlashFromDaemonPlatform()
	testRequires(c, DaemonIsLinux)
	dockerCmd(c, "run", "--name=test", "-v", "testing:"+prefix+"/foo", "busybox", "sh", "-c", "echo hello > "+prefix+"/foo/bar")

	out, _ := dockerCmd(c, "run", "--volumes-from", "test", "busybox", "sh", "-c", "cat "+prefix+"/foo/bar")
	c.Assert(strings.TrimSpace(out), check.Equals, "hello")

	out, _ = dockerCmd(c, "run", "-v", "testing:"+prefix+"/foo", "busybox", "sh", "-c", "cat "+prefix+"/foo/bar")
	c.Assert(strings.TrimSpace(out), check.Equals, "hello")
}

// TestRunWithUlimits checks that --ulimit is applied inside the container.
func (s *DockerSuite) TestRunWithUlimits(c *check.C) {
	// Not applicable on Windows as uses Unix specific functionality
	testRequires(c, DaemonIsLinux)

	out, _ := dockerCmd(c, "run", "--name=testulimits", "--ulimit", "nofile=42", "busybox", "/bin/sh", "-c", "ulimit -n")
	ul := strings.TrimSpace(out)
	if ul != "42" {
		c.Fatalf("expected `ulimit -n` to be 42, got %s", ul)
	}
}

// TestRunContainerWithCgroupParent checks that a relative --cgroup-parent is
// reflected as a suffix of the container's cgroup paths.
func (s *DockerSuite) TestRunContainerWithCgroupParent(c *check.C) {
	// Not applicable on Windows as uses Unix specific functionality
	testRequires(c, DaemonIsLinux)

	cgroupParent := "test"
	name := "cgroup-test"

	out, _, err := dockerCmdWithError("run", "--cgroup-parent", cgroupParent, "--name", name, "busybox", "cat", "/proc/self/cgroup")
	if err != nil {
		c.Fatalf("unexpected failure when running container with --cgroup-parent option - %s\n%v", string(out), err)
	}
	cgroupPaths := parseCgroupPaths(string(out))
	if len(cgroupPaths) == 0 {
		c.Fatalf("unexpected output - %q", string(out))
	}
	id, err := getIDByName(name)
	c.Assert(err, check.IsNil)
	expectedCgroup := path.Join(cgroupParent, id)
	found := false
	for _, path := range cgroupPaths {
		if strings.HasSuffix(path, expectedCgroup) {
			found = true
			break
		}
	}
	if !found {
		c.Fatalf("unexpected cgroup paths. Expected at least one cgroup path to have suffix %q. Cgroup Paths: %v", expectedCgroup, cgroupPaths)
	}
}

// TestRunContainerWithCgroupParentAbsPath is the same check for an absolute
// --cgroup-parent path.
func (s *DockerSuite) TestRunContainerWithCgroupParentAbsPath(c *check.C) {
	// Not applicable on Windows as uses Unix specific functionality
	testRequires(c, DaemonIsLinux)

	cgroupParent := "/cgroup-parent/test"
	name := "cgroup-test"
	out, _, err := dockerCmdWithError("run", "--cgroup-parent", cgroupParent, "--name", name, "busybox", "cat", "/proc/self/cgroup")
	if err != nil {
		c.Fatalf("unexpected failure when running container with --cgroup-parent option - %s\n%v", string(out), err)
	}
	cgroupPaths := parseCgroupPaths(string(out))
	if len(cgroupPaths) == 0 {
		c.Fatalf("unexpected output - %q", string(out))
	}
	id, err := getIDByName(name)
	c.Assert(err, check.IsNil)
	expectedCgroup := path.Join(cgroupParent, id)
	found := false
	for _, path := range cgroupPaths {
		if strings.HasSuffix(path, expectedCgroup) {
			found = true
			break
		}
	}
	if !found {
		c.Fatalf("unexpected cgroup paths. Expected at least one cgroup path to have suffix %q. Cgroup Paths: %v", expectedCgroup, cgroupPaths)
	}
}

// TestRunInvalidCgroupParent checks that a specially-crafted cgroup parent doesn't cause Docker to crash or start modifying /.
func (s *DockerSuite) TestRunInvalidCgroupParent(c *check.C) {
	// Not applicable on Windows as uses Unix specific functionality
	testRequires(c, DaemonIsLinux)

	cgroupParent := "../../../../../../../../SHOULD_NOT_EXIST"
	cleanCgroupParent := "SHOULD_NOT_EXIST"
	name := "cgroup-invalid-test"

	out, _, err := dockerCmdWithError("run", "--cgroup-parent", cgroupParent, "--name", name, "busybox", "cat", "/proc/self/cgroup")
	if err != nil {
		// XXX: This may include a daemon crash.
		c.Fatalf("unexpected failure when running container with --cgroup-parent option - %s\n%v", string(out), err)
	}

	// We expect "/SHOULD_NOT_EXIST" to not exist. If not, we have a security issue.
	if _, err := os.Stat("/SHOULD_NOT_EXIST"); err == nil || !os.IsNotExist(err) {
		c.Fatalf("SECURITY: --cgroup-parent with ../../ relative paths cause files to be created in the host (this is bad) !!")
	}

	cgroupPaths := parseCgroupPaths(string(out))
	if len(cgroupPaths) == 0 {
		c.Fatalf("unexpected output - %q", string(out))
	}
	id, err := getIDByName(name)
	c.Assert(err, check.IsNil)
	// The daemon must have sanitized the parent down to the clean basename.
	expectedCgroup := path.Join(cleanCgroupParent, id)
	found := false
	for _, path := range cgroupPaths {
		if strings.HasSuffix(path, expectedCgroup) {
			found = true
			break
		}
	}
	if !found {
		c.Fatalf("unexpected cgroup paths. Expected at least one cgroup path to have suffix %q. Cgroup Paths: %v", expectedCgroup, cgroupPaths)
	}
}

// TestRunAbsoluteInvalidCgroupParent checks that a specially-crafted absolute cgroup parent doesn't cause Docker to crash or start modifying /.
func (s *DockerSuite) TestRunAbsoluteInvalidCgroupParent(c *check.C) {
	// Not applicable on Windows as uses Unix specific functionality
	testRequires(c, DaemonIsLinux)

	cgroupParent := "/../../../../../../../../SHOULD_NOT_EXIST"
	cleanCgroupParent := "/SHOULD_NOT_EXIST"
	name := "cgroup-absolute-invalid-test"

	out, _, err := dockerCmdWithError("run", "--cgroup-parent", cgroupParent, "--name", name, "busybox", "cat", "/proc/self/cgroup")
	if err != nil {
		// XXX: This may include a daemon crash.
		c.Fatalf("unexpected failure when running container with --cgroup-parent option - %s\n%v", string(out), err)
	}

	// We expect "/SHOULD_NOT_EXIST" to not exist. If not, we have a security issue.
	if _, err := os.Stat("/SHOULD_NOT_EXIST"); err == nil || !os.IsNotExist(err) {
		c.Fatalf("SECURITY: --cgroup-parent with /../../ garbage paths cause files to be created in the host (this is bad) !!")
	}

	cgroupPaths := parseCgroupPaths(string(out))
	if len(cgroupPaths) == 0 {
		c.Fatalf("unexpected output - %q", string(out))
	}
	id, err := getIDByName(name)
	c.Assert(err, check.IsNil)
	expectedCgroup := path.Join(cleanCgroupParent, id)
	found := false
	for _, path := range cgroupPaths {
		if strings.HasSuffix(path, expectedCgroup) {
			found = true
			break
		}
	}
	if !found {
		c.Fatalf("unexpected cgroup paths. Expected at least one cgroup path to have suffix %q. Cgroup Paths: %v", expectedCgroup, cgroupPaths)
	}
}

// TestRunContainerWithCgroupMountRO checks that the cgroup filesystem is
// mounted read-only inside the container.
func (s *DockerSuite) TestRunContainerWithCgroupMountRO(c *check.C) {
	// Not applicable on Windows as uses Unix specific functionality
	// --read-only + userns has remount issues
	testRequires(c, DaemonIsLinux, NotUserNamespace)

	filename := "/sys/fs/cgroup/devices/test123"
	out, _, err := dockerCmdWithError("run", "busybox", "touch", filename)
	if err == nil {
		c.Fatal("expected cgroup mount point to be read-only, touch file should fail")
	}
	expected := "Read-only file system"
	if !strings.Contains(out, expected) {
		c.Fatalf("expected output from failure to contain %s but contains %s", expected, out)
	}
}

// TestRunContainerNetworkModeToSelf checks that --net=container pointing at
// the container itself is rejected.
func (s *DockerSuite) TestRunContainerNetworkModeToSelf(c *check.C) {
	// Not applicable on Windows which does not support --net=container
	testRequires(c, DaemonIsLinux)
	out, _, err := dockerCmdWithError("run", "--name=me", "--net=container:me", "busybox", "true")
	if err == nil || !strings.Contains(out, "cannot join own network") {
		c.Fatalf("using container net mode to self should result in an error\nerr: %q\nout: %s", err, out)
	}
}

// TestRunContainerNetModeWithDNSMacHosts checks that --dns, --mac-address and
// --add-host each conflict with --net=container.
func (s *DockerSuite) TestRunContainerNetModeWithDNSMacHosts(c
*check.C) {
	// Not applicable on Windows which does not support --net=container
	testRequires(c, DaemonIsLinux)
	out, _, err := dockerCmdWithError("run", "-d", "--name", "parent", "busybox", "top")
	if err != nil {
		c.Fatalf("failed to run container: %v, output: %q", err, out)
	}

	out, _, err = dockerCmdWithError("run", "--dns", "1.2.3.4", "--net=container:parent", "busybox")
	if err == nil || !strings.Contains(out, runconfig.ErrConflictNetworkAndDNS.Error()) {
		c.Fatalf("run --net=container with --dns should error out")
	}

	out, _, err = dockerCmdWithError("run", "--mac-address", "92:d0:c6:0a:29:33", "--net=container:parent", "busybox")
	if err == nil || !strings.Contains(out, runconfig.ErrConflictContainerNetworkAndMac.Error()) {
		c.Fatalf("run --net=container with --mac-address should error out")
	}

	out, _, err = dockerCmdWithError("run", "--add-host", "test:192.168.2.109", "--net=container:parent", "busybox")
	if err == nil || !strings.Contains(out, runconfig.ErrConflictNetworkHosts.Error()) {
		c.Fatalf("run --net=container with --add-host should error out")
	}
}

// TestRunContainerNetModeWithExposePort checks that -p, -P and --expose each
// conflict with --net=container.
func (s *DockerSuite) TestRunContainerNetModeWithExposePort(c *check.C) {
	// Not applicable on Windows which does not support --net=container
	testRequires(c, DaemonIsLinux)
	dockerCmd(c, "run", "-d", "--name", "parent", "busybox", "top")

	out, _, err := dockerCmdWithError("run", "-p", "5000:5000", "--net=container:parent", "busybox")
	if err == nil || !strings.Contains(out, runconfig.ErrConflictNetworkPublishPorts.Error()) {
		c.Fatalf("run --net=container with -p should error out")
	}

	out, _, err = dockerCmdWithError("run", "-P", "--net=container:parent", "busybox")
	if err == nil || !strings.Contains(out, runconfig.ErrConflictNetworkPublishPorts.Error()) {
		c.Fatalf("run --net=container with -P should error out")
	}

	out, _, err = dockerCmdWithError("run", "--expose", "5000", "--net=container:parent", "busybox")
	if err == nil || !strings.Contains(out, runconfig.ErrConflictNetworkExposePorts.Error()) {
		c.Fatalf("run --net=container with --expose should error out")
	}
}

// TestRunLinkToContainerNetMode checks that --link works both to and from
// containers running in --net=container mode.
func (s *DockerSuite) TestRunLinkToContainerNetMode(c *check.C) {
	// Not applicable on Windows which does not support --net=container or --link
	testRequires(c, DaemonIsLinux)
	dockerCmd(c, "run", "--name", "test", "-d", "busybox", "top")
	dockerCmd(c, "run", "--name", "parent", "-d", "--net=container:test", "busybox", "top")
	dockerCmd(c, "run", "-d", "--link=parent:parent", "busybox", "top")
	dockerCmd(c, "run", "--name", "child", "-d", "--net=container:parent", "busybox", "top")
	dockerCmd(c, "run", "-d", "--link=child:child", "busybox", "top")
}

// TestRunLoopbackOnlyExistsWhenNetworkingDisabled checks that --net=none
// leaves only the loopback interface up.
func (s *DockerSuite) TestRunLoopbackOnlyExistsWhenNetworkingDisabled(c *check.C) {
	// TODO Windows: This may be possible to convert.
	testRequires(c, DaemonIsLinux)
	out, _ := dockerCmd(c, "run", "--net=none", "busybox", "ip", "-o", "-4", "a", "show", "up")

	var (
		count = 0
		parts = strings.Split(out, "\n")
	)

	for _, l := range parts {
		if l != "" {
			count++
		}
	}

	if count != 1 {
		c.Fatalf("Wrong interface count in container %d", count)
	}

	if !strings.HasPrefix(out, "1: lo") {
		c.Fatalf("Wrong interface in test container: expected [1: lo], got %s", out)
	}
}

// TestRunLoopbackWhenNetworkDisabled checks loopback still works with
// --net=none. Issue #4681
func (s *DockerSuite) TestRunLoopbackWhenNetworkDisabled(c *check.C) {
	if daemonPlatform == "windows" {
		dockerCmd(c, "run", "--net=none", WindowsBaseImage, "ping", "-n", "1", "127.0.0.1")
	} else {
		dockerCmd(c, "run", "--net=none", "busybox", "ping", "-c", "1", "127.0.0.1")
	}
}

// TestRunModeNetContainerHostname checks that containers sharing a network
// namespace also share a hostname.
func (s *DockerSuite) TestRunModeNetContainerHostname(c *check.C) {
	// Windows does not support --net=container
	testRequires(c, DaemonIsLinux, ExecSupport)

	dockerCmd(c, "run", "-i", "-d", "--name", "parent", "busybox", "top")
	out, _ := dockerCmd(c, "exec", "parent", "cat", "/etc/hostname")
	out1, _ := dockerCmd(c, "run", "--net=container:parent", "busybox", "cat", "/etc/hostname")

	if out1 != out {
		c.Fatal("containers with shared net namespace should have same hostname")
	}
}

// TestRunNetworkNotInitializedNoneMode checks that --net=none yields no IP
// address in the container's network settings.
func (s *DockerSuite) TestRunNetworkNotInitializedNoneMode(c *check.C) {
// TODO Windows: Network settings are not currently propagated. This may // be resolved in the future with the move to libnetwork and CNM. testRequires(c, DaemonIsLinux) out, _ := dockerCmd(c, "run", "-d", "--net=none", "busybox", "top") id := strings.TrimSpace(out) res := inspectField(c, id, "NetworkSettings.Networks.none.IPAddress") if res != "" { c.Fatalf("For 'none' mode network must not be initialized, but container got IP: %s", res) } } func (s *DockerSuite) TestTwoContainersInNetHost(c *check.C) { // Not applicable as Windows does not support --net=host testRequires(c, DaemonIsLinux, NotUserNamespace, NotUserNamespace) dockerCmd(c, "run", "-d", "--net=host", "--name=first", "busybox", "top") dockerCmd(c, "run", "-d", "--net=host", "--name=second", "busybox", "top") dockerCmd(c, "stop", "first") dockerCmd(c, "stop", "second") } func (s *DockerSuite) TestContainersInUserDefinedNetwork(c *check.C) { testRequires(c, DaemonIsLinux, NotUserNamespace, NotArm) dockerCmd(c, "network", "create", "-d", "bridge", "testnetwork") dockerCmd(c, "run", "-d", "--net=testnetwork", "--name=first", "busybox", "top") c.Assert(waitRun("first"), check.IsNil) dockerCmd(c, "run", "-t", "--net=testnetwork", "--name=second", "busybox", "ping", "-c", "1", "first") } func (s *DockerSuite) TestContainersInMultipleNetworks(c *check.C) { testRequires(c, DaemonIsLinux, NotUserNamespace, NotArm) // Create 2 networks using bridge driver dockerCmd(c, "network", "create", "-d", "bridge", "testnetwork1") dockerCmd(c, "network", "create", "-d", "bridge", "testnetwork2") // Run and connect containers to testnetwork1 dockerCmd(c, "run", "-d", "--net=testnetwork1", "--name=first", "busybox", "top") c.Assert(waitRun("first"), check.IsNil) dockerCmd(c, "run", "-d", "--net=testnetwork1", "--name=second", "busybox", "top") c.Assert(waitRun("second"), check.IsNil) // Check connectivity between containers in testnetwork2 dockerCmd(c, "exec", "first", "ping", "-c", "1", "second.testnetwork1") // Connect 
	// Connect containers to testnetwork2
	dockerCmd(c, "network", "connect", "testnetwork2", "first")
	dockerCmd(c, "network", "connect", "testnetwork2", "second")
	// Check connectivity between containers
	dockerCmd(c, "exec", "second", "ping", "-c", "1", "first.testnetwork2")
}

// TestContainersNetworkIsolation checks ping isolation across separate
// bridge networks, including connect/disconnect transitions.
func (s *DockerSuite) TestContainersNetworkIsolation(c *check.C) {
	testRequires(c, DaemonIsLinux, NotUserNamespace, NotArm)
	// Create 2 networks using bridge driver
	dockerCmd(c, "network", "create", "-d", "bridge", "testnetwork1")
	dockerCmd(c, "network", "create", "-d", "bridge", "testnetwork2")
	// Run 1 container in testnetwork1 and another in testnetwork2
	dockerCmd(c, "run", "-d", "--net=testnetwork1", "--name=first", "busybox", "top")
	c.Assert(waitRun("first"), check.IsNil)
	dockerCmd(c, "run", "-d", "--net=testnetwork2", "--name=second", "busybox", "top")
	c.Assert(waitRun("second"), check.IsNil)

	// Check Isolation between containers : ping must fail
	_, _, err := dockerCmdWithError("exec", "first", "ping", "-c", "1", "second")
	c.Assert(err, check.NotNil)
	// Connect first container to testnetwork2
	dockerCmd(c, "network", "connect", "testnetwork2", "first")
	// ping must succeed now
	_, _, err = dockerCmdWithError("exec", "first", "ping", "-c", "1", "second")
	c.Assert(err, check.IsNil)

	// Disconnect first container from testnetwork2
	dockerCmd(c, "network", "disconnect", "testnetwork2", "first")
	// ping must fail again
	_, _, err = dockerCmdWithError("exec", "first", "ping", "-c", "1", "second")
	c.Assert(err, check.NotNil)
}

// TestNetworkRmWithActiveContainers checks that a network with attached
// containers cannot be removed, even after one of them stops.
func (s *DockerSuite) TestNetworkRmWithActiveContainers(c *check.C) {
	testRequires(c, DaemonIsLinux, NotUserNamespace)
	// Create 2 networks using bridge driver
	dockerCmd(c, "network", "create", "-d", "bridge", "testnetwork1")
	// Run and connect containers to testnetwork1
	dockerCmd(c, "run", "-d", "--net=testnetwork1", "--name=first", "busybox", "top")
	c.Assert(waitRun("first"), check.IsNil)
	dockerCmd(c, "run", "-d", "--net=testnetwork1", "--name=second", "busybox", "top")
	c.Assert(waitRun("second"), check.IsNil)
	// Network delete with active containers must fail
	_, _, err := dockerCmdWithError("network", "rm", "testnetwork1")
	c.Assert(err, check.NotNil)

	// A stopped container is still attached, so removal must keep failing.
	dockerCmd(c, "stop", "first")
	_, _, err = dockerCmdWithError("network", "rm", "testnetwork1")
	c.Assert(err, check.NotNil)
}

// TestContainerRestartInMultipleNetworks checks that connectivity on both
// networks is lost while a container is stopped and restored on restart.
func (s *DockerSuite) TestContainerRestartInMultipleNetworks(c *check.C) {
	testRequires(c, DaemonIsLinux, NotUserNamespace, NotArm)
	// Create 2 networks using bridge driver
	dockerCmd(c, "network", "create", "-d", "bridge", "testnetwork1")
	dockerCmd(c, "network", "create", "-d", "bridge", "testnetwork2")

	// Run and connect containers to testnetwork1
	dockerCmd(c, "run", "-d", "--net=testnetwork1", "--name=first", "busybox", "top")
	c.Assert(waitRun("first"), check.IsNil)
	dockerCmd(c, "run", "-d", "--net=testnetwork1", "--name=second", "busybox", "top")
	c.Assert(waitRun("second"), check.IsNil)
	// Check connectivity between containers in testnetwork2
	dockerCmd(c, "exec", "first", "ping", "-c", "1", "second.testnetwork1")
	// Connect containers to testnetwork2
	dockerCmd(c, "network", "connect", "testnetwork2", "first")
	dockerCmd(c, "network", "connect", "testnetwork2", "second")
	// Check connectivity between containers
	dockerCmd(c, "exec", "second", "ping", "-c", "1", "first.testnetwork2")

	// Stop second container and test ping failures on both networks
	dockerCmd(c, "stop", "second")
	_, _, err := dockerCmdWithError("exec", "first", "ping", "-c", "1", "second.testnetwork1")
	c.Assert(err, check.NotNil)
	_, _, err = dockerCmdWithError("exec", "first", "ping", "-c", "1", "second.testnetwork2")
	c.Assert(err, check.NotNil)

	// Start second container and connectivity must be restored on both networks
	dockerCmd(c, "start", "second")
	dockerCmd(c, "exec", "first", "ping", "-c", "1", "second.testnetwork1")
	dockerCmd(c, "exec", "second", "ping", "-c", "1", "first.testnetwork2")
}

// TestContainerWithConflictingHostNetworks checks that a --net=host container
// cannot be connected to a user-defined network.
func (s *DockerSuite) TestContainerWithConflictingHostNetworks(c *check.C) {
	testRequires(c,
		DaemonIsLinux, NotUserNamespace)
	// Run a container with --net=host
	dockerCmd(c, "run", "-d", "--net=host", "--name=first", "busybox", "top")
	c.Assert(waitRun("first"), check.IsNil)

	// Create a network using bridge driver
	dockerCmd(c, "network", "create", "-d", "bridge", "testnetwork1")

	// Connecting to the user defined network must fail
	_, _, err := dockerCmdWithError("network", "connect", "testnetwork1", "first")
	c.Assert(err, check.NotNil)
}

// TestContainerWithConflictingSharedNetwork checks that a container sharing
// another container's network namespace cannot join a user-defined network.
func (s *DockerSuite) TestContainerWithConflictingSharedNetwork(c *check.C) {
	testRequires(c, DaemonIsLinux)
	dockerCmd(c, "run", "-d", "--name=first", "busybox", "top")
	c.Assert(waitRun("first"), check.IsNil)
	// Run second container in first container's network namespace
	dockerCmd(c, "run", "-d", "--net=container:first", "--name=second", "busybox", "top")
	c.Assert(waitRun("second"), check.IsNil)

	// Create a network using bridge driver
	dockerCmd(c, "network", "create", "-d", "bridge", "testnetwork1")

	// Connecting to the user defined network must fail
	out, _, err := dockerCmdWithError("network", "connect", "testnetwork1", "second")
	c.Assert(err, check.NotNil)
	c.Assert(out, checker.Contains, runconfig.ErrConflictSharedNetwork.Error())
}

// TestContainerWithConflictingNoneNetwork checks that a --net=none container
// cannot join a user-defined network, and that a networked container cannot
// join "none".
func (s *DockerSuite) TestContainerWithConflictingNoneNetwork(c *check.C) {
	testRequires(c, DaemonIsLinux)
	dockerCmd(c, "run", "-d", "--net=none", "--name=first", "busybox", "top")
	c.Assert(waitRun("first"), check.IsNil)

	// Create a network using bridge driver
	dockerCmd(c, "network", "create", "-d", "bridge", "testnetwork1")

	// Connecting to the user defined network must fail
	out, _, err := dockerCmdWithError("network", "connect", "testnetwork1", "first")
	c.Assert(err, check.NotNil)
	c.Assert(out, checker.Contains, runconfig.ErrConflictNoNetwork.Error())

	// create a container connected to testnetwork1
	dockerCmd(c, "run", "-d", "--net=testnetwork1", "--name=second", "busybox", "top")
	c.Assert(waitRun("second"), check.IsNil)

	// Connect second container to none network. it must fail as well
	_, _, err = dockerCmdWithError("network", "connect", "none", "second")
	c.Assert(err, check.NotNil)
}

// #11957 - stdin with no tty does not exit if stdin is not closed even though container exited
func (s *DockerSuite) TestRunStdinBlockedAfterContainerExit(c *check.C) {
	cmd := exec.Command(dockerBinary, "run", "-i", "--name=test", "busybox", "true")
	in, err := cmd.StdinPipe()
	c.Assert(err, check.IsNil)
	defer in.Close()
	stdout := bytes.NewBuffer(nil)
	cmd.Stdout = stdout
	cmd.Stderr = stdout
	c.Assert(cmd.Start(), check.IsNil)

	waitChan := make(chan error)
	go func() {
		waitChan <- cmd.Wait()
	}()

	// The client must exit within 30s even though stdin is never closed.
	select {
	case err := <-waitChan:
		c.Assert(err, check.IsNil, check.Commentf(stdout.String()))
	case <-time.After(30 * time.Second):
		c.Fatal("timeout waiting for command to exit")
	}
}

// TestRunWrongCpusetCpusFlagValue checks the daemon rejects a malformed
// --cpuset-cpus value.
func (s *DockerSuite) TestRunWrongCpusetCpusFlagValue(c *check.C) {
	// TODO Windows: This needs validation (error out) in the daemon.
	testRequires(c, DaemonIsLinux)
	out, exitCode, err := dockerCmdWithError("run", "--cpuset-cpus", "1-10,11--", "busybox", "true")
	c.Assert(err, check.NotNil)
	expected := "Error response from daemon: Invalid value 1-10,11-- for cpuset cpus.\n"
	if !(strings.Contains(out, expected) || exitCode == 125) {
		c.Fatalf("Expected output to contain %q with exitCode 125, got out: %q exitCode: %v", expected, out, exitCode)
	}
}

// TestRunWrongCpusetMemsFlagValue checks the daemon rejects a malformed
// --cpuset-mems value.
func (s *DockerSuite) TestRunWrongCpusetMemsFlagValue(c *check.C) {
	// TODO Windows: This needs validation (error out) in the daemon.
	testRequires(c, DaemonIsLinux)
	out, exitCode, err := dockerCmdWithError("run", "--cpuset-mems", "1-42--", "busybox", "true")
	c.Assert(err, check.NotNil)
	expected := "Error response from daemon: Invalid value 1-42-- for cpuset mems.\n"
	if !(strings.Contains(out, expected) || exitCode == 125) {
		c.Fatalf("Expected output to contain %q with exitCode 125, got out: %q exitCode: %v", expected, out, exitCode)
	}
}

// TestRunNonExecutableCmd checks that 'docker run busybox foo' exits with error code 127'
func (s *DockerSuite) TestRunNonExecutableCmd(c *check.C) {
	name := "testNonExecutableCmd"
	runCmd := exec.Command(dockerBinary, "run", "--name", name, "busybox", "foo")
	_, exit, _ := runCommandWithOutput(runCmd)
	stateExitCode := findContainerExitCode(c, name)
	// Both the CLI exit status and the stored State.ExitCode must be 127.
	if !(exit == 127 && strings.Contains(stateExitCode, "127")) {
		c.Fatalf("Run non-executable command should have errored with exit code 127, but we got exit: %d, State.ExitCode: %s", exit, stateExitCode)
	}
}

// TestRunNonExistingCmd checks that 'docker run busybox /bin/foo' exits with code 127.
func (s *DockerSuite) TestRunNonExistingCmd(c *check.C) {
	name := "testNonExistingCmd"
	runCmd := exec.Command(dockerBinary, "run", "--name", name, "busybox", "/bin/foo")
	_, exit, _ := runCommandWithOutput(runCmd)
	stateExitCode := findContainerExitCode(c, name)
	if !(exit == 127 && strings.Contains(stateExitCode, "127")) {
		c.Fatalf("Run non-existing command should have errored with exit code 127, but we got exit: %d, State.ExitCode: %s", exit, stateExitCode)
	}
}

// TestCmdCannotBeInvoked checks that 'docker run busybox /etc' exits with 126, or
// 127 on Windows. The difference is that in Windows, the container must be started
// as that's when the check is made (and yes, by its design...)
func (s *DockerSuite) TestCmdCannotBeInvoked(c *check.C) {
	expected := 126
	if daemonPlatform == "windows" {
		expected = 127
	}
	name := "testCmdCannotBeInvoked"
	runCmd := exec.Command(dockerBinary, "run", "--name", name, "busybox", "/etc")
	_, exit, _ := runCommandWithOutput(runCmd)
	stateExitCode := findContainerExitCode(c, name)
	if !(exit == expected && strings.Contains(stateExitCode, strconv.Itoa(expected))) {
		c.Fatalf("Run cmd that cannot be invoked should have errored with code %d, but we got exit: %d, State.ExitCode: %s", expected, exit, stateExitCode)
	}
}

// TestRunNonExistingImage checks that 'docker run foo' exits with error msg 125 and contains 'Unable to find image'
func (s *DockerSuite) TestRunNonExistingImage(c *check.C) {
	runCmd := exec.Command(dockerBinary, "run", "foo")
	out, exit, err := runCommandWithOutput(runCmd)
	if !(err != nil && exit == 125 && strings.Contains(out, "Unable to find image")) {
		c.Fatalf("Run non-existing image should have errored with 'Unable to find image' code 125, but we got out: %s, exit: %d, err: %s", out, exit, err)
	}
}

// TestDockerFails checks that 'docker run -foo busybox' exits with 125 to signal docker run failed
func (s *DockerSuite) TestDockerFails(c *check.C) {
	runCmd := exec.Command(dockerBinary, "run", "-foo", "busybox")
	out, exit, err := runCommandWithOutput(runCmd)
	if !(err != nil && exit == 125) {
		c.Fatalf("Docker run with flag not defined should exit with 125, but we got out: %s, exit: %d, err: %s", out, exit, err)
	}
}

// TestRunInvalidReference invokes docker run with a bad reference.
func (s *DockerSuite) TestRunInvalidReference(c *check.C) { out, exit, _ := dockerCmdWithError("run", "busybox@foo") if exit == 0 { c.Fatalf("expected non-zero exist code; received %d", exit) } if !strings.Contains(out, "Error parsing reference") { c.Fatalf(`Expected "Error parsing reference" in output; got: %s`, out) } } // Test fix for issue #17854 func (s *DockerSuite) TestRunInitLayerPathOwnership(c *check.C) { // Not applicable on Windows as it does not support Linux uid/gid ownership testRequires(c, DaemonIsLinux) name := "testetcfileownership" _, err := buildImage(name, `FROM busybox RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd RUN echo 'dockerio:x:1001:' >> /etc/group RUN chown dockerio:dockerio /etc`, true) if err != nil { c.Fatal(err) } // Test that dockerio ownership of /etc is retained at runtime out, _ := dockerCmd(c, "run", "--rm", name, "stat", "-c", "%U:%G", "/etc") out = strings.TrimSpace(out) if out != "dockerio:dockerio" { c.Fatalf("Wrong /etc ownership: expected dockerio:dockerio, got %q", out) } } func (s *DockerSuite) TestRunWithOomScoreAdj(c *check.C) { testRequires(c, DaemonIsLinux) expected := "642" out, _ := dockerCmd(c, "run", "--oom-score-adj", expected, "busybox", "cat", "/proc/self/oom_score_adj") oomScoreAdj := strings.TrimSpace(out) if oomScoreAdj != "642" { c.Fatalf("Expected oom_score_adj set to %q, got %q instead", expected, oomScoreAdj) } } func (s *DockerSuite) TestRunWithOomScoreAdjInvalidRange(c *check.C) { testRequires(c, DaemonIsLinux) out, _, err := dockerCmdWithError("run", "--oom-score-adj", "1001", "busybox", "true") c.Assert(err, check.NotNil) expected := "Invalid value 1001, range for oom score adj is [-1000, 1000]." 
if !strings.Contains(out, expected) { c.Fatalf("Expected output to contain %q, got %q instead", expected, out) } out, _, err = dockerCmdWithError("run", "--oom-score-adj", "-1001", "busybox", "true") c.Assert(err, check.NotNil) expected = "Invalid value -1001, range for oom score adj is [-1000, 1000]." if !strings.Contains(out, expected) { c.Fatalf("Expected output to contain %q, got %q instead", expected, out) } } func (s *DockerSuite) TestRunVolumesMountedAsShared(c *check.C) { // Volume propagation is linux only. Also it creates directories for // bind mounting, so needs to be same host. testRequires(c, DaemonIsLinux, SameHostDaemon, NotUserNamespace) // Prepare a source directory to bind mount tmpDir, err := ioutil.TempDir("", "volume-source") if err != nil { c.Fatal(err) } defer os.RemoveAll(tmpDir) if err := os.Mkdir(path.Join(tmpDir, "mnt1"), 0755); err != nil { c.Fatal(err) } // Convert this directory into a shared mount point so that we do // not rely on propagation properties of parent mount. cmd := exec.Command("mount", "--bind", tmpDir, tmpDir) if _, err = runCommand(cmd); err != nil { c.Fatal(err) } cmd = exec.Command("mount", "--make-private", "--make-shared", tmpDir) if _, err = runCommand(cmd); err != nil { c.Fatal(err) } dockerCmd(c, "run", "--privileged", "-v", fmt.Sprintf("%s:/volume-dest:shared", tmpDir), "busybox", "mount", "--bind", "/volume-dest/mnt1", "/volume-dest/mnt1") // Make sure a bind mount under a shared volume propagated to host. if mounted, _ := mount.Mounted(path.Join(tmpDir, "mnt1")); !mounted { c.Fatalf("Bind mount under shared volume did not propagate to host") } mount.Unmount(path.Join(tmpDir, "mnt1")) } func (s *DockerSuite) TestRunVolumesMountedAsSlave(c *check.C) { // Volume propagation is linux only. Also it creates directories for // bind mounting, so needs to be same host. 
testRequires(c, DaemonIsLinux, SameHostDaemon, NotUserNamespace) // Prepare a source directory to bind mount tmpDir, err := ioutil.TempDir("", "volume-source") if err != nil { c.Fatal(err) } defer os.RemoveAll(tmpDir) if err := os.Mkdir(path.Join(tmpDir, "mnt1"), 0755); err != nil { c.Fatal(err) } // Prepare a source directory with file in it. We will bind mount this // directory and see if file shows up. tmpDir2, err := ioutil.TempDir("", "volume-source2") if err != nil { c.Fatal(err) } defer os.RemoveAll(tmpDir2) if err := ioutil.WriteFile(path.Join(tmpDir2, "slave-testfile"), []byte("Test"), 0644); err != nil { c.Fatal(err) } // Convert this directory into a shared mount point so that we do // not rely on propagation properties of parent mount. cmd := exec.Command("mount", "--bind", tmpDir, tmpDir) if _, err = runCommand(cmd); err != nil { c.Fatal(err) } cmd = exec.Command("mount", "--make-private", "--make-shared", tmpDir) if _, err = runCommand(cmd); err != nil { c.Fatal(err) } dockerCmd(c, "run", "-i", "-d", "--name", "parent", "-v", fmt.Sprintf("%s:/volume-dest:slave", tmpDir), "busybox", "top") // Bind mount tmpDir2/ onto tmpDir/mnt1. 
If mount propagates inside // container then contents of tmpDir2/slave-testfile should become // visible at "/volume-dest/mnt1/slave-testfile" cmd = exec.Command("mount", "--bind", tmpDir2, path.Join(tmpDir, "mnt1")) if _, err = runCommand(cmd); err != nil { c.Fatal(err) } out, _ := dockerCmd(c, "exec", "parent", "cat", "/volume-dest/mnt1/slave-testfile") mount.Unmount(path.Join(tmpDir, "mnt1")) if out != "Test" { c.Fatalf("Bind mount under slave volume did not propagate to container") } } func (s *DockerSuite) TestRunNamedVolumesMountedAsShared(c *check.C) { testRequires(c, DaemonIsLinux, NotUserNamespace) out, exitCode, _ := dockerCmdWithError("run", "-v", "foo:/test:shared", "busybox", "touch", "/test/somefile") c.Assert(exitCode, checker.Not(checker.Equals), 0) c.Assert(out, checker.Contains, "invalid mount config") } func (s *DockerSuite) TestRunNamedVolumeCopyImageData(c *check.C) { testRequires(c, DaemonIsLinux) testImg := "testvolumecopy" _, err := buildImage(testImg, ` FROM busybox RUN mkdir -p /foo && echo hello > /foo/hello `, true) c.Assert(err, check.IsNil) dockerCmd(c, "run", "-v", "foo:/foo", testImg) out, _ := dockerCmd(c, "run", "-v", "foo:/foo", "busybox", "cat", "/foo/hello") c.Assert(strings.TrimSpace(out), check.Equals, "hello") } func (s *DockerSuite) TestRunNamedVolumeNotRemoved(c *check.C) { prefix, _ := getPrefixAndSlashFromDaemonPlatform() dockerCmd(c, "volume", "create", "test") dockerCmd(c, "run", "--rm", "-v", "test:"+prefix+"/foo", "-v", prefix+"/bar", "busybox", "true") dockerCmd(c, "volume", "inspect", "test") out, _ := dockerCmd(c, "volume", "ls", "-q") c.Assert(strings.TrimSpace(out), checker.Equals, "test") dockerCmd(c, "run", "--name=test", "-v", "test:"+prefix+"/foo", "-v", prefix+"/bar", "busybox", "true") dockerCmd(c, "rm", "-fv", "test") dockerCmd(c, "volume", "inspect", "test") out, _ = dockerCmd(c, "volume", "ls", "-q") c.Assert(strings.TrimSpace(out), checker.Equals, "test") } func (s *DockerSuite) 
TestRunNamedVolumesFromNotRemoved(c *check.C) { prefix, _ := getPrefixAndSlashFromDaemonPlatform() dockerCmd(c, "volume", "create", "test") dockerCmd(c, "run", "--name=parent", "-v", "test:"+prefix+"/foo", "-v", prefix+"/bar", "busybox", "true") dockerCmd(c, "run", "--name=child", "--volumes-from=parent", "busybox", "true") // Remove the parent so there are not other references to the volumes dockerCmd(c, "rm", "-f", "parent") // now remove the child and ensure the named volume (and only the named volume) still exists dockerCmd(c, "rm", "-fv", "child") dockerCmd(c, "volume", "inspect", "test") out, _ := dockerCmd(c, "volume", "ls", "-q") c.Assert(strings.TrimSpace(out), checker.Equals, "test") } func (s *DockerSuite) TestRunAttachFailedNoLeak(c *check.C) { nroutines, err := getGoroutineNumber() c.Assert(err, checker.IsNil) runSleepingContainer(c, "--name=test", "-p", "8000:8000") // Wait until container is fully up and running c.Assert(waitRun("test"), check.IsNil) out, _, err := dockerCmdWithError("run", "--name=fail", "-p", "8000:8000", "busybox", "true") // We will need the following `inspect` to diagnose the issue if test fails (#21247) out1, err1 := dockerCmd(c, "inspect", "--format", "{{json .State}}", "test") out2, err2 := dockerCmd(c, "inspect", "--format", "{{json .State}}", "fail") c.Assert(err, checker.NotNil, check.Commentf("Command should have failed but succeeded with: %s\nContainer 'test' [%+v]: %s\nContainer 'fail' [%+v]: %s", out, err1, out1, err2, out2)) // check for windows error as well // TODO Windows Post TP5. 
Fix the error message string c.Assert(strings.Contains(string(out), "port is already allocated") || strings.Contains(string(out), "were not connected because a duplicate name exists") || strings.Contains(string(out), "HNS failed with error : Failed to create endpoint") || strings.Contains(string(out), "HNS failed with error : The object already exists"), checker.Equals, true, check.Commentf("Output: %s", out)) dockerCmd(c, "rm", "-f", "test") // NGoroutines is not updated right away, so we need to wait before failing c.Assert(waitForGoroutines(nroutines), checker.IsNil) } // Test for one character directory name case (#20122) func (s *DockerSuite) TestRunVolumeWithOneCharacter(c *check.C) { testRequires(c, DaemonIsLinux) out, _ := dockerCmd(c, "run", "-v", "/tmp/q:/foo", "busybox", "sh", "-c", "find /foo") c.Assert(strings.TrimSpace(out), checker.Equals, "/foo") } func (s *DockerSuite) TestRunVolumeCopyFlag(c *check.C) { testRequires(c, DaemonIsLinux) // Windows does not support copying data from image to the volume _, err := buildImage("volumecopy", `FROM busybox RUN mkdir /foo && echo hello > /foo/bar CMD cat /foo/bar`, true, ) c.Assert(err, checker.IsNil) dockerCmd(c, "volume", "create", "test") // test with the nocopy flag out, _, err := dockerCmdWithError("run", "-v", "test:/foo:nocopy", "volumecopy") c.Assert(err, checker.NotNil, check.Commentf(out)) // test default behavior which is to copy for non-binds out, _ = dockerCmd(c, "run", "-v", "test:/foo", "volumecopy") c.Assert(strings.TrimSpace(out), checker.Equals, "hello") // error out when the volume is already populated out, _, err = dockerCmdWithError("run", "-v", "test:/foo:copy", "volumecopy") c.Assert(err, checker.NotNil, check.Commentf(out)) // do not error out when copy isn't explicitly set even though it's already populated out, _ = dockerCmd(c, "run", "-v", "test:/foo", "volumecopy") c.Assert(strings.TrimSpace(out), checker.Equals, "hello") // do not allow copy modes on volumes-from dockerCmd(c, 
"run", "--name=test", "-v", "/foo", "busybox", "true") out, _, err = dockerCmdWithError("run", "--volumes-from=test:copy", "busybox", "true") c.Assert(err, checker.NotNil, check.Commentf(out)) out, _, err = dockerCmdWithError("run", "--volumes-from=test:nocopy", "busybox", "true") c.Assert(err, checker.NotNil, check.Commentf(out)) // do not allow copy modes on binds out, _, err = dockerCmdWithError("run", "-v", "/foo:/bar:copy", "busybox", "true") c.Assert(err, checker.NotNil, check.Commentf(out)) out, _, err = dockerCmdWithError("run", "-v", "/foo:/bar:nocopy", "busybox", "true") c.Assert(err, checker.NotNil, check.Commentf(out)) } // Test case for #21976 func (s *DockerSuite) TestRunDNSInHostMode(c *check.C) { testRequires(c, DaemonIsLinux, NotUserNamespace) expectedOutput := "nameserver 127.0.0.1" expectedWarning := "Localhost DNS setting" out, stderr, _ := dockerCmdWithStdoutStderr(c, "run", "--dns=127.0.0.1", "--net=host", "busybox", "cat", "/etc/resolv.conf") c.Assert(out, checker.Contains, expectedOutput, check.Commentf("Expected '%s', but got %q", expectedOutput, out)) c.Assert(stderr, checker.Contains, expectedWarning, check.Commentf("Expected warning on stderr about localhost resolver, but got %q", stderr)) expectedOutput = "nameserver 1.2.3.4" out, _ = dockerCmd(c, "run", "--dns=1.2.3.4", "--net=host", "busybox", "cat", "/etc/resolv.conf") c.Assert(out, checker.Contains, expectedOutput, check.Commentf("Expected '%s', but got %q", expectedOutput, out)) expectedOutput = "search example.com" out, _ = dockerCmd(c, "run", "--dns-search=example.com", "--net=host", "busybox", "cat", "/etc/resolv.conf") c.Assert(out, checker.Contains, expectedOutput, check.Commentf("Expected '%s', but got %q", expectedOutput, out)) expectedOutput = "options timeout:3" out, _ = dockerCmd(c, "run", "--dns-opt=timeout:3", "--net=host", "busybox", "cat", "/etc/resolv.conf") c.Assert(out, checker.Contains, expectedOutput, check.Commentf("Expected '%s', but got %q", expectedOutput, 
out)) expectedOutput1 := "nameserver 1.2.3.4" expectedOutput2 := "search example.com" expectedOutput3 := "options timeout:3" out, _ = dockerCmd(c, "run", "--dns=1.2.3.4", "--dns-search=example.com", "--dns-opt=timeout:3", "--net=host", "busybox", "cat", "/etc/resolv.conf") c.Assert(out, checker.Contains, expectedOutput1, check.Commentf("Expected '%s', but got %q", expectedOutput1, out)) c.Assert(out, checker.Contains, expectedOutput2, check.Commentf("Expected '%s', but got %q", expectedOutput2, out)) c.Assert(out, checker.Contains, expectedOutput3, check.Commentf("Expected '%s', but got %q", expectedOutput3, out)) } // Test case for #21976 func (s *DockerSuite) TestRunAddHostInHostMode(c *check.C) { testRequires(c, DaemonIsLinux, NotUserNamespace) expectedOutput := "1.2.3.4\textra" out, _ := dockerCmd(c, "run", "--add-host=extra:1.2.3.4", "--net=host", "busybox", "cat", "/etc/hosts") c.Assert(out, checker.Contains, expectedOutput, check.Commentf("Expected '%s', but got %q", expectedOutput, out)) } func (s *DockerSuite) TestRunRmAndWait(c *check.C) { dockerCmd(c, "run", "--name=test", "--rm", "-d", "busybox", "sh", "-c", "sleep 3;exit 2") out, code, err := dockerCmdWithError("wait", "test") c.Assert(err, checker.IsNil, check.Commentf("out: %s; exit code: %d", out, code)) c.Assert(out, checker.Equals, "2\n", check.Commentf("exit code: %d", code)) c.Assert(code, checker.Equals, 0) } // Test case for #23498 func (s *DockerSuite) TestRunUnsetEntrypoint(c *check.C) { testRequires(c, DaemonIsLinux) name := "test-entrypoint" dockerfile := `FROM busybox ADD entrypoint.sh /entrypoint.sh RUN chmod 755 /entrypoint.sh ENTRYPOINT ["/entrypoint.sh"] CMD echo foobar` ctx, err := fakeContext(dockerfile, map[string]string{ "entrypoint.sh": `#!/bin/sh echo "I am an entrypoint" exec "$@"`, }) c.Assert(err, check.IsNil) defer ctx.Close() _, err = buildImageFromContext(name, ctx, true) c.Assert(err, check.IsNil) out, _ := dockerCmd(c, "run", "--entrypoint=", "-t", name, "echo", "foo") 
c.Assert(strings.TrimSpace(out), check.Equals, "foo") // CMD will be reset as well (the same as setting a custom entrypoint) _, _, err = dockerCmdWithError("run", "--entrypoint=", "-t", name) c.Assert(err, check.NotNil) c.Assert(err.Error(), checker.Contains, "No command specified") } func (s *DockerDaemonSuite) TestRunWithUlimitAndDaemonDefault(c *check.C) { c.Assert(s.d.StartWithBusybox("--debug", "--default-ulimit=nofile=65535"), checker.IsNil) name := "test-A" _, err := s.d.Cmd("run", "--name", name, "-d", "busybox", "top") c.Assert(err, checker.IsNil) c.Assert(s.d.waitRun(name), check.IsNil) out, err := s.d.Cmd("inspect", "--format", "{{.HostConfig.Ulimits}}", name) c.Assert(err, checker.IsNil) c.Assert(out, checker.Contains, "[nofile=65535:65535]") name = "test-B" _, err = s.d.Cmd("run", "--name", name, "--ulimit=nofile=42", "-d", "busybox", "top") c.Assert(err, checker.IsNil) c.Assert(s.d.waitRun(name), check.IsNil) out, err = s.d.Cmd("inspect", "--format", "{{.HostConfig.Ulimits}}", name) c.Assert(err, checker.IsNil) c.Assert(out, checker.Contains, "[nofile=42:42]") } func (s *DockerSuite) TestRunStoppedLoggingDriverNoLeak(c *check.C) { nroutines, err := getGoroutineNumber() c.Assert(err, checker.IsNil) out, _, err := dockerCmdWithError("run", "--name=fail", "--log-driver=splunk", "busybox", "true") c.Assert(err, checker.NotNil) c.Assert(out, checker.Contains, "failed to initialize logging driver", check.Commentf("error should be about logging driver, got output %s", out)) // NGoroutines is not updated right away, so we need to wait before failing c.Assert(waitForGoroutines(nroutines), checker.IsNil) } // Handles error conditions for --credentialspec. Validating E2E success cases // requires additional infrastructure (AD for example) on CI servers. 
func (s *DockerSuite) TestRunCredentialSpecFailures(c *check.C) { testRequires(c, DaemonIsWindows) attempts := []struct{ value, expectedError string }{ {"rubbish", "invalid credential spec security option - value must be prefixed file:// or registry://"}, {"rubbish://", "invalid credential spec security option - value must be prefixed file:// or registry://"}, {"file://", "no value supplied for file:// credential spec security option"}, {"registry://", "no value supplied for registry:// credential spec security option"}, {`file://c:\blah.txt`, "path cannot be absolute"}, {`file://doesnotexist.txt`, "The system cannot find the file specified"}, } for _, attempt := range attempts { _, _, err := dockerCmdWithError("run", "--security-opt=credentialspec="+attempt.value, "busybox", "true") c.Assert(err, checker.NotNil, check.Commentf("%s expected non-nil err", attempt.value)) c.Assert(err.Error(), checker.Contains, attempt.expectedError, check.Commentf("%s expected %s got %s", attempt.value, attempt.expectedError, err)) } } // Windows specific test to validate credential specs with a well-formed spec. // Note it won't actually do anything in CI configuration with the spec, but // it should not fail to run a container. func (s *DockerSuite) TestRunCredentialSpecWellFormed(c *check.C) { testRequires(c, DaemonIsWindows, SameHostDaemon) validCS := readFile(`fixtures\credentialspecs\valid.json`, c) writeFile(filepath.Join(dockerBasePath, `credentialspecs\valid.json`), validCS, c) dockerCmd(c, "run", `--security-opt=credentialspec=file://valid.json`, "busybox", "true") } // Windows specific test to ensure that a servicing app container is started // if necessary once a container exits. 
It does this by forcing a no-op // servicing event and verifying the event from Hyper-V-Compute func (s *DockerSuite) TestRunServicingContainer(c *check.C) { testRequires(c, DaemonIsWindows, SameHostDaemon) out, _ := dockerCmd(c, "run", "-d", WindowsBaseImage, "cmd", "/c", "mkdir c:\\programdata\\Microsoft\\Windows\\ContainerUpdates\\000_000_d99f45d0-ffc8-4af7-bd9c-ea6a62e035c9_200 && sc control cexecsvc 255") containerID := strings.TrimSpace(out) err := waitExited(containerID, 60*time.Second) c.Assert(err, checker.IsNil) cmd := exec.Command("powershell", "echo", `(Get-WinEvent -ProviderName "Microsoft-Windows-Hyper-V-Compute" -FilterXPath 'Event[System[EventID=2010]]' -MaxEvents 1).Message`) out2, _, err := runCommandWithOutput(cmd) c.Assert(err, checker.IsNil) c.Assert(out2, checker.Contains, `"Servicing":true`, check.Commentf("Servicing container does not appear to have been started: %s", out2)) c.Assert(out2, checker.Contains, `Windows Container (Servicing)`, check.Commentf("Didn't find 'Windows Container (Servicing): %s", out2)) c.Assert(out2, checker.Contains, containerID+"_servicing", check.Commentf("Didn't find '%s_servicing': %s", containerID+"_servicing", out2)) } func (s *DockerSuite) TestRunDuplicateMount(c *check.C) { testRequires(c, SameHostDaemon, DaemonIsLinux, NotUserNamespace) tmpFile, err := ioutil.TempFile("", "touch-me") c.Assert(err, checker.IsNil) defer tmpFile.Close() data := "touch-me-foo-bar\n" if _, err := tmpFile.Write([]byte(data)); err != nil { c.Fatal(err) } name := "test" out, _ := dockerCmd(c, "run", "--name", name, "-v", "/tmp:/tmp", "-v", "/tmp:/tmp", "busybox", "sh", "-c", "cat "+tmpFile.Name()+" && ls /") c.Assert(out, checker.Not(checker.Contains), "tmp:") c.Assert(out, checker.Contains, data) out = inspectFieldJSON(c, name, "Config.Volumes") c.Assert(out, checker.Contains, "null") } func (s *DockerSuite) TestRunWindowsWithCPUCount(c *check.C) { testRequires(c, DaemonIsWindows) out, _ := dockerCmd(c, "run", "--cpu-count=1", 
"--name", "test", "busybox", "echo", "testing") c.Assert(strings.TrimSpace(out), checker.Equals, "testing") out = inspectField(c, "test", "HostConfig.CPUCount") c.Assert(out, check.Equals, "1") } func (s *DockerSuite) TestRunWindowsWithCPUShares(c *check.C) { testRequires(c, DaemonIsWindows) out, _ := dockerCmd(c, "run", "--cpu-shares=1000", "--name", "test", "busybox", "echo", "testing") c.Assert(strings.TrimSpace(out), checker.Equals, "testing") out = inspectField(c, "test", "HostConfig.CPUShares") c.Assert(out, check.Equals, "1000") } func (s *DockerSuite) TestRunWindowsWithCPUPercent(c *check.C) { testRequires(c, DaemonIsWindows) out, _ := dockerCmd(c, "run", "--cpu-percent=80", "--name", "test", "busybox", "echo", "testing") c.Assert(strings.TrimSpace(out), checker.Equals, "testing") out = inspectField(c, "test", "HostConfig.CPUPercent") c.Assert(out, check.Equals, "80") } func (s *DockerSuite) TestRunProcessIsolationWithCPUCountCPUSharesAndCPUPercent(c *check.C) { testRequires(c, DaemonIsWindows, IsolationIsProcess) out, _ := dockerCmd(c, "run", "--cpu-count=1", "--cpu-shares=1000", "--cpu-percent=80", "--name", "test", "busybox", "echo", "testing") c.Assert(strings.TrimSpace(out), checker.Contains, "WARNING: Conflicting options: CPU count takes priority over CPU shares on Windows Server Containers. CPU shares discarded") c.Assert(strings.TrimSpace(out), checker.Contains, "WARNING: Conflicting options: CPU count takes priority over CPU percent on Windows Server Containers. 
CPU percent discarded") c.Assert(strings.TrimSpace(out), checker.Contains, "testing") out = inspectField(c, "test", "HostConfig.CPUCount") c.Assert(out, check.Equals, "1") out = inspectField(c, "test", "HostConfig.CPUShares") c.Assert(out, check.Equals, "0") out = inspectField(c, "test", "HostConfig.CPUPercent") c.Assert(out, check.Equals, "0") } func (s *DockerSuite) TestRunHypervIsolationWithCPUCountCPUSharesAndCPUPercent(c *check.C) { testRequires(c, DaemonIsWindows, IsolationIsHyperv) out, _ := dockerCmd(c, "run", "--cpu-count=1", "--cpu-shares=1000", "--cpu-percent=80", "--name", "test", "busybox", "echo", "testing") c.Assert(strings.TrimSpace(out), checker.Contains, "testing") out = inspectField(c, "test", "HostConfig.CPUCount") c.Assert(out, check.Equals, "1") out = inspectField(c, "test", "HostConfig.CPUShares") c.Assert(out, check.Equals, "1000") out = inspectField(c, "test", "HostConfig.CPUPercent") c.Assert(out, check.Equals, "80") } // Test for #25099 func (s *DockerSuite) TestRunEmptyEnv(c *check.C) { testRequires(c, DaemonIsLinux) expectedOutput := "invalid environment variable:" out, _, err := dockerCmdWithError("run", "-e", "", "busybox", "true") c.Assert(err, checker.NotNil) c.Assert(out, checker.Contains, expectedOutput) out, _, err = dockerCmdWithError("run", "-e", "=", "busybox", "true") c.Assert(err, checker.NotNil) c.Assert(out, checker.Contains, expectedOutput) out, _, err = dockerCmdWithError("run", "-e", "=foo", "busybox", "true") c.Assert(err, checker.NotNil) c.Assert(out, checker.Contains, expectedOutput) } // #28658 func (s *DockerSuite) TestSlowStdinClosing(c *check.C) { name := "testslowstdinclosing" repeat := 3 // regression happened 50% of the time for i := 0; i < repeat; i++ { cmd := exec.Command(dockerBinary, "run", "--rm", "--name", name, "-i", "busybox", "cat") cmd.Stdin = &delayedReader{} done := make(chan error, 1) go func() { _, err := runCommand(cmd) done <- err }() select { case <-time.After(15 * time.Second): 
c.Fatal("running container timed out") // cleanup in teardown case err := <-done: c.Assert(err, checker.IsNil) } } } type delayedReader struct{} func (s *delayedReader) Read([]byte) (int, error) { time.Sleep(500 * time.Millisecond) return 0, io.EOF }
[ "\"DOCKER_REMAP_ROOT\"" ]
[]
[ "DOCKER_REMAP_ROOT" ]
[]
["DOCKER_REMAP_ROOT"]
go
1
0
src/python/etl.py
#!/Users/iiyakhruschev/opt/anaconda3/bin python # coding: utf-8 import pandas as pd import os import translators as ts from datetime import datetime from langdetect import detect from json_parsers import * from sqlalchemy import create_engine from pymongo import MongoClient # logging logging.basicConfig(filename='logs/etl_{}.log'.format(datetime.now()), level=logging.INFO) # Mongo database and collection client_db = 'stream' client_col = 'frenchgp' # source and target connections MONGO_URI = os.getenv("MONGO_URI") client = MongoClient(MONGO_URI) # database and collection names db = client[client_db] col = db[client_col] # IMPORT df_in = pd.DataFrame(list(col.find({}))) df_in = df_in.drop(['matching_rules'], 1) logging.info('{} - {} records retrieved from mongo'.format(datetime.now(), len(df_in))) # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ DEFINE FUNCTIONS ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ def getData(df): """ Convert the nexted json inside the data column to its own dataframe """ df['author_id'] = df.apply(lambda df: parse_json(df, 'data', 'author_id'), 1) df['created_at'] = df.apply(lambda df: parse_json(df, 'data', 'created_at'), 1) df['geo'] = df.apply(lambda df: parse_json(df, 'data', 'geo'), 1) df['tweet_id'] = df.apply(lambda df: parse_json_exact(df, 'data', 'id'), 1) df['raw_text'] = df.apply(lambda df: parse_json(df, 'data', 'text'), 1) df = df[['tweet_id', 'author_id', 'created_at', 'raw_text']] return df def getTranslation(df): clean_text = df['clean_text'] try: translated_text = ts.google(clean_text, if_use_cn_host=True) except Exception: translated_text = clean_text return translated_text def getCleanText(df): clean_text = ''.join(e for e in df['raw_text'] if e.isascii()) clean_text = ''.join(e for e in clean_text if e not in ["!", "@", "#"]) return clean_text def getRawText(df): raw_text = ''.join(e for e 
in df['raw_text'] if e.isascii()) return raw_text def getLanguage(df): clean_text = ''.join(e for e in df['raw_text'] if e.isascii()) clean_text = ''.join(e for e in clean_text if e not in ["!", "@", "#"]) try: language = detect(clean_text) except Exception: language = '' return language def getUsers(df): users = df['includes']['users'] return users def getUserDataframe(df): df['user_created_at'] = df.apply( lambda df: parse_json(df, 'users', 'created_at'), 1) df['user_id'] = df.apply(lambda df: parse_json(df, 'users', 'id'), 1) df['location'] = df.apply( lambda df: parse_json(df, 'users', 'location'), 1) df['name'] = df.apply(lambda df: parse_json_exact(df, 'users', 'name'), 1) df['username'] = df.apply( lambda df: parse_json(df, 'users', 'username'), 1) df = df.drop(['users'], 1) return df def escapeArray(df, column): if len(df[column]) == 0: return '' else: return df[column] # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ CLEAN TWEET DATA / TRANSLATE ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ data_df = getData(df_in) for column in list(data_df): data_df[column] = data_df.apply( lambda data_df: escapeArray(data_df, column), 1) for i, j in zip(data_df['tweet_id'], range(len(data_df))): if isinstance(i, np.ndarray): data_df['tweet_id'][j] = data_df['tweet_id'][j][0] # Get rid of emojis and hashtags for CLEAN TEXT data_df['clean_text'] = data_df.apply(lambda data_df: getCleanText(data_df), 1) logging.info('{} - text cleaned'.format(datetime.now())) # Get rid of emojis for RAW TEXT storage data_df['raw_text'] = data_df.apply(lambda data_df: getRawText(data_df), 1) logging.info('{} - emojis removed from raw text'.format(datetime.now())) # Get original language data_df['language'] = data_df.apply(lambda data_df: getLanguage(data_df), 1) logging.info('{} - languages identified '.format(datetime.now())) # Translate 
everything into English data_df_translate = data_df[data_df['language'] != 'en'][['tweet_id', 'clean_text']] logging.info('{} - {} non-english language records detected'.format(datetime.now(), len(data_df_translate))) data_df_translate['translated_text'] = data_df_translate.apply(lambda data_df_translate: getTranslation(data_df_translate), 1) logging.info('{} - {} non-english language records translated'.format(datetime.now(), len(data_df_translate))) # Convert all tweet ids to strings data_df_translate['tweet_id'] = data_df_translate['tweet_id'].astype(str) data_df['tweet_id'] = data_df['tweet_id'].astype(str) # Merge translated text and original text into one dataframe data_df = data_df.merge(data_df_translate, on='tweet_id', how='left')\ .rename(index=str, columns={'clean_text_x': 'clean_text'})\ .drop('clean_text_y', 1) # Fill NA and reorder the columns data_df['translated_text'] = data_df['translated_text'].fillna(data_df['clean_text']) data_df = data_df[['tweet_id', 'author_id', 'created_at', 'language', 'raw_text', 'clean_text', 'translated_text']] # SEND TO POSTGRES logging.info('{} - sending tweet table to postgres'.format(datetime.now())) engine = create_engine('postgresql://postgres@localhost:5432/frenchgp') data_df.to_sql('tweet', engine, index=False, if_exists='replace') logging.info('{} - export successful'.format(datetime.now())) # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ USERS MENTIONED ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # Get only User data from input dataframe df_in['includes'] = df_in['includes'].fillna('') df_in = df_in[df_in['includes'] != ''] df_in['users'] = df_in.apply(lambda df_in: getUsers(df_in), 1) # Explode arrays users_exploded = df_in[['tweet_id', 'users']].explode('users') users_df = getUserDataframe(users_exploded) users_df['location'] = users_df.apply( lambda users_df: 
escapeArray(users_df, 'location'), 1) # Get rid of null arrays; convert to empty strings for column in list(users_df): users_df[column] = users_df.apply(lambda users_df: escapeArray(users_df, column), 1) # Reset Index users_df = users_df.reset_index(drop=True) # Get author_id from id array when tweet includes more than one id for i, j in zip(users_df['tweet_id'], range(len(users_df))): if isinstance(i, np.ndarray): users_df['tweet_id'][j] = users_df['tweet_id'][j][0] # SEND TO POSTGRES logging.info( '{} - sending users mentioned table to postgres'.format(datetime.now())) users_df.to_sql('users_mentioned', engine, index=False, if_exists='replace') logging.info('{} - export successful'.format(datetime.now())) logging.info('{} - ETL COMPLETE'.format(datetime.now()))
[]
[]
[ "MONGO_URI" ]
[]
["MONGO_URI"]
python
1
0
lib/crunchrun/crunchrun_test.go
// Copyright (C) The Arvados Authors. All rights reserved.
//
// SPDX-License-Identifier: AGPL-3.0

package crunchrun

import (
    "bufio"
    "bytes"
    "crypto/md5"
    "encoding/json"
    "errors"
    "fmt"
    "io"
    "io/ioutil"
    "net"
    "os"
    "os/exec"
    "runtime/pprof"
    "sort"
    "strings"
    "sync"
    "syscall"
    "testing"
    "time"

    "git.arvados.org/arvados.git/sdk/go/arvados"
    "git.arvados.org/arvados.git/sdk/go/arvadosclient"
    "git.arvados.org/arvados.git/sdk/go/arvadostest"
    "git.arvados.org/arvados.git/sdk/go/manifest"
    "golang.org/x/net/context"

    dockertypes "github.com/docker/docker/api/types"
    dockercontainer "github.com/docker/docker/api/types/container"
    dockernetwork "github.com/docker/docker/api/types/network"

    . "gopkg.in/check.v1"
)

// Gocheck boilerplate
func TestCrunchExec(t *testing.T) {
    TestingT(t)
}

// Gocheck boilerplate
var _ = Suite(&TestSuite{})

// TestSuite holds the per-test fixtures: a real-looking arvados client,
// the fake Docker client, and the ContainerRunner under test.
type TestSuite struct {
    client *arvados.Client
    docker *TestDockerClient
    runner *ContainerRunner
}

// SetUpTest gives every test a fresh client and a fresh fake Docker daemon.
func (s *TestSuite) SetUpTest(c *C) {
    s.client = arvados.NewClientFromEnv()
    s.docker = NewTestDockerClient()
}

// ArvTestClient is a stub Arvados API client.  It records every Create/Update
// call (Calls, Content) and collects log-event text per event type in Logs.
type ArvTestClient struct {
    Total   int64
    Calls   int
    Content []arvadosclient.Dict
    arvados.Container
    secretMounts []byte
    Logs         map[string]*bytes.Buffer
    sync.Mutex
    WasSetRunning bool
    callraw       bool
}

// KeepTestClient is a stub Keep client; PutB remembers the last buffer written.
type KeepTestClient struct {
    Called  bool
    Content []byte
}

// Canned manifests and portable data hashes used as fixtures below.
var hwManifest = ". 82ab40c24fc8df01798e57ba66795bb1+841216+Aa124ac75e5168396c73c0a18eda641a4f41791c0@569fa8c3 0:841216:9c31ee32b3d15268a0754e8edc74d4f815ee014b693bc5109058e431dd5caea7.tar\n"
var hwPDH = "a45557269dcb65a6b78f9ac061c0850b+120"
var hwImageId = "9c31ee32b3d15268a0754e8edc74d4f815ee014b693bc5109058e431dd5caea7"

var otherManifest = ". 68a84f561b1d1708c6baff5e019a9ab3+46+Ae5d0af96944a3690becb1decdf60cc1c937f556d@5693216f 0:46:md5sum.txt\n"
var otherPDH = "a3e8f74c6f101eae01fa08bfb4e49b3a+54"

var normalizedManifestWithSubdirs = `. 3e426d509afffb85e06c4c96a7c15e91+27+Aa124ac75e5168396c73c0abcdefgh11234567890@569fa8c3 0:9:file1_in_main.txt 9:18:file2_in_main.txt 0:27:zzzzz-8i9sb-bcdefghijkdhvnk.log.txt
./subdir1 3e426d509afffb85e06c4c96a7c15e91+27+Aa124ac75e5168396cabcdefghij6419876543234@569fa8c4 0:9:file1_in_subdir1.txt 9:18:file2_in_subdir1.txt
./subdir1/subdir2 3e426d509afffb85e06c4c96a7c15e91+27+Aa124ac75e5168396c73c0bcdefghijk544332211@569fa8c5 0:9:file1_in_subdir2.txt 9:18:file2_in_subdir2.txt
`

var normalizedWithSubdirsPDH = "a0def87f80dd594d4675809e83bd4f15+367"

var denormalizedManifestWithSubdirs = ". 3e426d509afffb85e06c4c96a7c15e91+27+Aa124ac75e5168396c73c0abcdefgh11234567890@569fa8c3 0:9:file1_in_main.txt 9:18:file2_in_main.txt 0:27:zzzzz-8i9sb-bcdefghijkdhvnk.log.txt 0:10:subdir1/file1_in_subdir1.txt 10:17:subdir1/file2_in_subdir1.txt\n"
var denormalizedWithSubdirsPDH = "b0def87f80dd594d4675809e83bd4f15+367"

var fakeAuthUUID = "zzzzz-gj3su-55pqoyepgi2glem"
var fakeAuthToken = "a3ltuwzqcu2u4sc0q7yhpc2w7s00fdcqecg5d6e0u3pfohmbjt"

// TestDockerClient fakes the Docker Engine API.  fn plays the role of the
// container process; exitCode selects both the container's exit status and
// (for some values) a canned daemon error.
type TestDockerClient struct {
    imageLoaded string
    logReader   io.ReadCloser
    logWriter   io.WriteCloser
    fn          func(t *TestDockerClient)
    exitCode    int
    stop        chan bool
    cwd         string
    env         []string
    api         *ArvTestClient
    realTemp    string
    calledWait  bool
    ctrExited   bool
}

// NewTestDockerClient wires the fake container's log pipe and stop channel.
func NewTestDockerClient() *TestDockerClient {
    t := &TestDockerClient{}
    t.logReader, t.logWriter = io.Pipe()
    t.stop = make(chan bool, 1)
    t.cwd = "/"
    return t
}

// MockConn is a net.Conn whose writes are accepted and discarded.
type MockConn struct {
    net.Conn
}

func (m *MockConn) Write(b []byte) (int, error) {
    return len(b), nil
}

func NewMockConn() *MockConn {
    c := &MockConn{}
    return c
}

// ContainerAttach hands back the read side of the fake container's log pipe.
func (t *TestDockerClient) ContainerAttach(ctx context.Context, container string, options dockertypes.ContainerAttachOptions) (dockertypes.HijackedResponse, error) {
    return dockertypes.HijackedResponse{Conn: NewMockConn(), Reader: bufio.NewReader(t.logReader)}, nil
}

// ContainerCreate records the requested working dir and env, and always
// reports container id "abcde".
func (t *TestDockerClient) ContainerCreate(ctx context.Context, config *dockercontainer.Config, hostConfig *dockercontainer.HostConfig, networkingConfig *dockernetwork.NetworkingConfig, containerName string) (dockercontainer.ContainerCreateCreatedBody, error) {
    if config.WorkingDir != "" {
        t.cwd = config.WorkingDir
    }
    t.env = config.Env
    return dockercontainer.ContainerCreateCreatedBody{ID: "abcde"}, nil
}

// ContainerStart returns canned daemon errors for exitCode 3-6; otherwise it
// succeeds only for the id issued by ContainerCreate.
func (t *TestDockerClient) ContainerStart(ctx context.Context, container string, options dockertypes.ContainerStartOptions) error {
    if t.exitCode == 3 {
        return errors.New(`Error response from daemon: oci runtime error: container_linux.go:247: starting container process caused "process_linux.go:359: container init caused \"rootfs_linux.go:54: mounting \\\"/tmp/keep453790790/by_id/99999999999999999999999999999999+99999/myGenome\\\" to rootfs \\\"/tmp/docker/overlay2/9999999999999999999999999999999999999999999999999999999999999999/merged\\\" at \\\"/tmp/docker/overlay2/9999999999999999999999999999999999999999999999999999999999999999/merged/keep/99999999999999999999999999999999+99999/myGenome\\\" caused \\\"no such file or directory\\\"\""`)
    }
    if t.exitCode == 4 {
        return errors.New(`panic: standard_init_linux.go:175: exec user process caused "no such file or directory"`)
    }
    if t.exitCode == 5 {
        return errors.New(`Error response from daemon: Cannot start container 41f26cbc43bcc1280f4323efb1830a394ba8660c9d1c2b564ba42bf7f7694845: [8] System error: no such file or directory`)
    }
    if t.exitCode == 6 {
        return errors.New(`Error response from daemon: Cannot start container 58099cd76c834f3dc2a4fb76c8028f049ae6d4fdf0ec373e1f2cfea030670c2d: [8] System error: exec: "foobar": executable file not found in $PATH`)
    }
    if container == "abcde" {
        // t.fn gets executed in ContainerWait
        return nil
    } else {
        return errors.New("Invalid container id")
    }
}

// ContainerRemove signals the fake container process to stop.
func (t *TestDockerClient) ContainerRemove(ctx context.Context, container string, options dockertypes.ContainerRemoveOptions) error {
    t.stop <- true
    return nil
}

// ContainerWait runs fn (the fake container body) in a goroutine and delivers
// its configured exit code on the returned channel.
func (t *TestDockerClient) ContainerWait(ctx context.Context, container string, condition dockercontainer.WaitCondition) (<-chan dockercontainer.ContainerWaitOKBody, <-chan error) {
    t.calledWait = true
    body := make(chan dockercontainer.ContainerWaitOKBody, 1)
    err := make(chan error)
    go func() {
        t.fn(t)
        body <- dockercontainer.ContainerWaitOKBody{StatusCode: int64(t.exitCode)}
    }()
    return body, err
}

// ContainerInspect reports the container as running, or exited when
// ctrExited is set (used by the watchdog test).
func (t *TestDockerClient) ContainerInspect(ctx context.Context, id string) (c dockertypes.ContainerJSON, err error) {
    c.ContainerJSONBase = &dockertypes.ContainerJSONBase{}
    c.ID = "abcde"
    if t.ctrExited {
        c.State = &dockertypes.ContainerState{Status: "exited", Dead: true}
    } else {
        c.State = &dockertypes.ContainerState{Status: "running", Pid: 1234, Running: true}
    }
    return
}

// ImageInspectWithRaw succeeds only for the image recorded by ImageLoad;
// exitCode 2 simulates an unreachable Docker daemon.
func (t *TestDockerClient) ImageInspectWithRaw(ctx context.Context, image string) (dockertypes.ImageInspect, []byte, error) {
    if t.exitCode == 2 {
        return dockertypes.ImageInspect{}, nil, fmt.Errorf("Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?")
    }

    if t.imageLoaded == image {
        return dockertypes.ImageInspect{}, nil, nil
    } else {
        return dockertypes.ImageInspect{}, nil, errors.New("")
    }
}

// ImageLoad drains the tar stream and marks the hello-world image as loaded;
// exitCode 2 simulates an unreachable Docker daemon.
func (t *TestDockerClient) ImageLoad(ctx context.Context, input io.Reader, quiet bool) (dockertypes.ImageLoadResponse, error) {
    if t.exitCode == 2 {
        return dockertypes.ImageLoadResponse{}, fmt.Errorf("Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?")
    }
    _, err := io.Copy(ioutil.Discard, input)
    if err != nil {
        return dockertypes.ImageLoadResponse{}, err
    } else {
        t.imageLoaded = hwImageId
        return dockertypes.ImageLoadResponse{Body: ioutil.NopCloser(input)}, nil
    }
}

func (*TestDockerClient) ImageRemove(ctx context.Context, image string, options dockertypes.ImageRemoveOptions) ([]dockertypes.ImageDeleteResponseItem, error) {
    return nil, nil
}

// Create records the call; log events are accumulated per event type, and a
// created collection gets a deterministic PDH/UUID derived from its manifest.
func (client *ArvTestClient) Create(resourceType string, parameters arvadosclient.Dict, output interface{}) error {
    client.Mutex.Lock()
    defer client.Mutex.Unlock()
    client.Calls++
    client.Content = append(client.Content, parameters)

    if resourceType == "logs" {
        et := parameters["log"].(arvadosclient.Dict)["event_type"].(string)
        if client.Logs == nil {
            client.Logs = make(map[string]*bytes.Buffer)
        }
        if client.Logs[et] == nil {
            client.Logs[et] = &bytes.Buffer{}
        }
        client.Logs[et].Write([]byte(parameters["log"].(arvadosclient.Dict)["properties"].(map[string]string)["text"]))
    }

    if resourceType == "collections" && output != nil {
        mt := parameters["collection"].(arvadosclient.Dict)["manifest_text"].(string)
        outmap := output.(*arvados.Collection)
        outmap.PortableDataHash = fmt.Sprintf("%x+%d", md5.Sum([]byte(mt)), len(mt))
        outmap.UUID = fmt.Sprintf("zzzzz-4zz18-%15.15x", md5.Sum([]byte(mt)))
    }
    return nil
}

// Call answers the container "auth" and "secret_mounts" sub-requests with
// canned JSON; anything else is "Not found".
func (client *ArvTestClient) Call(method, resourceType, uuid, action string, parameters arvadosclient.Dict, output interface{}) error {
    switch {
    case method == "GET" && resourceType == "containers" && action == "auth":
        return json.Unmarshal([]byte(`{
            "kind": "arvados#api_client_authorization",
            "uuid": "`+fakeAuthUUID+`",
            "api_token": "`+fakeAuthToken+`"
            }`), output)
    case method == "GET" && resourceType == "containers" && action == "secret_mounts":
        if client.secretMounts != nil {
            return json.Unmarshal(client.secretMounts, output)
        } else {
            return json.Unmarshal([]byte(`{"secret_mounts":{}}`), output)
        }
    default:
        return fmt.Errorf("Not found")
    }
}

// CallRaw serves canned node-list / container JSON; unless callraw is set,
// container GETs reflect the client's stored Container fixture.
func (client *ArvTestClient) CallRaw(method, resourceType, uuid, action string, parameters arvadosclient.Dict) (reader io.ReadCloser, err error) {
    var j []byte
    if method == "GET" && resourceType == "nodes" && uuid == "" && action == "" {
        j = []byte(`{
            "kind": "arvados#nodeList",
            "items": [{
                "uuid": "zzzzz-7ekkf-2z3mc76g2q73aio",
                "hostname": "compute2",
                "properties": {"total_cpu_cores": 16}
            }]}`)
    } else if method == "GET" && resourceType == "containers" && action == "" && !client.callraw {
        if uuid == "" {
            j, err = json.Marshal(map[string]interface{}{
                "items": []interface{}{client.Container},
                "kind":  "arvados#nodeList",
            })
        } else {
            j, err = json.Marshal(client.Container)
        }
    } else {
        j = []byte(`{
            "command": ["sleep", "1"],
            "container_image": "d4ab34d3d4f8a72f5c4973051ae69fab+122",
            "cwd": ".",
            "environment": {},
            "mounts": {"/tmp": {"kind": "tmp"}, "/json": {"kind": "json", "content": {"number": 123456789123456789}}},
            "output_path": "/tmp",
            "priority": 1,
            "runtime_constraints": {}
        }`)
    }
    return ioutil.NopCloser(bytes.NewReader(j)), err
}

// Get serves the fixture manifests by PDH and the stored Container fixture.
func (client *ArvTestClient) Get(resourceType string, uuid string, parameters arvadosclient.Dict, output interface{}) error {
    if resourceType == "collections" {
        if uuid == hwPDH {
            output.(*arvados.Collection).ManifestText = hwManifest
        } else if uuid == otherPDH {
            output.(*arvados.Collection).ManifestText = otherManifest
        } else if uuid == normalizedWithSubdirsPDH {
            output.(*arvados.Collection).ManifestText = normalizedManifestWithSubdirs
        } else if uuid == denormalizedWithSubdirsPDH {
            output.(*arvados.Collection).ManifestText = denormalizedManifestWithSubdirs
        }
    }
    if resourceType == "containers" {
        (*output.(*arvados.Container)) = client.Container
    }
    return nil
}

// Update records the call; it notes when the container is set Running and
// gives updated collections an md5-derived PDH.
func (client *ArvTestClient) Update(resourceType string, uuid string, parameters arvadosclient.Dict, output interface{}) (err error) {
    client.Mutex.Lock()
    defer client.Mutex.Unlock()
    client.Calls++
    client.Content = append(client.Content, parameters)
    if resourceType == "containers" {
        if parameters["container"].(arvadosclient.Dict)["state"] == "Running" {
            client.WasSetRunning = true
        }
    } else if resourceType == "collections" {
        mt := parameters["collection"].(arvadosclient.Dict)["manifest_text"].(string)
        output.(*arvados.Collection).UUID = uuid
        output.(*arvados.Collection).PortableDataHash = fmt.Sprintf("%x", md5.Sum([]byte(mt)))
    }
    return nil
}

// discoveryMap backs ArvTestClient.Discovery with the crunch log/trash limits
// the runner reads at startup.
var discoveryMap = map[string]interface{}{
    "defaultTrashLifetime":               float64(1209600),
    "crunchLimitLogBytesPerJob":          float64(67108864),
    "crunchLogThrottleBytes":             float64(65536),
    "crunchLogThrottlePeriod":            float64(60),
    "crunchLogThrottleLines":             float64(1024),
    "crunchLogPartialLineThrottlePeriod": float64(5),
    "crunchLogBytesPerEvent":             float64(4096),
    "crunchLogSecondsBetweenEvents":      float64(1),
}

func (client *ArvTestClient) Discovery(key string) (interface{}, error) {
    return discoveryMap[key], nil
}

// CalledWith returns the parameters from the first API call whose
// parameters match jpath/string.  E.g., CalledWith(c, "foo.bar",
// "baz") returns parameters with parameters["foo"]["bar"]=="baz". If
// no call matches, it returns nil.
func (client *ArvTestClient) CalledWith(jpath string, expect interface{}) arvadosclient.Dict {
call:
    for _, content := range client.Content {
        var v interface{} = content
        for _, k := range strings.Split(jpath, ".") {
            if dict, ok := v.(arvadosclient.Dict); !ok {
                continue call
            } else {
                v = dict[k]
            }
        }
        if v == expect {
            return content
        }
    }
    return nil
}

func (client *KeepTestClient) LocalLocator(locator string) (string, error) {
    return locator, nil
}

// PutB remembers the buffer and reports an md5-derived locator for it.
func (client *KeepTestClient) PutB(buf []byte) (string, int, error) {
    client.Content = buf
    return fmt.Sprintf("%x+%d", md5.Sum(buf), len(buf)), len(buf), nil
}

func (client *KeepTestClient) ReadAt(string, []byte, int) (int, error) {
    return 0, errors.New("not implemented")
}

func (client *KeepTestClient) ClearBlockCache() {
}

func (client *KeepTestClient) Close() {
    client.Content = nil
}

// FileWrapper adapts a ReadCloser into an arvados.File of a declared size;
// all other file operations are stubbed out.
type FileWrapper struct {
    io.ReadCloser
    len int64
}

func (fw FileWrapper) Readdir(n int) ([]os.FileInfo, error) {
    return nil, errors.New("not implemented")
}

func (fw FileWrapper) Seek(int64, int) (int64, error) {
    return 0, errors.New("not implemented")
}

func (fw FileWrapper) Size() int64 {
    return fw.len
}

func (fw FileWrapper) Stat() (os.FileInfo, error) {
    return nil, errors.New("not implemented")
}

func (fw FileWrapper) Truncate(int64) error {
    return errors.New("not implemented")
}

func (fw FileWrapper) Write([]byte) (int, error) {
    return 0, errors.New("not implemented")
}

func (fw FileWrapper) Sync() error {
    return errors.New("not implemented")
}

// ManifestFileReader serves the docker image tar and one text fixture;
// anything else yields (nil, nil).
func (client *KeepTestClient) ManifestFileReader(m manifest.Manifest, filename string) (arvados.File, error) {
    if filename == hwImageId+".tar" {
        rdr := ioutil.NopCloser(&bytes.Buffer{})
        client.Called = true
        return FileWrapper{rdr, 1321984}, nil
    } else if filename == "/file1_in_main.txt" {
        rdr := ioutil.NopCloser(strings.NewReader("foo"))
        client.Called = true
        return FileWrapper{rdr, 3}, nil
    }
    return nil, nil
}

// TestLoadImage exercises LoadImage both when the image must be fetched from
// Keep and when it is already present in the (fake) Docker daemon.
func (s *TestSuite) TestLoadImage(c *C) {
    cr, err := NewContainerRunner(s.client, &ArvTestClient{}, &KeepTestClient{}, s.docker, "zzzzz-zzzzz-zzzzzzzzzzzzzzz")
    c.Assert(err, IsNil)
    kc := &KeepTestClient{}
    defer kc.Close()
    cr.ContainerArvClient = &ArvTestClient{}
    cr.ContainerKeepClient = kc

    _, err = cr.Docker.ImageRemove(nil, hwImageId, dockertypes.ImageRemoveOptions{})
    c.Check(err, IsNil)

    _, _, err = cr.Docker.ImageInspectWithRaw(nil, hwImageId)
    c.Check(err, NotNil)

    cr.Container.ContainerImage = hwPDH

    // (1) Test loading image from keep
    c.Check(kc.Called, Equals, false)
    c.Check(cr.ContainerConfig.Image, Equals, "")

    err = cr.LoadImage()

    c.Check(err, IsNil)
    defer func() {
        cr.Docker.ImageRemove(nil, hwImageId, dockertypes.ImageRemoveOptions{})
    }()

    c.Check(kc.Called, Equals, true)
    c.Check(cr.ContainerConfig.Image, Equals, hwImageId)

    _, _, err = cr.Docker.ImageInspectWithRaw(nil, hwImageId)
    c.Check(err, IsNil)

    // (2) Test using image that's already loaded
    kc.Called = false
    cr.ContainerConfig.Image = ""

    err = cr.LoadImage()
    c.Check(err, IsNil)
    c.Check(kc.Called, Equals, false)
    c.Check(cr.ContainerConfig.Image, Equals, hwImageId)
}

// ArvErrorTestClient fails every API call (except container auth) with
// "ArvError", to drive the runner's error paths.
type ArvErrorTestClient struct{}

func (ArvErrorTestClient) Create(resourceType string, parameters arvadosclient.Dict, output interface{}) error {
    return nil
}

func (ArvErrorTestClient) Call(method, resourceType, uuid, action string, parameters arvadosclient.Dict, output interface{}) error {
    if method == "GET" && resourceType == "containers" && action == "auth" {
        return nil
    }
    return errors.New("ArvError")
}

func (ArvErrorTestClient) CallRaw(method, resourceType, uuid, action string, parameters arvadosclient.Dict) (reader io.ReadCloser, err error) {
    return nil, errors.New("ArvError")
}

func (ArvErrorTestClient) Get(resourceType string, uuid string, parameters arvadosclient.Dict, output interface{}) error {
    return errors.New("ArvError")
}

func (ArvErrorTestClient) Update(resourceType string, uuid string, parameters arvadosclient.Dict, output interface{}) (err error) {
    return nil
}

func (ArvErrorTestClient) Discovery(key string) (interface{}, error) {
    return discoveryMap[key], nil
}

// KeepErrorTestClient fails writes and manifest reads with "KeepError".
type KeepErrorTestClient struct {
    KeepTestClient
}

func (*KeepErrorTestClient) ManifestFileReader(manifest.Manifest, string) (arvados.File, error) {
    return nil, errors.New("KeepError")
}

func (*KeepErrorTestClient) PutB(buf []byte) (string, int, error) {
    return "", 0, errors.New("KeepError")
}

func (*KeepErrorTestClient) LocalLocator(string) (string, error) {
    return "", errors.New("KeepError")
}

// KeepReadErrorTestClient fails block reads with "KeepError".
type KeepReadErrorTestClient struct {
    KeepTestClient
}

func (*KeepReadErrorTestClient) ReadAt(string, []byte, int) (int, error) {
    return 0, errors.New("KeepError")
}

// ErrorReader is a file whose Read/Seek always fail.
type ErrorReader struct {
    FileWrapper
}

func (ErrorReader) Read(p []byte) (n int, err error) {
    return 0, errors.New("ErrorReader")
}

func (ErrorReader) Seek(int64, int) (int64, error) {
    return 0, errors.New("ErrorReader")
}

func (KeepReadErrorTestClient) ManifestFileReader(m manifest.Manifest, filename string) (arvados.File, error) {
    return ErrorReader{}, nil
}

func (s *TestSuite) TestLoadImageArvError(c *C) {
    // (1) Arvados error
    kc := &KeepTestClient{}
    defer kc.Close()
    cr, err := NewContainerRunner(s.client, &ArvErrorTestClient{}, kc, nil, "zzzzz-zzzzz-zzzzzzzzzzzzzzz")
    c.Assert(err, IsNil)

    cr.ContainerArvClient = &ArvErrorTestClient{}
    cr.ContainerKeepClient = &KeepTestClient{}

    cr.Container.ContainerImage = hwPDH

    err = cr.LoadImage()
    c.Check(err.Error(), Equals, "While getting container image collection: ArvError")
}

func (s *TestSuite) TestLoadImageKeepError(c *C) {
    // (2) Keep error
    kc := &KeepErrorTestClient{}
    cr, err := NewContainerRunner(s.client, &ArvTestClient{}, kc, s.docker, "zzzzz-zzzzz-zzzzzzzzzzzzzzz")
    c.Assert(err, IsNil)

    cr.ContainerArvClient = &ArvTestClient{}
    cr.ContainerKeepClient = &KeepErrorTestClient{}

    cr.Container.ContainerImage = hwPDH

    err = cr.LoadImage()
    c.Assert(err, NotNil)
    c.Check(err.Error(), Equals, "While creating ManifestFileReader for container image: KeepError")
}

func (s *TestSuite) TestLoadImageCollectionError(c *C) {
    // (3) Collection doesn't contain image
    kc := &KeepReadErrorTestClient{}
    cr, err := NewContainerRunner(s.client, &ArvTestClient{}, kc, nil, "zzzzz-zzzzz-zzzzzzzzzzzzzzz")
    c.Assert(err, IsNil)

    cr.Container.ContainerImage = otherPDH

    cr.ContainerArvClient = &ArvTestClient{}
    cr.ContainerKeepClient = &KeepReadErrorTestClient{}

    err = cr.LoadImage()
    c.Check(err.Error(), Equals, "First file in the container image collection does not end in .tar")
}

func (s *TestSuite) TestLoadImageKeepReadError(c *C) {
    // (4) Collection doesn't contain image
    kc := &KeepReadErrorTestClient{}
    cr, err := NewContainerRunner(s.client, &ArvTestClient{}, kc, s.docker, "zzzzz-zzzzz-zzzzzzzzzzzzzzz")
    c.Assert(err, IsNil)
    cr.Container.ContainerImage = hwPDH
    cr.ContainerArvClient = &ArvTestClient{}
    cr.ContainerKeepClient = &KeepReadErrorTestClient{}

    err = cr.LoadImage()
    c.Check(err, NotNil)
}

// ClosableBuffer is a bytes.Buffer with a no-op Close, usable as a log sink.
type ClosableBuffer struct {
    bytes.Buffer
}

func (*ClosableBuffer) Close() error {
    return nil
}

// TestLogs captures the runner's stdout/stderr log streams in memory.
type TestLogs struct {
    Stdout ClosableBuffer
    Stderr ClosableBuffer
}

func (tl *TestLogs) NewTestLoggingWriter(logstr string) (io.WriteCloser, error) {
    if logstr == "stdout" {
        return &tl.Stdout, nil
    }
    if logstr == "stderr" {
        return &tl.Stderr, nil
    }
    return nil, errors.New("???")
}

// dockerLog frames msg in Docker's 8-byte stream-multiplexing header
// (byte 0 = stream fd, byte 7 = payload length).
func dockerLog(fd byte, msg string) []byte {
    by := []byte(msg)
    header := make([]byte, 8+len(by))
    header[0] = fd
    header[7] = byte(len(by))
    copy(header[8:], by)
    return header
}

// TestRunContainer drives LoadImage/CreateContainer/StartContainer/WaitFinish
// end to end against the fake Docker client.
func (s *TestSuite) TestRunContainer(c *C) {
    s.docker.fn = func(t *TestDockerClient) {
        t.logWriter.Write(dockerLog(1, "Hello world\n"))
        t.logWriter.Close()
    }
    kc := &KeepTestClient{}
    defer kc.Close()
    cr, err := NewContainerRunner(s.client, &ArvTestClient{}, kc, s.docker, "zzzzz-zzzzz-zzzzzzzzzzzzzzz")
    c.Assert(err, IsNil)

    cr.ContainerArvClient = &ArvTestClient{}
    cr.ContainerKeepClient = &KeepTestClient{}

    var logs TestLogs
    cr.NewLogWriter = logs.NewTestLoggingWriter
    cr.Container.ContainerImage = hwPDH
    cr.Container.Command = []string{"./hw"}
    err = cr.LoadImage()
    c.Check(err, IsNil)

    err = cr.CreateContainer()
    c.Check(err, IsNil)

    err = cr.StartContainer()
    c.Check(err, IsNil)

    err = cr.WaitFinish()
    c.Check(err, IsNil)

    c.Check(strings.HasSuffix(logs.Stdout.String(), "Hello world\n"), Equals, true)
    c.Check(logs.Stderr.String(), Equals, "")
}

// TestCommitLogs checks that CommitLogs writes the crunch-run log collection
// with the expected name, manifest and PDH.
func (s *TestSuite) TestCommitLogs(c *C) {
    api := &ArvTestClient{}
    kc := &KeepTestClient{}
    defer kc.Close()
    cr, err := NewContainerRunner(s.client, api, kc, nil, "zzzzz-zzzzz-zzzzzzzzzzzzzzz")
    c.Assert(err, IsNil)
    cr.CrunchLog.Timestamper = (&TestTimestamper{}).Timestamp

    cr.CrunchLog.Print("Hello world!")
    cr.CrunchLog.Print("Goodbye")
    cr.finalState = "Complete"

    err = cr.CommitLogs()
    c.Check(err, IsNil)

    c.Check(api.Calls, Equals, 2)
    c.Check(api.Content[1]["ensure_unique_name"], Equals, true)
    c.Check(api.Content[1]["collection"].(arvadosclient.Dict)["name"], Equals, "logs for zzzzz-zzzzz-zzzzzzzzzzzzzzz")
    c.Check(api.Content[1]["collection"].(arvadosclient.Dict)["manifest_text"], Equals, ". 744b2e4553123b02fa7b452ec5c18993+123 0:123:crunch-run.txt\n")
    c.Check(*cr.LogsPDH, Equals, "63da7bdacf08c40f604daad80c261e9a+60")
}

func (s *TestSuite) TestUpdateContainerRunning(c *C) {
    api := &ArvTestClient{}
    kc := &KeepTestClient{}
    defer kc.Close()
    cr, err := NewContainerRunner(s.client, api, kc, nil, "zzzzz-zzzzz-zzzzzzzzzzzzzzz")
    c.Assert(err, IsNil)

    err = cr.UpdateContainerRunning()
    c.Check(err, IsNil)

    c.Check(api.Content[0]["container"].(arvadosclient.Dict)["state"], Equals, "Running")
}

func (s *TestSuite) TestUpdateContainerComplete(c *C) {
    api := &ArvTestClient{}
    kc := &KeepTestClient{}
    defer kc.Close()
    cr, err := NewContainerRunner(s.client, api, kc, nil, "zzzzz-zzzzz-zzzzzzzzzzzzzzz")
    c.Assert(err, IsNil)

    cr.LogsPDH = new(string)
    *cr.LogsPDH = "d3a229d2fe3690c2c3e75a71a153c6a3+60"

    cr.ExitCode = new(int)
    *cr.ExitCode = 42
    cr.finalState = "Complete"

    err = cr.UpdateContainerFinal()
    c.Check(err, IsNil)

    c.Check(api.Content[0]["container"].(arvadosclient.Dict)["log"], Equals, *cr.LogsPDH)
    c.Check(api.Content[0]["container"].(arvadosclient.Dict)["exit_code"], Equals, *cr.ExitCode)
    c.Check(api.Content[0]["container"].(arvadosclient.Dict)["state"], Equals, "Complete")
}

func (s *TestSuite) TestUpdateContainerCancelled(c *C) {
    api := &ArvTestClient{}
    kc := &KeepTestClient{}
    defer kc.Close()
    cr, err := NewContainerRunner(s.client, api, kc, nil, "zzzzz-zzzzz-zzzzzzzzzzzzzzz")
    c.Assert(err, IsNil)
    cr.cCancelled = true
    cr.finalState = "Cancelled"

    err = cr.UpdateContainerFinal()
    c.Check(err, IsNil)

    c.Check(api.Content[0]["container"].(arvadosclient.Dict)["log"], IsNil)
    c.Check(api.Content[0]["container"].(arvadosclient.Dict)["exit_code"], IsNil)
    c.Check(api.Content[0]["container"].(arvadosclient.Dict)["state"], Equals, "Cancelled")
}

// Used by the TestFullRun*() test below to DRY up boilerplate setup to do full
// dress rehearsal of the Run() function, starting from a JSON container record.
func (s *TestSuite) fullRunHelper(c *C, record string, extraMounts []string, exitCode int, fn func(t *TestDockerClient)) (api *ArvTestClient, cr *ContainerRunner, realTemp string) {
    rec := arvados.Container{}
    err := json.Unmarshal([]byte(record), &rec)
    c.Check(err, IsNil)

    var sm struct {
        SecretMounts map[string]arvados.Mount `json:"secret_mounts"`
    }
    err = json.Unmarshal([]byte(record), &sm)
    c.Check(err, IsNil)
    secretMounts, err := json.Marshal(sm)
    c.Logf("%s %q", sm, secretMounts)
    c.Check(err, IsNil)

    s.docker.exitCode = exitCode
    s.docker.fn = fn
    s.docker.ImageRemove(nil, hwImageId, dockertypes.ImageRemoveOptions{})

    api = &ArvTestClient{Container: rec}
    s.docker.api = api
    kc := &KeepTestClient{}
    defer kc.Close()
    cr, err = NewContainerRunner(s.client, api, kc, s.docker, "zzzzz-zzzzz-zzzzzzzzzzzzzzz")
    c.Assert(err, IsNil)
    s.runner = cr
    cr.statInterval = 100 * time.Millisecond
    cr.containerWatchdogInterval = time.Second
    am := &ArvMountCmdLine{}
    cr.RunArvMount = am.ArvMountTest

    realTemp, err = ioutil.TempDir("", "crunchrun_test1-")
    c.Assert(err, IsNil)
    defer os.RemoveAll(realTemp)
    s.docker.realTemp = realTemp

    tempcount := 0
    cr.MkTempDir = func(_ string, prefix string) (string, error) {
        tempcount++
        d := fmt.Sprintf("%s/%s%d", realTemp, prefix, tempcount)
        err := os.Mkdir(d, os.ModePerm)
        if err != nil && strings.Contains(err.Error(), ": file exists") {
            // Test case must have pre-populated the tempdir
            err = nil
        }
        return d, err
    }
    cr.MkArvClient = func(token string) (IArvadosClient, IKeepClient, *arvados.Client, error) {
        return &ArvTestClient{secretMounts: secretMounts}, &KeepTestClient{}, nil, nil
    }

    if extraMounts != nil && len(extraMounts) > 0 {
        err := cr.SetupArvMountPoint("keep")
        c.Check(err, IsNil)

        for _, m := range extraMounts {
            os.MkdirAll(cr.ArvMountPoint+"/by_id/"+m, os.ModePerm)
        }
    }

    err = cr.Run()
    if api.CalledWith("container.state", "Complete") != nil {
        c.Check(err, IsNil)
    }
    if exitCode != 2 {
        c.Check(api.WasSetRunning, Equals, true)
        var lastupdate arvadosclient.Dict
        for _, content := range api.Content {
            if content["container"] != nil {
                lastupdate = content["container"].(arvadosclient.Dict)
            }
        }
        if lastupdate["log"] == nil {
            c.Errorf("no container update with non-nil log -- updates were: %v", api.Content)
        }
    }

    if err != nil {
        for k, v := range api.Logs {
            c.Log(k)
            c.Log(v.String())
        }
    }

    return
}

func (s *TestSuite) TestFullRunHello(c *C) {
    api, _, _ := s.fullRunHelper(c, `{
    "command": ["echo", "hello world"],
    "container_image": "d4ab34d3d4f8a72f5c4973051ae69fab+122",
    "cwd": ".",
    "environment": {},
    "mounts": {"/tmp": {"kind": "tmp"} },
    "output_path": "/tmp",
    "priority": 1,
    "runtime_constraints": {},
    "state": "Locked"
}`, nil, 0, func(t *TestDockerClient) {
        t.logWriter.Write(dockerLog(1, "hello world\n"))
        t.logWriter.Close()
    })

    c.Check(api.CalledWith("container.exit_code", 0), NotNil)
    c.Check(api.CalledWith("container.state", "Complete"), NotNil)

    c.Check(strings.HasSuffix(api.Logs["stdout"].String(), "hello world\n"), Equals, true)
}

// TestRunAlreadyRunning: a record already in state Running must not be run.
func (s *TestSuite) TestRunAlreadyRunning(c *C) {
    var ran bool
    api, _, _ := s.fullRunHelper(c, `{
    "command": ["sleep", "3"],
    "container_image": "d4ab34d3d4f8a72f5c4973051ae69fab+122",
    "cwd": ".",
    "environment": {},
    "mounts": {"/tmp": {"kind": "tmp"} },
    "output_path": "/tmp",
    "priority": 1,
    "runtime_constraints": {},
    "scheduling_parameters":{"max_run_time": 1},
    "state": "Running"
}`, nil, 2, func(t *TestDockerClient) {
        ran = true
    })
    c.Check(api.CalledWith("container.state", "Cancelled"), IsNil)
    c.Check(api.CalledWith("container.state", "Complete"), IsNil)
    c.Check(ran, Equals, false)
}

// TestRunTimeExceeded: max_run_time expiry must cancel the container.
func (s *TestSuite) TestRunTimeExceeded(c *C) {
    api, _, _ := s.fullRunHelper(c, `{
    "command": ["sleep", "3"],
    "container_image": "d4ab34d3d4f8a72f5c4973051ae69fab+122",
    "cwd": ".",
    "environment": {},
    "mounts": {"/tmp": {"kind": "tmp"} },
    "output_path": "/tmp",
    "priority": 1,
    "runtime_constraints": {},
    "scheduling_parameters":{"max_run_time": 1},
    "state": "Locked"
}`, nil, 0, func(t *TestDockerClient) {
        time.Sleep(3 * time.Second)
        t.logWriter.Close()
    })

    c.Check(api.CalledWith("container.state", "Cancelled"), NotNil)
    c.Check(api.Logs["crunch-run"].String(), Matches, "(?ms).*maximum run time exceeded.*")
}

// TestContainerWaitFails: the watchdog must cancel when Docker reports the
// container exited but ContainerWait never returns.
func (s *TestSuite) TestContainerWaitFails(c *C) {
    api, _, _ := s.fullRunHelper(c, `{
    "command": ["sleep", "3"],
    "container_image": "d4ab34d3d4f8a72f5c4973051ae69fab+122",
    "cwd": ".",
    "mounts": {"/tmp": {"kind": "tmp"} },
    "output_path": "/tmp",
    "priority": 1,
    "state": "Locked"
}`, nil, 0, func(t *TestDockerClient) {
        t.ctrExited = true
        time.Sleep(10 * time.Second)
        t.logWriter.Close()
    })

    c.Check(api.CalledWith("container.state", "Cancelled"), NotNil)
    c.Check(api.Logs["crunch-run"].String(), Matches, "(?ms).*Container is not running.*")
}

func (s *TestSuite) TestCrunchstat(c *C) {
    api, _, _ := s.fullRunHelper(c, `{
    "command": ["sleep", "1"],
    "container_image": "d4ab34d3d4f8a72f5c4973051ae69fab+122",
    "cwd": ".",
    "environment": {},
    "mounts": {"/tmp": {"kind": "tmp"} },
    "output_path": "/tmp",
    "priority": 1,
    "runtime_constraints": {},
    "state": "Locked"
}`, nil, 0, func(t *TestDockerClient) {
        time.Sleep(time.Second)
        t.logWriter.Close()
    })

    c.Check(api.CalledWith("container.exit_code", 0), NotNil)
    c.Check(api.CalledWith("container.state", "Complete"), NotNil)

    // We didn't actually start a container, so crunchstat didn't
    // find accounting files and therefore didn't log any stats.
    // It should have logged a "can't find accounting files"
    // message after one poll interval, though, so we can confirm
    // it's alive:
    c.Assert(api.Logs["crunchstat"], NotNil)
    c.Check(api.Logs["crunchstat"].String(), Matches, `(?ms).*cgroup stats files have not appeared after 100ms.*`)

    // The "files never appeared" log assures us that we called
    // (*crunchstat.Reporter)Stop(), and that we set it up with
    // the correct container ID "abcde":
    c.Check(api.Logs["crunchstat"].String(), Matches, `(?ms).*cgroup stats files never appeared for abcde\n`)
}

func (s *TestSuite) TestNodeInfoLog(c *C) {
    os.Setenv("SLURMD_NODENAME", "compute2")
    api, _, _ := s.fullRunHelper(c, `{
    "command": ["sleep", "1"],
    "container_image": "d4ab34d3d4f8a72f5c4973051ae69fab+122",
    "cwd": ".",
    "environment": {},
    "mounts": {"/tmp": {"kind": "tmp"} },
    "output_path": "/tmp",
    "priority": 1,
    "runtime_constraints": {},
    "state": "Locked"
}`, nil, 0,
        func(t *TestDockerClient) {
            time.Sleep(time.Second)
            t.logWriter.Close()
        })

    c.Check(api.CalledWith("container.exit_code", 0), NotNil)
    c.Check(api.CalledWith("container.state", "Complete"), NotNil)

    c.Assert(api.Logs["node"], NotNil)
    json := api.Logs["node"].String()
    c.Check(json, Matches, `(?ms).*"uuid": *"zzzzz-7ekkf-2z3mc76g2q73aio".*`)
    c.Check(json, Matches, `(?ms).*"total_cpu_cores": *16.*`)
    c.Check(json, Not(Matches), `(?ms).*"info":.*`)

    c.Assert(api.Logs["node-info"], NotNil)
    json = api.Logs["node-info"].String()
    c.Check(json, Matches, `(?ms).*Host Information.*`)
    c.Check(json, Matches, `(?ms).*CPU Information.*`)
    c.Check(json, Matches, `(?ms).*Memory Information.*`)
    c.Check(json, Matches, `(?ms).*Disk Space.*`)
    c.Check(json, Matches, `(?ms).*Disk INodes.*`)
}

func (s *TestSuite) TestContainerRecordLog(c *C) {
    api, _, _ := s.fullRunHelper(c, `{
    "command": ["sleep", "1"],
    "container_image": "d4ab34d3d4f8a72f5c4973051ae69fab+122",
    "cwd": ".",
    "environment": {},
    "mounts": {"/tmp": {"kind": "tmp"} },
    "output_path": "/tmp",
    "priority": 1,
    "runtime_constraints": {},
    "state": "Locked"
}`, nil, 0,
        func(t *TestDockerClient) {
            time.Sleep(time.Second)
            t.logWriter.Close()
        })

    c.Check(api.CalledWith("container.exit_code", 0), NotNil)
    c.Check(api.CalledWith("container.state", "Complete"), NotNil)

    c.Assert(api.Logs["container"], NotNil)
    c.Check(api.Logs["container"].String(), Matches, `(?ms).*container_image.*`)
}

func (s *TestSuite) TestFullRunStderr(c *C) {
    api, _, _ := s.fullRunHelper(c, `{
    "command": ["/bin/sh", "-c", "echo hello ; echo world 1>&2 ; exit 1"],
    "container_image": "d4ab34d3d4f8a72f5c4973051ae69fab+122",
    "cwd": ".",
    "environment": {},
    "mounts": {"/tmp": {"kind": "tmp"} },
    "output_path": "/tmp",
    "priority": 1,
    "runtime_constraints": {},
    "state": "Locked"
}`, nil, 1, func(t *TestDockerClient) {
        t.logWriter.Write(dockerLog(1, "hello\n"))
        t.logWriter.Write(dockerLog(2, "world\n"))
        t.logWriter.Close()
    })

    final := api.CalledWith("container.state", "Complete")
    c.Assert(final, NotNil)
    c.Check(final["container"].(arvadosclient.Dict)["exit_code"], Equals, 1)
    c.Check(final["container"].(arvadosclient.Dict)["log"], NotNil)

    c.Check(strings.HasSuffix(api.Logs["stdout"].String(), "hello\n"), Equals, true)
    c.Check(strings.HasSuffix(api.Logs["stderr"].String(), "world\n"), Equals, true)
}

func (s *TestSuite) TestFullRunDefaultCwd(c *C) {
    api, _, _ := s.fullRunHelper(c, `{
    "command": ["pwd"],
    "container_image": "d4ab34d3d4f8a72f5c4973051ae69fab+122",
    "cwd": ".",
    "environment": {},
    "mounts": {"/tmp": {"kind": "tmp"} },
    "output_path": "/tmp",
    "priority": 1,
    "runtime_constraints": {},
    "state": "Locked"
}`, nil, 0, func(t *TestDockerClient) {
        t.logWriter.Write(dockerLog(1, t.cwd+"\n"))
        t.logWriter.Close()
    })

    c.Check(api.CalledWith("container.exit_code", 0), NotNil)
    c.Check(api.CalledWith("container.state", "Complete"), NotNil)
    c.Log(api.Logs["stdout"])
    c.Check(strings.HasSuffix(api.Logs["stdout"].String(), "/\n"), Equals, true)
}

func (s *TestSuite) TestFullRunSetCwd(c *C) {
    api, _, _ := s.fullRunHelper(c, `{
    "command": ["pwd"],
    "container_image": "d4ab34d3d4f8a72f5c4973051ae69fab+122",
    "cwd": "/bin",
    "environment": {},
    "mounts": {"/tmp": {"kind": "tmp"} },
    "output_path": "/tmp",
    "priority": 1,
    "runtime_constraints": {},
    "state": "Locked"
}`, nil, 0, func(t *TestDockerClient) {
        t.logWriter.Write(dockerLog(1, t.cwd+"\n"))
        t.logWriter.Close()
    })

    c.Check(api.CalledWith("container.exit_code", 0), NotNil)
    c.Check(api.CalledWith("container.state", "Complete"), NotNil)
    c.Check(strings.HasSuffix(api.Logs["stdout"].String(), "/bin\n"), Equals, true)
}

func (s *TestSuite) TestStopOnSignal(c *C) {
    s.testStopContainer(c, func(cr *ContainerRunner) {
        go func() {
            for !s.docker.calledWait {
                time.Sleep(time.Millisecond)
            }
            cr.SigChan <- syscall.SIGINT
        }()
    })
}

func (s *TestSuite) TestStopOnArvMountDeath(c *C) {
    s.testStopContainer(c, func(cr *ContainerRunner) {
        cr.ArvMountExit = make(chan error)
        go func() {
            cr.ArvMountExit <- exec.Command("true").Run()
            close(cr.ArvMountExit)
        }()
    })
}

// NOTE(review): testStopContainer continues past the end of this chunk; the
// remainder is reproduced verbatim and left undocumented.
func (s *TestSuite) testStopContainer(c *C, setup func(cr *ContainerRunner)) {
    record := `{
    "command": ["/bin/sh", "-c", "echo foo && sleep 30 && echo bar"],
    "container_image": "d4ab34d3d4f8a72f5c4973051ae69fab+122",
    "cwd": ".",
    "environment": {},
    "mounts": {"/tmp": {"kind": "tmp"} },
    "output_path": "/tmp",
    "priority": 1,
    "runtime_constraints": {},
    "state": "Locked"
}`

    rec := arvados.Container{}
    err := json.Unmarshal([]byte(record), &rec)
    c.Check(err, IsNil)

    s.docker.fn = func(t *TestDockerClient) {
        <-t.stop
        t.logWriter.Write(dockerLog(1, "foo\n"))
        t.logWriter.Close()
    }
    s.docker.ImageRemove(nil, hwImageId, dockertypes.ImageRemoveOptions{})

    api := &ArvTestClient{Container: rec}
    kc := &KeepTestClient{}
    defer
kc.Close() cr, err := NewContainerRunner(s.client, api, kc, s.docker, "zzzzz-zzzzz-zzzzzzzzzzzzzzz") c.Assert(err, IsNil) cr.RunArvMount = func([]string, string) (*exec.Cmd, error) { return nil, nil } cr.MkArvClient = func(token string) (IArvadosClient, IKeepClient, *arvados.Client, error) { return &ArvTestClient{}, &KeepTestClient{}, nil, nil } setup(cr) done := make(chan error) go func() { done <- cr.Run() }() select { case <-time.After(20 * time.Second): pprof.Lookup("goroutine").WriteTo(os.Stderr, 1) c.Fatal("timed out") case err = <-done: c.Check(err, IsNil) } for k, v := range api.Logs { c.Log(k) c.Log(v.String()) } c.Check(api.CalledWith("container.log", nil), NotNil) c.Check(api.CalledWith("container.state", "Cancelled"), NotNil) c.Check(api.Logs["stdout"].String(), Matches, "(?ms).*foo\n$") } func (s *TestSuite) TestFullRunSetEnv(c *C) { api, _, _ := s.fullRunHelper(c, `{ "command": ["/bin/sh", "-c", "echo $FROBIZ"], "container_image": "d4ab34d3d4f8a72f5c4973051ae69fab+122", "cwd": "/bin", "environment": {"FROBIZ": "bilbo"}, "mounts": {"/tmp": {"kind": "tmp"} }, "output_path": "/tmp", "priority": 1, "runtime_constraints": {}, "state": "Locked" }`, nil, 0, func(t *TestDockerClient) { t.logWriter.Write(dockerLog(1, t.env[0][7:]+"\n")) t.logWriter.Close() }) c.Check(api.CalledWith("container.exit_code", 0), NotNil) c.Check(api.CalledWith("container.state", "Complete"), NotNil) c.Check(strings.HasSuffix(api.Logs["stdout"].String(), "bilbo\n"), Equals, true) } type ArvMountCmdLine struct { Cmd []string token string } func (am *ArvMountCmdLine) ArvMountTest(c []string, token string) (*exec.Cmd, error) { am.Cmd = c am.token = token return nil, nil } func stubCert(temp string) string { path := temp + "/ca-certificates.crt" crt, _ := os.Create(path) crt.Close() arvadosclient.CertFiles = []string{path} return path } func (s *TestSuite) TestSetupMounts(c *C) { api := &ArvTestClient{} kc := &KeepTestClient{} defer kc.Close() cr, err := NewContainerRunner(s.client, 
api, kc, nil, "zzzzz-zzzzz-zzzzzzzzzzzzzzz") c.Assert(err, IsNil) am := &ArvMountCmdLine{} cr.RunArvMount = am.ArvMountTest cr.ContainerArvClient = &ArvTestClient{} cr.ContainerKeepClient = &KeepTestClient{} realTemp, err := ioutil.TempDir("", "crunchrun_test1-") c.Assert(err, IsNil) certTemp, err := ioutil.TempDir("", "crunchrun_test2-") c.Assert(err, IsNil) stubCertPath := stubCert(certTemp) cr.parentTemp = realTemp defer os.RemoveAll(realTemp) defer os.RemoveAll(certTemp) i := 0 cr.MkTempDir = func(_ string, prefix string) (string, error) { i++ d := fmt.Sprintf("%s/%s%d", realTemp, prefix, i) err := os.Mkdir(d, os.ModePerm) if err != nil && strings.Contains(err.Error(), ": file exists") { // Test case must have pre-populated the tempdir err = nil } return d, err } checkEmpty := func() { // Should be deleted. _, err := os.Stat(realTemp) c.Assert(os.IsNotExist(err), Equals, true) // Now recreate it for the next test. c.Assert(os.Mkdir(realTemp, 0777), IsNil) } { i = 0 cr.ArvMountPoint = "" cr.Container.Mounts = make(map[string]arvados.Mount) cr.Container.Mounts["/tmp"] = arvados.Mount{Kind: "tmp"} cr.Container.OutputPath = "/tmp" cr.statInterval = 5 * time.Second err := cr.SetupMounts() c.Check(err, IsNil) c.Check(am.Cmd, DeepEquals, []string{"--foreground", "--allow-other", "--read-write", "--crunchstat-interval=5", "--mount-by-pdh", "by_id", realTemp + "/keep1"}) c.Check(cr.Binds, DeepEquals, []string{realTemp + "/tmp2:/tmp"}) os.RemoveAll(cr.ArvMountPoint) cr.CleanupDirs() checkEmpty() } { i = 0 cr.ArvMountPoint = "" cr.Container.Mounts = make(map[string]arvados.Mount) cr.Container.Mounts["/out"] = arvados.Mount{Kind: "tmp"} cr.Container.Mounts["/tmp"] = arvados.Mount{Kind: "tmp"} cr.Container.OutputPath = "/out" err := cr.SetupMounts() c.Check(err, IsNil) c.Check(am.Cmd, DeepEquals, []string{"--foreground", "--allow-other", "--read-write", "--crunchstat-interval=5", "--mount-by-pdh", "by_id", realTemp + "/keep1"}) c.Check(cr.Binds, DeepEquals, 
[]string{realTemp + "/tmp2:/out", realTemp + "/tmp3:/tmp"}) os.RemoveAll(cr.ArvMountPoint) cr.CleanupDirs() checkEmpty() } { i = 0 cr.ArvMountPoint = "" cr.Container.Mounts = make(map[string]arvados.Mount) cr.Container.Mounts["/tmp"] = arvados.Mount{Kind: "tmp"} cr.Container.OutputPath = "/tmp" apiflag := true cr.Container.RuntimeConstraints.API = &apiflag err := cr.SetupMounts() c.Check(err, IsNil) c.Check(am.Cmd, DeepEquals, []string{"--foreground", "--allow-other", "--read-write", "--crunchstat-interval=5", "--mount-by-pdh", "by_id", realTemp + "/keep1"}) c.Check(cr.Binds, DeepEquals, []string{realTemp + "/tmp2:/tmp", stubCertPath + ":/etc/arvados/ca-certificates.crt:ro"}) os.RemoveAll(cr.ArvMountPoint) cr.CleanupDirs() checkEmpty() apiflag = false } { i = 0 cr.ArvMountPoint = "" cr.Container.Mounts = map[string]arvados.Mount{ "/keeptmp": {Kind: "collection", Writable: true}, } cr.Container.OutputPath = "/keeptmp" os.MkdirAll(realTemp+"/keep1/tmp0", os.ModePerm) err := cr.SetupMounts() c.Check(err, IsNil) c.Check(am.Cmd, DeepEquals, []string{"--foreground", "--allow-other", "--read-write", "--crunchstat-interval=5", "--mount-tmp", "tmp0", "--mount-by-pdh", "by_id", realTemp + "/keep1"}) c.Check(cr.Binds, DeepEquals, []string{realTemp + "/keep1/tmp0:/keeptmp"}) os.RemoveAll(cr.ArvMountPoint) cr.CleanupDirs() checkEmpty() } { i = 0 cr.ArvMountPoint = "" cr.Container.Mounts = map[string]arvados.Mount{ "/keepinp": {Kind: "collection", PortableDataHash: "59389a8f9ee9d399be35462a0f92541c+53"}, "/keepout": {Kind: "collection", Writable: true}, } cr.Container.OutputPath = "/keepout" os.MkdirAll(realTemp+"/keep1/by_id/59389a8f9ee9d399be35462a0f92541c+53", os.ModePerm) os.MkdirAll(realTemp+"/keep1/tmp0", os.ModePerm) err := cr.SetupMounts() c.Check(err, IsNil) c.Check(am.Cmd, DeepEquals, []string{"--foreground", "--allow-other", "--read-write", "--crunchstat-interval=5", "--mount-tmp", "tmp0", "--mount-by-pdh", "by_id", realTemp + "/keep1"}) 
sort.StringSlice(cr.Binds).Sort() c.Check(cr.Binds, DeepEquals, []string{realTemp + "/keep1/by_id/59389a8f9ee9d399be35462a0f92541c+53:/keepinp:ro", realTemp + "/keep1/tmp0:/keepout"}) os.RemoveAll(cr.ArvMountPoint) cr.CleanupDirs() checkEmpty() } { i = 0 cr.ArvMountPoint = "" cr.Container.RuntimeConstraints.KeepCacheRAM = 512 cr.Container.Mounts = map[string]arvados.Mount{ "/keepinp": {Kind: "collection", PortableDataHash: "59389a8f9ee9d399be35462a0f92541c+53"}, "/keepout": {Kind: "collection", Writable: true}, } cr.Container.OutputPath = "/keepout" os.MkdirAll(realTemp+"/keep1/by_id/59389a8f9ee9d399be35462a0f92541c+53", os.ModePerm) os.MkdirAll(realTemp+"/keep1/tmp0", os.ModePerm) err := cr.SetupMounts() c.Check(err, IsNil) c.Check(am.Cmd, DeepEquals, []string{"--foreground", "--allow-other", "--read-write", "--crunchstat-interval=5", "--file-cache", "512", "--mount-tmp", "tmp0", "--mount-by-pdh", "by_id", realTemp + "/keep1"}) sort.StringSlice(cr.Binds).Sort() c.Check(cr.Binds, DeepEquals, []string{realTemp + "/keep1/by_id/59389a8f9ee9d399be35462a0f92541c+53:/keepinp:ro", realTemp + "/keep1/tmp0:/keepout"}) os.RemoveAll(cr.ArvMountPoint) cr.CleanupDirs() checkEmpty() } for _, test := range []struct { in interface{} out string }{ {in: "foo", out: `"foo"`}, {in: nil, out: `null`}, {in: map[string]int64{"foo": 123456789123456789}, out: `{"foo":123456789123456789}`}, } { i = 0 cr.ArvMountPoint = "" cr.Container.Mounts = map[string]arvados.Mount{ "/mnt/test.json": {Kind: "json", Content: test.in}, } err := cr.SetupMounts() c.Check(err, IsNil) sort.StringSlice(cr.Binds).Sort() c.Check(cr.Binds, DeepEquals, []string{realTemp + "/json2/mountdata.json:/mnt/test.json:ro"}) content, err := ioutil.ReadFile(realTemp + "/json2/mountdata.json") c.Check(err, IsNil) c.Check(content, DeepEquals, []byte(test.out)) os.RemoveAll(cr.ArvMountPoint) cr.CleanupDirs() checkEmpty() } for _, test := range []struct { in interface{} out string }{ {in: "foo", out: `foo`}, {in: nil, out: 
"error"}, {in: map[string]int64{"foo": 123456789123456789}, out: "error"}, } { i = 0 cr.ArvMountPoint = "" cr.Container.Mounts = map[string]arvados.Mount{ "/mnt/test.txt": {Kind: "text", Content: test.in}, } err := cr.SetupMounts() if test.out == "error" { c.Check(err.Error(), Equals, "content for mount \"/mnt/test.txt\" must be a string") } else { c.Check(err, IsNil) sort.StringSlice(cr.Binds).Sort() c.Check(cr.Binds, DeepEquals, []string{realTemp + "/text2/mountdata.text:/mnt/test.txt:ro"}) content, err := ioutil.ReadFile(realTemp + "/text2/mountdata.text") c.Check(err, IsNil) c.Check(content, DeepEquals, []byte(test.out)) } os.RemoveAll(cr.ArvMountPoint) cr.CleanupDirs() checkEmpty() } // Read-only mount points are allowed underneath output_dir mount point { i = 0 cr.ArvMountPoint = "" cr.Container.Mounts = make(map[string]arvados.Mount) cr.Container.Mounts = map[string]arvados.Mount{ "/tmp": {Kind: "tmp"}, "/tmp/foo": {Kind: "collection"}, } cr.Container.OutputPath = "/tmp" os.MkdirAll(realTemp+"/keep1/tmp0", os.ModePerm) err := cr.SetupMounts() c.Check(err, IsNil) c.Check(am.Cmd, DeepEquals, []string{"--foreground", "--allow-other", "--read-write", "--crunchstat-interval=5", "--file-cache", "512", "--mount-tmp", "tmp0", "--mount-by-pdh", "by_id", realTemp + "/keep1"}) c.Check(cr.Binds, DeepEquals, []string{realTemp + "/tmp2:/tmp", realTemp + "/keep1/tmp0:/tmp/foo:ro"}) os.RemoveAll(cr.ArvMountPoint) cr.CleanupDirs() checkEmpty() } // Writable mount points copied to output_dir mount point { i = 0 cr.ArvMountPoint = "" cr.Container.Mounts = make(map[string]arvados.Mount) cr.Container.Mounts = map[string]arvados.Mount{ "/tmp": {Kind: "tmp"}, "/tmp/foo": {Kind: "collection", PortableDataHash: "59389a8f9ee9d399be35462a0f92541c+53", Writable: true}, "/tmp/bar": {Kind: "collection", PortableDataHash: "59389a8f9ee9d399be35462a0f92541d+53", Path: "baz", Writable: true}, } cr.Container.OutputPath = "/tmp" 
os.MkdirAll(realTemp+"/keep1/by_id/59389a8f9ee9d399be35462a0f92541c+53", os.ModePerm) os.MkdirAll(realTemp+"/keep1/by_id/59389a8f9ee9d399be35462a0f92541d+53/baz", os.ModePerm) rf, _ := os.Create(realTemp + "/keep1/by_id/59389a8f9ee9d399be35462a0f92541d+53/baz/quux") rf.Write([]byte("bar")) rf.Close() err := cr.SetupMounts() c.Check(err, IsNil) _, err = os.Stat(cr.HostOutputDir + "/foo") c.Check(err, IsNil) _, err = os.Stat(cr.HostOutputDir + "/bar/quux") c.Check(err, IsNil) os.RemoveAll(cr.ArvMountPoint) cr.CleanupDirs() checkEmpty() } // Only mount points of kind 'collection' are allowed underneath output_dir mount point { i = 0 cr.ArvMountPoint = "" cr.Container.Mounts = make(map[string]arvados.Mount) cr.Container.Mounts = map[string]arvados.Mount{ "/tmp": {Kind: "tmp"}, "/tmp/foo": {Kind: "tmp"}, } cr.Container.OutputPath = "/tmp" err := cr.SetupMounts() c.Check(err, NotNil) c.Check(err, ErrorMatches, `Only mount points of kind 'collection', 'text' or 'json' are supported underneath the output_path.*`) os.RemoveAll(cr.ArvMountPoint) cr.CleanupDirs() checkEmpty() } // Only mount point of kind 'collection' is allowed for stdin { i = 0 cr.ArvMountPoint = "" cr.Container.Mounts = make(map[string]arvados.Mount) cr.Container.Mounts = map[string]arvados.Mount{ "stdin": {Kind: "tmp"}, } err := cr.SetupMounts() c.Check(err, NotNil) c.Check(err, ErrorMatches, `Unsupported mount kind 'tmp' for stdin.*`) os.RemoveAll(cr.ArvMountPoint) cr.CleanupDirs() checkEmpty() } // git_tree mounts { i = 0 cr.ArvMountPoint = "" (*GitMountSuite)(nil).useTestGitServer(c) cr.token = arvadostest.ActiveToken cr.Container.Mounts = make(map[string]arvados.Mount) cr.Container.Mounts = map[string]arvados.Mount{ "/tip": { Kind: "git_tree", UUID: arvadostest.Repository2UUID, Commit: "fd3531f42995344f36c30b79f55f27b502f3d344", Path: "/", }, "/non-tip": { Kind: "git_tree", UUID: arvadostest.Repository2UUID, Commit: "5ebfab0522851df01fec11ec55a6d0f4877b542e", Path: "/", }, } cr.Container.OutputPath = 
"/tmp" err := cr.SetupMounts() c.Check(err, IsNil) // dirMap[mountpoint] == tmpdir dirMap := make(map[string]string) for _, bind := range cr.Binds { tokens := strings.Split(bind, ":") dirMap[tokens[1]] = tokens[0] if cr.Container.Mounts[tokens[1]].Writable { c.Check(len(tokens), Equals, 2) } else { c.Check(len(tokens), Equals, 3) c.Check(tokens[2], Equals, "ro") } } data, err := ioutil.ReadFile(dirMap["/tip"] + "/dir1/dir2/file with mode 0644") c.Check(err, IsNil) c.Check(string(data), Equals, "\000\001\002\003") _, err = ioutil.ReadFile(dirMap["/tip"] + "/file only on testbranch") c.Check(err, FitsTypeOf, &os.PathError{}) c.Check(os.IsNotExist(err), Equals, true) data, err = ioutil.ReadFile(dirMap["/non-tip"] + "/dir1/dir2/file with mode 0644") c.Check(err, IsNil) c.Check(string(data), Equals, "\000\001\002\003") data, err = ioutil.ReadFile(dirMap["/non-tip"] + "/file only on testbranch") c.Check(err, IsNil) c.Check(string(data), Equals, "testfile\n") cr.CleanupDirs() checkEmpty() } } func (s *TestSuite) TestStdout(c *C) { helperRecord := `{ "command": ["/bin/sh", "-c", "echo $FROBIZ"], "container_image": "d4ab34d3d4f8a72f5c4973051ae69fab+122", "cwd": "/bin", "environment": {"FROBIZ": "bilbo"}, "mounts": {"/tmp": {"kind": "tmp"}, "stdout": {"kind": "file", "path": "/tmp/a/b/c.out"} }, "output_path": "/tmp", "priority": 1, "runtime_constraints": {}, "state": "Locked" }` api, cr, _ := s.fullRunHelper(c, helperRecord, nil, 0, func(t *TestDockerClient) { t.logWriter.Write(dockerLog(1, t.env[0][7:]+"\n")) t.logWriter.Close() }) c.Check(api.CalledWith("container.exit_code", 0), NotNil) c.Check(api.CalledWith("container.state", "Complete"), NotNil) c.Check(cr.ContainerArvClient.(*ArvTestClient).CalledWith("collection.manifest_text", "./a/b 307372fa8fd5c146b22ae7a45b49bc31+6 0:6:c.out\n"), NotNil) } // Used by the TestStdoutWithWrongPath*() func (s *TestSuite) stdoutErrorRunHelper(c *C, record string, fn func(t *TestDockerClient)) (api *ArvTestClient, cr *ContainerRunner, 
err error) { rec := arvados.Container{} err = json.Unmarshal([]byte(record), &rec) c.Check(err, IsNil) s.docker.fn = fn s.docker.ImageRemove(nil, hwImageId, dockertypes.ImageRemoveOptions{}) api = &ArvTestClient{Container: rec} kc := &KeepTestClient{} defer kc.Close() cr, err = NewContainerRunner(s.client, api, kc, s.docker, "zzzzz-zzzzz-zzzzzzzzzzzzzzz") c.Assert(err, IsNil) am := &ArvMountCmdLine{} cr.RunArvMount = am.ArvMountTest cr.MkArvClient = func(token string) (IArvadosClient, IKeepClient, *arvados.Client, error) { return &ArvTestClient{}, &KeepTestClient{}, nil, nil } err = cr.Run() return } func (s *TestSuite) TestStdoutWithWrongPath(c *C) { _, _, err := s.stdoutErrorRunHelper(c, `{ "mounts": {"/tmp": {"kind": "tmp"}, "stdout": {"kind": "file", "path":"/tmpa.out"} }, "output_path": "/tmp", "state": "Locked" }`, func(t *TestDockerClient) {}) c.Check(err, NotNil) c.Check(strings.Contains(err.Error(), "Stdout path does not start with OutputPath"), Equals, true) } func (s *TestSuite) TestStdoutWithWrongKindTmp(c *C) { _, _, err := s.stdoutErrorRunHelper(c, `{ "mounts": {"/tmp": {"kind": "tmp"}, "stdout": {"kind": "tmp", "path":"/tmp/a.out"} }, "output_path": "/tmp", "state": "Locked" }`, func(t *TestDockerClient) {}) c.Check(err, NotNil) c.Check(strings.Contains(err.Error(), "Unsupported mount kind 'tmp' for stdout"), Equals, true) } func (s *TestSuite) TestStdoutWithWrongKindCollection(c *C) { _, _, err := s.stdoutErrorRunHelper(c, `{ "mounts": {"/tmp": {"kind": "tmp"}, "stdout": {"kind": "collection", "path":"/tmp/a.out"} }, "output_path": "/tmp", "state": "Locked" }`, func(t *TestDockerClient) {}) c.Check(err, NotNil) c.Check(strings.Contains(err.Error(), "Unsupported mount kind 'collection' for stdout"), Equals, true) } func (s *TestSuite) TestFullRunWithAPI(c *C) { defer os.Setenv("ARVADOS_API_HOST", os.Getenv("ARVADOS_API_HOST")) os.Setenv("ARVADOS_API_HOST", "test.arvados.org") api, _, _ := s.fullRunHelper(c, `{ "command": ["/bin/sh", "-c", "echo 
$ARVADOS_API_HOST"], "container_image": "d4ab34d3d4f8a72f5c4973051ae69fab+122", "cwd": "/bin", "environment": {}, "mounts": {"/tmp": {"kind": "tmp"} }, "output_path": "/tmp", "priority": 1, "runtime_constraints": {"API": true}, "state": "Locked" }`, nil, 0, func(t *TestDockerClient) { t.logWriter.Write(dockerLog(1, t.env[1][17:]+"\n")) t.logWriter.Close() }) c.Check(api.CalledWith("container.exit_code", 0), NotNil) c.Check(api.CalledWith("container.state", "Complete"), NotNil) c.Check(strings.HasSuffix(api.Logs["stdout"].String(), "test.arvados.org\n"), Equals, true) c.Check(api.CalledWith("container.output", "d41d8cd98f00b204e9800998ecf8427e+0"), NotNil) } func (s *TestSuite) TestFullRunSetOutput(c *C) { defer os.Setenv("ARVADOS_API_HOST", os.Getenv("ARVADOS_API_HOST")) os.Setenv("ARVADOS_API_HOST", "test.arvados.org") api, _, _ := s.fullRunHelper(c, `{ "command": ["/bin/sh", "-c", "echo $ARVADOS_API_HOST"], "container_image": "d4ab34d3d4f8a72f5c4973051ae69fab+122", "cwd": "/bin", "environment": {}, "mounts": {"/tmp": {"kind": "tmp"} }, "output_path": "/tmp", "priority": 1, "runtime_constraints": {"API": true}, "state": "Locked" }`, nil, 0, func(t *TestDockerClient) { t.api.Container.Output = "d4ab34d3d4f8a72f5c4973051ae69fab+122" t.logWriter.Close() }) c.Check(api.CalledWith("container.exit_code", 0), NotNil) c.Check(api.CalledWith("container.state", "Complete"), NotNil) c.Check(api.CalledWith("container.output", "d4ab34d3d4f8a72f5c4973051ae69fab+122"), NotNil) } func (s *TestSuite) TestStdoutWithExcludeFromOutputMountPointUnderOutputDir(c *C) { helperRecord := `{ "command": ["/bin/sh", "-c", "echo $FROBIZ"], "container_image": "d4ab34d3d4f8a72f5c4973051ae69fab+122", "cwd": "/bin", "environment": {"FROBIZ": "bilbo"}, "mounts": { "/tmp": {"kind": "tmp"}, "/tmp/foo": {"kind": "collection", "portable_data_hash": "a3e8f74c6f101eae01fa08bfb4e49b3a+54", "exclude_from_output": true }, "stdout": {"kind": "file", "path": "/tmp/a/b/c.out"} }, "output_path": "/tmp", 
"priority": 1, "runtime_constraints": {}, "state": "Locked" }` extraMounts := []string{"a3e8f74c6f101eae01fa08bfb4e49b3a+54"} api, cr, _ := s.fullRunHelper(c, helperRecord, extraMounts, 0, func(t *TestDockerClient) { t.logWriter.Write(dockerLog(1, t.env[0][7:]+"\n")) t.logWriter.Close() }) c.Check(api.CalledWith("container.exit_code", 0), NotNil) c.Check(api.CalledWith("container.state", "Complete"), NotNil) c.Check(cr.ContainerArvClient.(*ArvTestClient).CalledWith("collection.manifest_text", "./a/b 307372fa8fd5c146b22ae7a45b49bc31+6 0:6:c.out\n"), NotNil) } func (s *TestSuite) TestStdoutWithMultipleMountPointsUnderOutputDir(c *C) { helperRecord := `{ "command": ["/bin/sh", "-c", "echo $FROBIZ"], "container_image": "d4ab34d3d4f8a72f5c4973051ae69fab+122", "cwd": "/bin", "environment": {"FROBIZ": "bilbo"}, "mounts": { "/tmp": {"kind": "tmp"}, "/tmp/foo/bar": {"kind": "collection", "portable_data_hash": "a0def87f80dd594d4675809e83bd4f15+367", "path":"/file2_in_main.txt"}, "/tmp/foo/sub1": {"kind": "collection", "portable_data_hash": "a0def87f80dd594d4675809e83bd4f15+367", "path":"/subdir1"}, "/tmp/foo/sub1file2": {"kind": "collection", "portable_data_hash": "a0def87f80dd594d4675809e83bd4f15+367", "path":"/subdir1/file2_in_subdir1.txt"}, "/tmp/foo/baz/sub2file2": {"kind": "collection", "portable_data_hash": "a0def87f80dd594d4675809e83bd4f15+367", "path":"/subdir1/subdir2/file2_in_subdir2.txt"}, "stdout": {"kind": "file", "path": "/tmp/a/b/c.out"} }, "output_path": "/tmp", "priority": 1, "runtime_constraints": {}, "state": "Locked" }` extraMounts := []string{ "a0def87f80dd594d4675809e83bd4f15+367/file2_in_main.txt", "a0def87f80dd594d4675809e83bd4f15+367/subdir1/file2_in_subdir1.txt", "a0def87f80dd594d4675809e83bd4f15+367/subdir1/subdir2/file2_in_subdir2.txt", } api, runner, realtemp := s.fullRunHelper(c, helperRecord, extraMounts, 0, func(t *TestDockerClient) { t.logWriter.Write(dockerLog(1, t.env[0][7:]+"\n")) t.logWriter.Close() }) c.Check(runner.Binds, DeepEquals, 
[]string{realtemp + "/tmp2:/tmp", realtemp + "/keep1/by_id/a0def87f80dd594d4675809e83bd4f15+367/file2_in_main.txt:/tmp/foo/bar:ro", realtemp + "/keep1/by_id/a0def87f80dd594d4675809e83bd4f15+367/subdir1/subdir2/file2_in_subdir2.txt:/tmp/foo/baz/sub2file2:ro", realtemp + "/keep1/by_id/a0def87f80dd594d4675809e83bd4f15+367/subdir1:/tmp/foo/sub1:ro", realtemp + "/keep1/by_id/a0def87f80dd594d4675809e83bd4f15+367/subdir1/file2_in_subdir1.txt:/tmp/foo/sub1file2:ro", }) c.Check(api.CalledWith("container.exit_code", 0), NotNil) c.Check(api.CalledWith("container.state", "Complete"), NotNil) for _, v := range api.Content { if v["collection"] != nil { c.Check(v["ensure_unique_name"], Equals, true) collection := v["collection"].(arvadosclient.Dict) if strings.Index(collection["name"].(string), "output") == 0 { manifest := collection["manifest_text"].(string) c.Check(manifest, Equals, `./a/b 307372fa8fd5c146b22ae7a45b49bc31+6 0:6:c.out ./foo 3e426d509afffb85e06c4c96a7c15e91+27+Aa124ac75e5168396c73c0abcdefgh11234567890@569fa8c3 3e426d509afffb85e06c4c96a7c15e91+27+Aa124ac75e5168396cabcdefghij6419876543234@569fa8c4 9:18:bar 36:18:sub1file2 ./foo/baz 3e426d509afffb85e06c4c96a7c15e91+27+Aa124ac75e5168396c73c0bcdefghijk544332211@569fa8c5 9:18:sub2file2 ./foo/sub1 3e426d509afffb85e06c4c96a7c15e91+27+Aa124ac75e5168396cabcdefghij6419876543234@569fa8c4 0:9:file1_in_subdir1.txt 9:18:file2_in_subdir1.txt ./foo/sub1/subdir2 3e426d509afffb85e06c4c96a7c15e91+27+Aa124ac75e5168396c73c0bcdefghijk544332211@569fa8c5 0:9:file1_in_subdir2.txt 9:18:file2_in_subdir2.txt `) } } } } func (s *TestSuite) TestStdoutWithMountPointsUnderOutputDirDenormalizedManifest(c *C) { helperRecord := `{ "command": ["/bin/sh", "-c", "echo $FROBIZ"], "container_image": "d4ab34d3d4f8a72f5c4973051ae69fab+122", "cwd": "/bin", "environment": {"FROBIZ": "bilbo"}, "mounts": { "/tmp": {"kind": "tmp"}, "/tmp/foo/bar": {"kind": "collection", "portable_data_hash": "b0def87f80dd594d4675809e83bd4f15+367", "path": 
"/subdir1/file2_in_subdir1.txt"}, "stdout": {"kind": "file", "path": "/tmp/a/b/c.out"} }, "output_path": "/tmp", "priority": 1, "runtime_constraints": {}, "state": "Locked" }` extraMounts := []string{ "b0def87f80dd594d4675809e83bd4f15+367/subdir1/file2_in_subdir1.txt", } api, _, _ := s.fullRunHelper(c, helperRecord, extraMounts, 0, func(t *TestDockerClient) { t.logWriter.Write(dockerLog(1, t.env[0][7:]+"\n")) t.logWriter.Close() }) c.Check(api.CalledWith("container.exit_code", 0), NotNil) c.Check(api.CalledWith("container.state", "Complete"), NotNil) for _, v := range api.Content { if v["collection"] != nil { collection := v["collection"].(arvadosclient.Dict) if strings.Index(collection["name"].(string), "output") == 0 { manifest := collection["manifest_text"].(string) c.Check(manifest, Equals, `./a/b 307372fa8fd5c146b22ae7a45b49bc31+6 0:6:c.out ./foo 3e426d509afffb85e06c4c96a7c15e91+27+Aa124ac75e5168396c73c0abcdefgh11234567890@569fa8c3 10:17:bar `) } } } } func (s *TestSuite) TestOutputError(c *C) { helperRecord := `{ "command": ["/bin/sh", "-c", "echo $FROBIZ"], "container_image": "d4ab34d3d4f8a72f5c4973051ae69fab+122", "cwd": "/bin", "environment": {"FROBIZ": "bilbo"}, "mounts": { "/tmp": {"kind": "tmp"} }, "output_path": "/tmp", "priority": 1, "runtime_constraints": {}, "state": "Locked" }` extraMounts := []string{} api, _, _ := s.fullRunHelper(c, helperRecord, extraMounts, 0, func(t *TestDockerClient) { os.Symlink("/etc/hosts", t.realTemp+"/tmp2/baz") t.logWriter.Close() }) c.Check(api.CalledWith("container.state", "Cancelled"), NotNil) } func (s *TestSuite) TestStdinCollectionMountPoint(c *C) { helperRecord := `{ "command": ["/bin/sh", "-c", "echo $FROBIZ"], "container_image": "d4ab34d3d4f8a72f5c4973051ae69fab+122", "cwd": "/bin", "environment": {"FROBIZ": "bilbo"}, "mounts": { "/tmp": {"kind": "tmp"}, "stdin": {"kind": "collection", "portable_data_hash": "b0def87f80dd594d4675809e83bd4f15+367", "path": "/file1_in_main.txt"}, "stdout": {"kind": "file", "path": 
"/tmp/a/b/c.out"} }, "output_path": "/tmp", "priority": 1, "runtime_constraints": {}, "state": "Locked" }` extraMounts := []string{ "b0def87f80dd594d4675809e83bd4f15+367/file1_in_main.txt", } api, _, _ := s.fullRunHelper(c, helperRecord, extraMounts, 0, func(t *TestDockerClient) { t.logWriter.Write(dockerLog(1, t.env[0][7:]+"\n")) t.logWriter.Close() }) c.Check(api.CalledWith("container.exit_code", 0), NotNil) c.Check(api.CalledWith("container.state", "Complete"), NotNil) for _, v := range api.Content { if v["collection"] != nil { collection := v["collection"].(arvadosclient.Dict) if strings.Index(collection["name"].(string), "output") == 0 { manifest := collection["manifest_text"].(string) c.Check(manifest, Equals, `./a/b 307372fa8fd5c146b22ae7a45b49bc31+6 0:6:c.out `) } } } } func (s *TestSuite) TestStdinJsonMountPoint(c *C) { helperRecord := `{ "command": ["/bin/sh", "-c", "echo $FROBIZ"], "container_image": "d4ab34d3d4f8a72f5c4973051ae69fab+122", "cwd": "/bin", "environment": {"FROBIZ": "bilbo"}, "mounts": { "/tmp": {"kind": "tmp"}, "stdin": {"kind": "json", "content": "foo"}, "stdout": {"kind": "file", "path": "/tmp/a/b/c.out"} }, "output_path": "/tmp", "priority": 1, "runtime_constraints": {}, "state": "Locked" }` api, _, _ := s.fullRunHelper(c, helperRecord, nil, 0, func(t *TestDockerClient) { t.logWriter.Write(dockerLog(1, t.env[0][7:]+"\n")) t.logWriter.Close() }) c.Check(api.CalledWith("container.exit_code", 0), NotNil) c.Check(api.CalledWith("container.state", "Complete"), NotNil) for _, v := range api.Content { if v["collection"] != nil { collection := v["collection"].(arvadosclient.Dict) if strings.Index(collection["name"].(string), "output") == 0 { manifest := collection["manifest_text"].(string) c.Check(manifest, Equals, `./a/b 307372fa8fd5c146b22ae7a45b49bc31+6 0:6:c.out `) } } } } func (s *TestSuite) TestStderrMount(c *C) { api, cr, _ := s.fullRunHelper(c, `{ "command": ["/bin/sh", "-c", "echo hello;exit 1"], "container_image": 
"d4ab34d3d4f8a72f5c4973051ae69fab+122", "cwd": ".", "environment": {}, "mounts": {"/tmp": {"kind": "tmp"}, "stdout": {"kind": "file", "path": "/tmp/a/out.txt"}, "stderr": {"kind": "file", "path": "/tmp/b/err.txt"}}, "output_path": "/tmp", "priority": 1, "runtime_constraints": {}, "state": "Locked" }`, nil, 1, func(t *TestDockerClient) { t.logWriter.Write(dockerLog(1, "hello\n")) t.logWriter.Write(dockerLog(2, "oops\n")) t.logWriter.Close() }) final := api.CalledWith("container.state", "Complete") c.Assert(final, NotNil) c.Check(final["container"].(arvadosclient.Dict)["exit_code"], Equals, 1) c.Check(final["container"].(arvadosclient.Dict)["log"], NotNil) c.Check(cr.ContainerArvClient.(*ArvTestClient).CalledWith("collection.manifest_text", "./a b1946ac92492d2347c6235b4d2611184+6 0:6:out.txt\n./b 38af5c54926b620264ab1501150cf189+5 0:5:err.txt\n"), NotNil) } func (s *TestSuite) TestNumberRoundTrip(c *C) { kc := &KeepTestClient{} defer kc.Close() cr, err := NewContainerRunner(s.client, &ArvTestClient{callraw: true}, kc, nil, "zzzzz-zzzzz-zzzzzzzzzzzzzzz") c.Assert(err, IsNil) cr.fetchContainerRecord() jsondata, err := json.Marshal(cr.Container.Mounts["/json"].Content) c.Check(err, IsNil) c.Check(string(jsondata), Equals, `{"number":123456789123456789}`) } func (s *TestSuite) TestFullBrokenDocker1(c *C) { tf, err := ioutil.TempFile("", "brokenNodeHook-") c.Assert(err, IsNil) defer os.Remove(tf.Name()) tf.Write([]byte(`#!/bin/sh exec echo killme `)) tf.Close() os.Chmod(tf.Name(), 0700) ech := tf.Name() brokenNodeHook = &ech api, _, _ := s.fullRunHelper(c, `{ "command": ["echo", "hello world"], "container_image": "d4ab34d3d4f8a72f5c4973051ae69fab+122", "cwd": ".", "environment": {}, "mounts": {"/tmp": {"kind": "tmp"} }, "output_path": "/tmp", "priority": 1, "runtime_constraints": {}, "state": "Locked" }`, nil, 2, func(t *TestDockerClient) { t.logWriter.Write(dockerLog(1, "hello world\n")) t.logWriter.Close() }) c.Check(api.CalledWith("container.state", "Queued"), NotNil) 
c.Check(api.Logs["crunch-run"].String(), Matches, "(?ms).*unable to run containers.*") c.Check(api.Logs["crunch-run"].String(), Matches, "(?ms).*Running broken node hook.*") c.Check(api.Logs["crunch-run"].String(), Matches, "(?ms).*killme.*") } func (s *TestSuite) TestFullBrokenDocker2(c *C) { ech := "" brokenNodeHook = &ech api, _, _ := s.fullRunHelper(c, `{ "command": ["echo", "hello world"], "container_image": "d4ab34d3d4f8a72f5c4973051ae69fab+122", "cwd": ".", "environment": {}, "mounts": {"/tmp": {"kind": "tmp"} }, "output_path": "/tmp", "priority": 1, "runtime_constraints": {}, "state": "Locked" }`, nil, 2, func(t *TestDockerClient) { t.logWriter.Write(dockerLog(1, "hello world\n")) t.logWriter.Close() }) c.Check(api.CalledWith("container.state", "Queued"), NotNil) c.Check(api.Logs["crunch-run"].String(), Matches, "(?ms).*unable to run containers.*") c.Check(api.Logs["crunch-run"].String(), Matches, "(?ms).*Writing /var/lock/crunch-run-broken to mark node as broken.*") } func (s *TestSuite) TestFullBrokenDocker3(c *C) { ech := "" brokenNodeHook = &ech api, _, _ := s.fullRunHelper(c, `{ "command": ["echo", "hello world"], "container_image": "d4ab34d3d4f8a72f5c4973051ae69fab+122", "cwd": ".", "environment": {}, "mounts": {"/tmp": {"kind": "tmp"} }, "output_path": "/tmp", "priority": 1, "runtime_constraints": {}, "state": "Locked" }`, nil, 3, func(t *TestDockerClient) { t.logWriter.Write(dockerLog(1, "hello world\n")) t.logWriter.Close() }) c.Check(api.CalledWith("container.state", "Cancelled"), NotNil) c.Check(api.Logs["crunch-run"].String(), Matches, "(?ms).*unable to run containers.*") } func (s *TestSuite) TestBadCommand1(c *C) { ech := "" brokenNodeHook = &ech api, _, _ := s.fullRunHelper(c, `{ "command": ["echo", "hello world"], "container_image": "d4ab34d3d4f8a72f5c4973051ae69fab+122", "cwd": ".", "environment": {}, "mounts": {"/tmp": {"kind": "tmp"} }, "output_path": "/tmp", "priority": 1, "runtime_constraints": {}, "state": "Locked" }`, nil, 4, func(t 
*TestDockerClient) { t.logWriter.Write(dockerLog(1, "hello world\n")) t.logWriter.Close() }) c.Check(api.CalledWith("container.state", "Cancelled"), NotNil) c.Check(api.Logs["crunch-run"].String(), Matches, "(?ms).*Possible causes:.*is missing.*") } func (s *TestSuite) TestBadCommand2(c *C) { ech := "" brokenNodeHook = &ech api, _, _ := s.fullRunHelper(c, `{ "command": ["echo", "hello world"], "container_image": "d4ab34d3d4f8a72f5c4973051ae69fab+122", "cwd": ".", "environment": {}, "mounts": {"/tmp": {"kind": "tmp"} }, "output_path": "/tmp", "priority": 1, "runtime_constraints": {}, "state": "Locked" }`, nil, 5, func(t *TestDockerClient) { t.logWriter.Write(dockerLog(1, "hello world\n")) t.logWriter.Close() }) c.Check(api.CalledWith("container.state", "Cancelled"), NotNil) c.Check(api.Logs["crunch-run"].String(), Matches, "(?ms).*Possible causes:.*is missing.*") } func (s *TestSuite) TestBadCommand3(c *C) { ech := "" brokenNodeHook = &ech api, _, _ := s.fullRunHelper(c, `{ "command": ["echo", "hello world"], "container_image": "d4ab34d3d4f8a72f5c4973051ae69fab+122", "cwd": ".", "environment": {}, "mounts": {"/tmp": {"kind": "tmp"} }, "output_path": "/tmp", "priority": 1, "runtime_constraints": {}, "state": "Locked" }`, nil, 6, func(t *TestDockerClient) { t.logWriter.Write(dockerLog(1, "hello world\n")) t.logWriter.Close() }) c.Check(api.CalledWith("container.state", "Cancelled"), NotNil) c.Check(api.Logs["crunch-run"].String(), Matches, "(?ms).*Possible causes:.*is missing.*") } func (s *TestSuite) TestSecretTextMountPoint(c *C) { // under normal mounts, gets captured in output, oops helperRecord := `{ "command": ["true"], "container_image": "d4ab34d3d4f8a72f5c4973051ae69fab+122", "cwd": "/bin", "mounts": { "/tmp": {"kind": "tmp"}, "/tmp/secret.conf": {"kind": "text", "content": "mypassword"} }, "secret_mounts": { }, "output_path": "/tmp", "priority": 1, "runtime_constraints": {}, "state": "Locked" }` api, cr, _ := s.fullRunHelper(c, helperRecord, nil, 0, func(t 
*TestDockerClient) { content, err := ioutil.ReadFile(t.realTemp + "/tmp2/secret.conf") c.Check(err, IsNil) c.Check(content, DeepEquals, []byte("mypassword")) t.logWriter.Close() }) c.Check(api.CalledWith("container.exit_code", 0), NotNil) c.Check(api.CalledWith("container.state", "Complete"), NotNil) c.Check(cr.ContainerArvClient.(*ArvTestClient).CalledWith("collection.manifest_text", ". 34819d7beeabb9260a5c854bc85b3e44+10 0:10:secret.conf\n"), NotNil) c.Check(cr.ContainerArvClient.(*ArvTestClient).CalledWith("collection.manifest_text", ""), IsNil) // under secret mounts, not captured in output helperRecord = `{ "command": ["true"], "container_image": "d4ab34d3d4f8a72f5c4973051ae69fab+122", "cwd": "/bin", "mounts": { "/tmp": {"kind": "tmp"} }, "secret_mounts": { "/tmp/secret.conf": {"kind": "text", "content": "mypassword"} }, "output_path": "/tmp", "priority": 1, "runtime_constraints": {}, "state": "Locked" }` api, cr, _ = s.fullRunHelper(c, helperRecord, nil, 0, func(t *TestDockerClient) { content, err := ioutil.ReadFile(t.realTemp + "/tmp2/secret.conf") c.Check(err, IsNil) c.Check(content, DeepEquals, []byte("mypassword")) t.logWriter.Close() }) c.Check(api.CalledWith("container.exit_code", 0), NotNil) c.Check(api.CalledWith("container.state", "Complete"), NotNil) c.Check(cr.ContainerArvClient.(*ArvTestClient).CalledWith("collection.manifest_text", ". 34819d7beeabb9260a5c854bc85b3e44+10 0:10:secret.conf\n"), IsNil) c.Check(cr.ContainerArvClient.(*ArvTestClient).CalledWith("collection.manifest_text", ""), NotNil) } type FakeProcess struct { cmdLine []string } func (fp FakeProcess) CmdlineSlice() ([]string, error) { return fp.cmdLine, nil }
[ "\"ARVADOS_API_HOST\"", "\"ARVADOS_API_HOST\"" ]
[]
[ "ARVADOS_API_HOST" ]
[]
["ARVADOS_API_HOST"]
go
1
0
engine/test/e2e/e2e_test.go
// Copyright 2022 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package e2e_test import ( "context" "encoding/json" "fmt" "io/ioutil" "os" "sync" "testing" "time" "github.com/stretchr/testify/require" "google.golang.org/grpc" "github.com/pingcap/tiflow/engine/client" pb "github.com/pingcap/tiflow/engine/enginepb" cvs "github.com/pingcap/tiflow/engine/jobmaster/cvsjob" engineModel "github.com/pingcap/tiflow/engine/model" ) type Config struct { DemoAddrs []string `json:"demo_address"` DemoHost []string `json:"demo_host"` MasterAddrs []string `json:"master_address_list"` RecordNum int64 `json:"demo_record_num"` JobNum int `json:"job_num"` DemoDataDir string `json:"demo_data_dir"` FileNum int `json:"file_num"` } func NewConfigFromFile(file string) (*Config, error) { data, err := ioutil.ReadFile(file) if err != nil { return nil, err } var config Config err = json.Unmarshal(data, &config) if err != nil { return nil, err } return &config, nil } type DemoClient struct { conn *grpc.ClientConn client pb.DataRWServiceClient } func NewDemoClient(ctx context.Context, addr string) (*DemoClient, error) { conn, err := grpc.DialContext(ctx, addr, grpc.WithInsecure(), grpc.WithBlock()) if err != nil { return nil, err } return &DemoClient{ conn: conn, client: pb.NewDataRWServiceClient(conn), }, err } func TestSubmitTest(t *testing.T) { configPath := os.Getenv("CONFIG") if configPath == "" { configPath = "./docker.json" } config, err := NewConfigFromFile(configPath) require.NoError(t, err) for _, demoAddr := range config.DemoAddrs { ctx, cancel := 
context.WithTimeout(context.Background(), 5*time.Second) defer cancel() democlient, err := NewDemoClient(ctx, demoAddr) require.Nil(t, err) fmt.Println("connect demo " + demoAddr) resp, err := democlient.client.GenerateData(ctx, &pb.GenerateDataRequest{ FileNum: int32(config.FileNum), RecordNum: int32(config.RecordNum), }) require.Nil(t, err) require.Empty(t, resp.ErrMsg) } flowControl := make(chan struct{}, 50) // avoid job swarming go func() { for i := 1; i <= config.JobNum; i++ { if i%50 == 0 { time.Sleep(100 * time.Millisecond) } flowControl <- struct{}{} } }() var wg sync.WaitGroup wg.Add(config.JobNum) for i := 1; i <= config.JobNum; i++ { demoAddr := config.DemoAddrs[i%len(config.DemoAddrs)] demoHost := config.DemoHost[i%len(config.DemoHost)] go func(idx int) { defer wg.Done() cfg := &cvs.Config{ DstDir: fmt.Sprintf(config.DemoDataDir+"/data%d", idx), SrcHost: demoHost, DstHost: demoHost, FileNum: config.FileNum, } testSubmitTest(t, cfg, config, demoAddr, flowControl) }(i) } wg.Wait() } // run this test after docker-compose has been up func testSubmitTest(t *testing.T, cfg *cvs.Config, config *Config, demoAddr string, flowControl chan struct{}) { ctx := context.Background() fmt.Printf("connect demo\n") democlient, err := NewDemoClient(ctx, demoAddr) require.Nil(t, err) fmt.Printf("connect clients\n") masterclient, err := client.NewMasterClient(ctx, config.MasterAddrs) require.Nil(t, err) for { resp, err := democlient.client.IsReady(ctx, &pb.IsReadyRequest{}) require.Nil(t, err) if resp.Ready { break } time.Sleep(10 * time.Millisecond) } configBytes, err := json.Marshal(cfg) require.Nil(t, err) <-flowControl fmt.Printf("test is ready\n") resp, err := masterclient.SubmitJob(ctx, &pb.SubmitJobRequest{ Tp: int32(engineModel.JobTypeCVSDemo), Config: configBytes, }) require.Nil(t, err) require.Nil(t, resp.Err) fmt.Printf("job id %s\n", resp.JobId) queryReq := &pb.QueryJobRequest{ JobId: resp.JobId, } // continue to query for { ctx1, cancel := 
context.WithTimeout(ctx, 3*time.Second) queryResp, err := masterclient.QueryJob(ctx1, queryReq) require.NoError(t, err) require.Nil(t, queryResp.Err) require.Equal(t, queryResp.Tp, int32(engineModel.JobTypeCVSDemo)) cancel() fmt.Printf("query id %s, status %d, time %s\n", resp.JobId, int(queryResp.Status), time.Now().Format("2006-01-02 15:04:05")) if queryResp.Status == pb.QueryJobResponse_finished { break } time.Sleep(time.Second) } fmt.Printf("job id %s checking\n", resp.JobId) // check files demoResp, err := democlient.client.CheckDir(ctx, &pb.CheckDirRequest{ Dir: cfg.DstDir, }) require.Nil(t, err, resp.JobId) require.Empty(t, demoResp.ErrMsg, demoResp.ErrFileIdx) }
[ "\"CONFIG\"" ]
[]
[ "CONFIG" ]
[]
["CONFIG"]
go
1
0
third_party/catapult/perf_insights/perf_insights/endpoints/cloud_mapper/test.py
# Copyright (c) 2015 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. import os import webapp2 from perf_insights import cloud_config def _is_devserver(): return os.environ.get('SERVER_SOFTWARE', '').startswith('Development') _DEFAULT_MAPPER = """ <!DOCTYPE html> <!-- Copyright (c) 2015 The Chromium Authors. All rights reserved. Use of this source code is governed by a BSD-style license that can be found in the LICENSE file. --> <link rel="import" href="/perf_insights/function_handle.html"> <link rel="import" href="/tracing/value/value.html"> <script> 'use strict'; tr.exportTo('pi.m', function() { function testMapFunction(results, runInfo, model) { var someValue = 4; // Chosen by fair roll of the dice. results.addResult('simon', {value: someValue}); } pi.FunctionRegistry.register(testMapFunction); return { testMapFunction: testMapFunction }; }); </script> """ _DEFAULT_REDUCER = """ <!DOCTYPE html> <!-- Copyright (c) 2015 The Chromium Authors. All rights reserved. Use of this source code is governed by a BSD-style license that can be found in the LICENSE file. 
--> <link rel="import" href="/perf_insights/function_handle.html"> <script> 'use strict'; tr.exportTo('pi.r', function() { function testReduceFunction(key, mapResults) { return {value: mapResults[key].value}; } pi.FunctionRegistry.register(testReduceFunction); return { testReduceFunction: testReduceFunction }; }); </script> """ _DEFAULT_FUNCTION = 'testMapFunction' _DEFAULT_REDUCER_FUNCTION = 'testReduceFunction' _FORM_HTML = """ <!DOCTYPE html> <html> <body> <form action="/cloud_mapper/create" method="POST"> Mapper: <br><textarea rows="15" cols="80" name="mapper">{mapper}</textarea> <br> FunctionName: <br><input type="text" name="mapper_function" value="{mapper_function}"/> <br> Reducer: <br><textarea rows="15" cols="80" name="reducer">{reducer}</textarea> <br> ReducerName: <br><input type="text" name="reducer_function" value="{reducer_function}"/> <br> Query: <br><input type="text" name="query" value="{query}"/> <br> Corpus: <br><input type="text" name="corpus" value="{corpus}"/> <br> <input type="submit" name="submit" value="Submit"/> </form> </body> </html> """ class TestPage(webapp2.RequestHandler): def get(self): form_html = _FORM_HTML.format(mapper=_DEFAULT_MAPPER, mapper_function=_DEFAULT_FUNCTION, reducer=_DEFAULT_REDUCER, reducer_function=_DEFAULT_REDUCER_FUNCTION, query='MAX_TRACE_HANDLES=10', corpus=cloud_config.Get().default_corpus) self.response.out.write(form_html) app = webapp2.WSGIApplication([('/cloud_mapper/test', TestPage)])
[]
[]
[ "SERVER_SOFTWARE" ]
[]
["SERVER_SOFTWARE"]
python
1
0
drf-base/manage.py
#!/usr/bin/env python """Django's command-line utility for administrative tasks.""" import os import sys def main(): os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'drf-base.settings') try: from django.core.management import execute_from_command_line except ImportError as exc: raise ImportError( "Couldn't import Django. Are you sure it's installed and " "available on your PYTHONPATH environment variable? Did you " "forget to activate a virtual environment?" ) from exc execute_from_command_line(sys.argv) if __name__ == '__main__': main()
[]
[]
[]
[]
[]
python
0
0
contrib/gitian-build.py
#!/usr/bin/env python3 import argparse import os import subprocess import sys def setup(): global args, workdir programs = ['ruby', 'git', 'make', 'wget', 'curl'] if args.kvm: programs += ['apt-cacher-ng', 'python-vm-builder', 'qemu-kvm', 'qemu-utils'] elif args.docker and not os.path.isfile('/lib/systemd/system/docker.service'): dockers = ['docker.io', 'docker-ce'] for i in dockers: return_code = subprocess.call(['sudo', 'apt-get', 'install', '-qq', i]) if return_code == 0: break if return_code != 0: print('Cannot find any way to install Docker.', file=sys.stderr) sys.exit(1) else: programs += ['apt-cacher-ng', 'lxc', 'debootstrap'] subprocess.check_call(['sudo', 'apt-get', 'install', '-qq'] + programs) if not os.path.isdir('gitian.sigs'): subprocess.check_call(['git', 'clone', 'https://github.com/cicoin-core/gitian.sigs.git']) if not os.path.isdir('cicoin-detached-sigs'): subprocess.check_call(['git', 'clone', 'https://github.com/cicoin-core/cicoin-detached-sigs.git']) if not os.path.isdir('gitian-builder'): subprocess.check_call(['git', 'clone', 'https://github.com/devrandom/gitian-builder.git']) if not os.path.isdir('cicoin'): subprocess.check_call(['git', 'clone', 'https://github.com/cicoin/cicoin.git']) os.chdir('gitian-builder') make_image_prog = ['bin/make-base-vm', '--suite', 'bionic', '--arch', 'amd64'] if args.docker: make_image_prog += ['--docker'] elif not args.kvm: make_image_prog += ['--lxc'] subprocess.check_call(make_image_prog) os.chdir(workdir) if args.is_bionic and not args.kvm and not args.docker: subprocess.check_call(['sudo', 'sed', '-i', 's/lxcbr0/br0/', '/etc/default/lxc-net']) print('Reboot is required') sys.exit(0) def build(): global args, workdir os.makedirs('cicoin-binaries/' + args.version, exist_ok=True) print('\nBuilding Dependencies\n') os.chdir('gitian-builder') os.makedirs('inputs', exist_ok=True) subprocess.check_call(['wget', '-N', '-P', 'inputs', 
'https://downloads.sourceforge.net/project/osslsigncode/osslsigncode/osslsigncode-1.7.1.tar.gz']) subprocess.check_call(['wget', '-N', '-P', 'inputs', 'https://cicoincore.org/cfields/osslsigncode-Backports-to-1.7.1.patch']) subprocess.check_call(["echo 'a8c4e9cafba922f89de0df1f2152e7be286aba73f78505169bc351a7938dd911 inputs/osslsigncode-Backports-to-1.7.1.patch' | sha256sum -c"], shell=True) subprocess.check_call(["echo 'f9a8cdb38b9c309326764ebc937cba1523a3a751a7ab05df3ecc99d18ae466c9 inputs/osslsigncode-1.7.1.tar.gz' | sha256sum -c"], shell=True) subprocess.check_call(['make', '-C', '../cicoin/depends', 'download', 'SOURCES_PATH=' + os.getcwd() + '/cache/common']) if args.linux: print('\nCompiling ' + args.version + ' Linux') subprocess.check_call(['bin/gbuild', '-j', args.jobs, '-m', args.memory, '--commit', 'cicoin='+args.commit, '--url', 'cicoin='+args.url, '../cicoin/contrib/gitian-descriptors/gitian-linux.yml']) subprocess.check_call(['bin/gsign', '-p', args.sign_prog, '--signer', args.signer, '--release', args.version+'-linux', '--destination', '../gitian.sigs/', '../cicoin/contrib/gitian-descriptors/gitian-linux.yml']) subprocess.check_call('mv build/out/cicoin-*.tar.gz build/out/src/cicoin-*.tar.gz ../cicoin-binaries/'+args.version, shell=True) if args.windows: print('\nCompiling ' + args.version + ' Windows') subprocess.check_call(['bin/gbuild', '-j', args.jobs, '-m', args.memory, '--commit', 'cicoin='+args.commit, '--url', 'cicoin='+args.url, '../cicoin/contrib/gitian-descriptors/gitian-win.yml']) subprocess.check_call(['bin/gsign', '-p', args.sign_prog, '--signer', args.signer, '--release', args.version+'-win-unsigned', '--destination', '../gitian.sigs/', '../cicoin/contrib/gitian-descriptors/gitian-win.yml']) subprocess.check_call('mv build/out/cicoin-*-win-unsigned.tar.gz inputs/', shell=True) subprocess.check_call('mv build/out/cicoin-*.zip build/out/cicoin-*.exe build/out/src/cicoin-*.tar.gz ../cicoin-binaries/'+args.version, shell=True) if 
args.macos: print('\nCompiling ' + args.version + ' MacOS') subprocess.check_call(['bin/gbuild', '-j', args.jobs, '-m', args.memory, '--commit', 'cicoin='+args.commit, '--url', 'cicoin='+args.url, '../cicoin/contrib/gitian-descriptors/gitian-osx.yml']) subprocess.check_call(['bin/gsign', '-p', args.sign_prog, '--signer', args.signer, '--release', args.version+'-osx-unsigned', '--destination', '../gitian.sigs/', '../cicoin/contrib/gitian-descriptors/gitian-osx.yml']) subprocess.check_call('mv build/out/cicoin-*-osx-unsigned.tar.gz inputs/', shell=True) subprocess.check_call('mv build/out/cicoin-*.tar.gz build/out/cicoin-*.dmg build/out/src/cicoin-*.tar.gz ../cicoin-binaries/'+args.version, shell=True) os.chdir(workdir) if args.commit_files: print('\nCommitting '+args.version+' Unsigned Sigs\n') os.chdir('gitian.sigs') subprocess.check_call(['git', 'add', args.version+'-linux/'+args.signer]) subprocess.check_call(['git', 'add', args.version+'-win-unsigned/'+args.signer]) subprocess.check_call(['git', 'add', args.version+'-osx-unsigned/'+args.signer]) subprocess.check_call(['git', 'commit', '-m', 'Add '+args.version+' unsigned sigs for '+args.signer]) os.chdir(workdir) def sign(): global args, workdir os.chdir('gitian-builder') if args.windows: print('\nSigning ' + args.version + ' Windows') subprocess.check_call('cp inputs/cicoin-' + args.version + '-win-unsigned.tar.gz inputs/cicoin-win-unsigned.tar.gz', shell=True) subprocess.check_call(['bin/gbuild', '--skip-image', '--upgrade', '--commit', 'signature='+args.commit, '../cicoin/contrib/gitian-descriptors/gitian-win-signer.yml']) subprocess.check_call(['bin/gsign', '-p', args.sign_prog, '--signer', args.signer, '--release', args.version+'-win-signed', '--destination', '../gitian.sigs/', '../cicoin/contrib/gitian-descriptors/gitian-win-signer.yml']) subprocess.check_call('mv build/out/cicoin-*win64-setup.exe ../cicoin-binaries/'+args.version, shell=True) if args.macos: print('\nSigning ' + args.version + ' MacOS') 
subprocess.check_call('cp inputs/cicoin-' + args.version + '-osx-unsigned.tar.gz inputs/cicoin-osx-unsigned.tar.gz', shell=True) subprocess.check_call(['bin/gbuild', '--skip-image', '--upgrade', '--commit', 'signature='+args.commit, '../cicoin/contrib/gitian-descriptors/gitian-osx-signer.yml']) subprocess.check_call(['bin/gsign', '-p', args.sign_prog, '--signer', args.signer, '--release', args.version+'-osx-signed', '--destination', '../gitian.sigs/', '../cicoin/contrib/gitian-descriptors/gitian-osx-signer.yml']) subprocess.check_call('mv build/out/cicoin-osx-signed.dmg ../cicoin-binaries/'+args.version+'/cicoin-'+args.version+'-osx.dmg', shell=True) os.chdir(workdir) if args.commit_files: print('\nCommitting '+args.version+' Signed Sigs\n') os.chdir('gitian.sigs') subprocess.check_call(['git', 'add', args.version+'-win-signed/'+args.signer]) subprocess.check_call(['git', 'add', args.version+'-osx-signed/'+args.signer]) subprocess.check_call(['git', 'commit', '-a', '-m', 'Add '+args.version+' signed binary sigs for '+args.signer]) os.chdir(workdir) def verify(): global args, workdir rc = 0 os.chdir('gitian-builder') print('\nVerifying v'+args.version+' Linux\n') if subprocess.call(['bin/gverify', '-v', '-d', '../gitian.sigs/', '-r', args.version+'-linux', '../cicoin/contrib/gitian-descriptors/gitian-linux.yml']): print('Verifying v'+args.version+' Linux FAILED\n') rc = 1 print('\nVerifying v'+args.version+' Windows\n') if subprocess.call(['bin/gverify', '-v', '-d', '../gitian.sigs/', '-r', args.version+'-win-unsigned', '../cicoin/contrib/gitian-descriptors/gitian-win.yml']): print('Verifying v'+args.version+' Windows FAILED\n') rc = 1 print('\nVerifying v'+args.version+' MacOS\n') if subprocess.call(['bin/gverify', '-v', '-d', '../gitian.sigs/', '-r', args.version+'-osx-unsigned', '../cicoin/contrib/gitian-descriptors/gitian-osx.yml']): print('Verifying v'+args.version+' MacOS FAILED\n') rc = 1 print('\nVerifying v'+args.version+' Signed Windows\n') if 
subprocess.call(['bin/gverify', '-v', '-d', '../gitian.sigs/', '-r', args.version+'-win-signed', '../cicoin/contrib/gitian-descriptors/gitian-win-signer.yml']): print('Verifying v'+args.version+' Signed Windows FAILED\n') rc = 1 print('\nVerifying v'+args.version+' Signed MacOS\n') if subprocess.call(['bin/gverify', '-v', '-d', '../gitian.sigs/', '-r', args.version+'-osx-signed', '../cicoin/contrib/gitian-descriptors/gitian-osx-signer.yml']): print('Verifying v'+args.version+' Signed MacOS FAILED\n') rc = 1 os.chdir(workdir) return rc def main(): global args, workdir parser = argparse.ArgumentParser(description='Script for running full Gitian builds.') parser.add_argument('-c', '--commit', action='store_true', dest='commit', help='Indicate that the version argument is for a commit or branch') parser.add_argument('-p', '--pull', action='store_true', dest='pull', help='Indicate that the version argument is the number of a github repository pull request') parser.add_argument('-u', '--url', dest='url', default='https://github.com/cicoin/cicoin', help='Specify the URL of the repository. Default is %(default)s') parser.add_argument('-v', '--verify', action='store_true', dest='verify', help='Verify the Gitian build') parser.add_argument('-b', '--build', action='store_true', dest='build', help='Do a Gitian build') parser.add_argument('-s', '--sign', action='store_true', dest='sign', help='Make signed binaries for Windows and MacOS') parser.add_argument('-B', '--buildsign', action='store_true', dest='buildsign', help='Build both signed and unsigned binaries') parser.add_argument('-o', '--os', dest='os', default='lwm', help='Specify which Operating Systems the build is for. Default is %(default)s. l for Linux, w for Windows, m for MacOS') parser.add_argument('-j', '--jobs', dest='jobs', default='2', help='Number of processes to use. Default %(default)s') parser.add_argument('-m', '--memory', dest='memory', default='2000', help='Memory to allocate in MiB. 
Default %(default)s') parser.add_argument('-k', '--kvm', action='store_true', dest='kvm', help='Use KVM instead of LXC') parser.add_argument('-d', '--docker', action='store_true', dest='docker', help='Use Docker instead of LXC') parser.add_argument('-S', '--setup', action='store_true', dest='setup', help='Set up the Gitian building environment. Only works on Debian-based systems (Ubuntu, Debian)') parser.add_argument('-D', '--detach-sign', action='store_true', dest='detach_sign', help='Create the assert file for detached signing. Will not commit anything.') parser.add_argument('-n', '--no-commit', action='store_false', dest='commit_files', help='Do not commit anything to git') parser.add_argument('signer', nargs='?', help='GPG signer to sign each build assert file') parser.add_argument('version', nargs='?', help='Version number, commit, or branch to build. If building a commit or branch, the -c option must be specified') args = parser.parse_args() workdir = os.getcwd() args.is_bionic = b'bionic' in subprocess.check_output(['lsb_release', '-cs']) if args.kvm and args.docker: raise Exception('Error: cannot have both kvm and docker') # Ensure no more than one environment variable for gitian-builder (USE_LXC, USE_VBOX, USE_DOCKER) is set as they # can interfere (e.g., USE_LXC being set shadows USE_DOCKER; for details see gitian-builder/libexec/make-clean-vm). 
os.environ['USE_LXC'] = '' os.environ['USE_VBOX'] = '' os.environ['USE_DOCKER'] = '' if args.docker: os.environ['USE_DOCKER'] = '1' elif not args.kvm: os.environ['USE_LXC'] = '1' if 'GITIAN_HOST_IP' not in os.environ.keys(): os.environ['GITIAN_HOST_IP'] = '10.0.3.1' if 'LXC_GUEST_IP' not in os.environ.keys(): os.environ['LXC_GUEST_IP'] = '10.0.3.5' if args.setup: setup() if args.buildsign: args.build = True args.sign = True if not args.build and not args.sign and not args.verify: sys.exit(0) args.linux = 'l' in args.os args.windows = 'w' in args.os args.macos = 'm' in args.os # Disable for MacOS if no SDK found if args.macos and not os.path.isfile('gitian-builder/inputs/MacOSX10.11.sdk.tar.gz'): print('Cannot build for MacOS, SDK does not exist. Will build for other OSes') args.macos = False args.sign_prog = 'true' if args.detach_sign else 'gpg --detach-sign' script_name = os.path.basename(sys.argv[0]) if not args.signer: print(script_name+': Missing signer') print('Try '+script_name+' --help for more information') sys.exit(1) if not args.version: print(script_name+': Missing version') print('Try '+script_name+' --help for more information') sys.exit(1) # Add leading 'v' for tags if args.commit and args.pull: raise Exception('Cannot have both commit and pull') args.commit = ('' if args.commit else 'v') + args.version os.chdir('cicoin') if args.pull: subprocess.check_call(['git', 'fetch', args.url, 'refs/pull/'+args.version+'/merge']) os.chdir('../gitian-builder/inputs/cicoin') subprocess.check_call(['git', 'fetch', args.url, 'refs/pull/'+args.version+'/merge']) args.commit = subprocess.check_output(['git', 'show', '-s', '--format=%H', 'FETCH_HEAD'], universal_newlines=True, encoding='utf8').strip() args.version = 'pull-' + args.version print(args.commit) subprocess.check_call(['git', 'fetch']) subprocess.check_call(['git', 'checkout', args.commit]) os.chdir(workdir) os.chdir('gitian-builder') subprocess.check_call(['git', 'pull']) os.chdir(workdir) if args.build: 
build() if args.sign: sign() if args.verify: os.chdir('gitian.sigs') subprocess.check_call(['git', 'pull']) os.chdir(workdir) sys.exit(verify()) if __name__ == '__main__': main()
[]
[]
[ "USE_DOCKER", "USE_LXC", "USE_VBOX", "GITIAN_HOST_IP", "LXC_GUEST_IP" ]
[]
["USE_DOCKER", "USE_LXC", "USE_VBOX", "GITIAN_HOST_IP", "LXC_GUEST_IP"]
python
5
0
astropy/conftest.py
# Licensed under a 3-clause BSD style license - see LICENSE.rst """ This file contains pytest configuration settings that are astropy-specific (i.e. those that would not necessarily be shared by affiliated packages making use of astropy's test runner). """ import os import builtins import tempfile try: from pytest_astropy_header.display import PYTEST_HEADER_MODULES except ImportError: PYTEST_HEADER_MODULES = {} from astropy.tests.helper import enable_deprecations_as_exceptions try: import matplotlib except ImportError: HAS_MATPLOTLIB = False else: HAS_MATPLOTLIB = True enable_deprecations_as_exceptions( include_astropy_deprecations=False, # This is a workaround for the OpenSSL deprecation warning that comes from # the `requests` module. It only appears when both asdf and sphinx are # installed. This can be removed once pyopenssl 1.7.20+ is released. modules_to_ignore_on_import=['requests'], warnings_to_ignore_by_pyver={ # This warning shows up in mpl <3.1.2 on python 3.8, # remove once 3.1.2 is released (3, 8): set([(r"In future, it will be an error for 'np.bool_' scalars " "to be interpreted as an index", DeprecationWarning),])}) if HAS_MATPLOTLIB: matplotlib.use('Agg') matplotlibrc_cache = {} def pytest_configure(config): builtins._pytest_running = True # do not assign to matplotlibrc_cache in function scope if HAS_MATPLOTLIB: matplotlibrc_cache.update(matplotlib.rcParams) matplotlib.rcdefaults() # Make sure we use temporary directories for the config and cache # so that the tests are insensitive to local configuration. 
Note that this # is also set in the test runner, but we need to also set it here for # things to work properly in parallel mode builtins._xdg_config_home_orig = os.environ.get('XDG_CONFIG_HOME') builtins._xdg_cache_home_orig = os.environ.get('XDG_CACHE_HOME') os.environ['XDG_CONFIG_HOME'] = tempfile.mkdtemp('astropy_config') os.environ['XDG_CACHE_HOME'] = tempfile.mkdtemp('astropy_cache') os.mkdir(os.path.join(os.environ['XDG_CONFIG_HOME'], 'astropy')) os.mkdir(os.path.join(os.environ['XDG_CACHE_HOME'], 'astropy')) config.option.astropy_header = True PYTEST_HEADER_MODULES['Cython'] = 'cython' PYTEST_HEADER_MODULES['Scikit-image'] = 'skimage' PYTEST_HEADER_MODULES['asdf'] = 'asdf' def pytest_unconfigure(config): builtins._pytest_running = False # do not assign to matplotlibrc_cache in function scope if HAS_MATPLOTLIB: matplotlib.rcParams.update(matplotlibrc_cache) matplotlibrc_cache.clear() if builtins._xdg_config_home_orig is None: os.environ.pop('XDG_CONFIG_HOME') else: os.environ['XDG_CONFIG_HOME'] = builtins._xdg_config_home_orig if builtins._xdg_cache_home_orig is None: os.environ.pop('XDG_CACHE_HOME') else: os.environ['XDG_CACHE_HOME'] = builtins._xdg_cache_home_orig def pytest_terminal_summary(terminalreporter): """Output a warning to IPython users in case any tests failed.""" try: get_ipython() except NameError: return if not terminalreporter.stats.get('failed'): # Only issue the warning when there are actually failures return terminalreporter.ensure_newline() terminalreporter.write_line( 'Some tests are known to fail when run from the IPython prompt; ' 'especially, but not limited to tests involving logging and warning ' 'handling. Unless you are certain as to the cause of the failure, ' 'please check that the failure occurs outside IPython as well. See ' 'http://docs.astropy.org/en/stable/known_issues.html#failing-logging-' 'tests-when-running-the-tests-in-ipython for more information.', yellow=True, bold=True)
[]
[]
[ "XDG_CONFIG_HOME", "XDG_CACHE_HOME" ]
[]
["XDG_CONFIG_HOME", "XDG_CACHE_HOME"]
python
2
0
cmd/cmdexecutor/cmdexecutor.go
package main import ( "flag" "fmt" "os" "strings" "time" "github.com/libopenstorage/stork/pkg/cmdexecutor" "github.com/libopenstorage/stork/pkg/cmdexecutor/status" "github.com/libopenstorage/stork/pkg/version" "github.com/sirupsen/logrus" ) const ( defaultStatusCheckTimeout = 900 statusFile = "/tmp/cmdexecutor-status" ) type arrayFlags []string func (i *arrayFlags) String() string { return strings.Join(*i, ",") } func (i *arrayFlags) Set(value string) error { *i = append(*i, value) return nil } func parsePodNameAndNamespace(podString string) (string, string, error) { if strings.Contains(podString, "/") { parts := strings.Split(podString, "/") if len(parts) != 2 { return "", "", fmt.Errorf("invalid pod string: %s", podString) } return parts[0], parts[1], nil } return "default", podString, nil } func createPodStringFromNameAndNamespace(namespace, name string) string { return namespace + "/" + name } func main() { logrus.Infof("Running pod command executor: %v", version.Version) flag.Parse() if len(podList) == 0 { logrus.Fatalf("no pods specified to the command executor") } if len(command) == 0 { logrus.Fatalf("no command specified to the command executor") } if len(taskID) == 0 { logrus.Fatalf("no taskid specified to the command executor") } logrus.Infof("Using timeout: %v seconds", statusCheckTimeout) _, err := os.Stat(statusFile) if err == nil { err = os.Remove(statusFile) if err != nil { logrus.Fatalf("failed to remove statusfile: %s due to: %v", statusFile, err) } } // Get hostname which will be used as a key to track status of this command executor's commands hostname, err := getHostname() if err != nil { logrus.Fatalf(err.Error()) } executors := make([]cmdexecutor.Executor, 0) errChans := make(map[string]chan error) // Start the commands for _, pod := range podList { namespace, name, err := parsePodNameAndNamespace(pod) if err != nil { logrus.Fatalf("failed to parse pod due to: %v", err) } executor := cmdexecutor.Init(namespace, name, podContainer, command, 
taskID) errChan := make(chan error) errChans[createPodStringFromNameAndNamespace(namespace, name)] = errChan err = executor.Start(errChan) if err != nil { msg := fmt.Sprintf("failed to run command in pod: [%s] %s due to: %v", namespace, name, err) persistStatusErr := status.Persist(hostname, msg) if persistStatusErr != nil { logrus.Warnf("failed to persist cmd executor status due to: %v", persistStatusErr) } logrus.Fatalf(msg) } executors = append(executors, executor) } // Create an aggregrate channel for all error channels for above executors aggErrorChan := make(chan error) for _, ch := range errChans { go func(c chan error) { for err := range c { aggErrorChan <- err } }(ch) } // Check command status done := make(chan bool) logrus.Infof("Checking status on command: %s", command) for _, executor := range executors { ns, name := executor.GetPod() podKey := createPodStringFromNameAndNamespace(ns, name) go func(errChan chan error, doneChan chan bool, execInst cmdexecutor.Executor) { err := execInst.Wait(time.Duration(statusCheckTimeout) * time.Second) if err != nil { errChan <- err return } doneChan <- true }(errChans[podKey], done, executor) } // Now go into a wait loop which will exit if either of the 2 things happen // 1) If any of the executors return error (FAIL) // 2) All the executors complete successfully (PASS) doneCount := 0 Loop: for { select { case err := <-aggErrorChan: // If we hit any error, persist the error using hostname as key and then exit persistStatusErr := status.Persist(hostname, err.Error()) if persistStatusErr != nil { logrus.Warnf("failed to persist cmd executor status due to: %v", persistStatusErr) } logrus.Fatalf(err.Error()) case isDone := <-done: if isDone { // as each executor is done, track how many are done doneCount++ if doneCount == len(executors) { logrus.Infof("successfully executed command: %s on all pods: %v", command, podList) _, err = os.OpenFile(statusFile, os.O_RDONLY|os.O_CREATE, 0666) if err != nil { logrus.Fatalf("failed 
to create statusfile: %s due to: %v", statusFile, err) } // All executors are done, we can exit successfully now break Loop } } } } } func getHostname() (string, error) { var err error hostname := os.Getenv("HOSTNAME") if len(hostname) == 0 { hostname, err = os.Hostname() if err != nil { return "", fmt.Errorf("failed to get hostname of command executor due to: %v", err) } } return hostname, nil } // command line arguments var ( podList arrayFlags podContainer string command string statusCheckTimeout int64 taskID string ) func init() { flag.Var(&podList, "pod", "Pod on which to run the command. Format: <pod-namespace>/<pod-name> e.g dev/pod-12345") flag.StringVar(&podContainer, "container", "", "(Optional) name of the container within the pod on which to run the command. If not specified, executor will pick the first container.") flag.StringVar(&command, "cmd", "", "The command to run inside the pod") flag.StringVar(&taskID, "taskid", "", "A unique ID the caller can provide which can be later used to clean the status files created by the command executor.") flag.Int64Var(&statusCheckTimeout, "timeout", int64(defaultStatusCheckTimeout), "Time in seconds to wait for the command to succeeded on a single pod") }
[ "\"HOSTNAME\"" ]
[]
[ "HOSTNAME" ]
[]
["HOSTNAME"]
go
1
0
project/model.py
"""Create model.""" # coding=utf-8 # # /************************************************************************************ # *** # *** Copyright Dell 2021, All Rights Reserved. # *** # *** File Author: Dell, 2021年 03月 02日 星期二 12:48:05 CST # *** # ************************************************************************************/ # import math import os import pdb import sys import torch import torch.nn as nn from tqdm import tqdm def model_save(model, path): """Save model.""" torch.save(model.state_dict(), path) class Counter(object): """Class Counter.""" def __init__(self): """Init average.""" self.reset() def reset(self): """Reset average.""" self.val = 0 self.avg = 0 self.sum = 0 self.count = 0 def update(self, val, n=1): """Update average.""" self.val = val self.sum += val * n self.count += n self.avg = self.sum / self.count def train_epoch(loader, model, optimizer, device, tag=""): """Trainning model ...""" total_loss = Counter() model.train() with tqdm(total=len(loader.dataset)) as t: t.set_description(tag) for data in loader: images, labels = data count = len(images) # Transform data to device images = images.to(device) labels = labels.to(device) labels = labels.repeat(1, model.output_latents, 1) predicts = model(images) loss = nn.L1Loss()(predicts, labels) loss_value = loss.item() if not math.isfinite(loss_value): print("Loss is {}, stopping training".format(loss_value)) sys.exit(1) # Update loss total_loss.update(loss_value, count) t.set_postfix(loss=">{:.6f}".format(total_loss.avg)) t.update(count) # Optimizer optimizer.zero_grad() loss.backward() optimizer.step() return total_loss.avg def valid_epoch(loader, model, device, tag=""): """Validating model ...""" valid_loss = Counter() model.eval() with tqdm(total=len(loader.dataset)) as t: t.set_description(tag) for data in loader: images, labels = data count = len(images) # Transform data to device images = images.to(device) labels = labels.to(device) labels = labels.repeat(1, model.output_latents, 1) # 
Predict results without calculating gradients with torch.no_grad(): predicts = model(images) loss = nn.L1Loss()(predicts, labels) loss_value = loss.item() valid_loss.update(loss_value, count) t.set_postfix(loss="---{:.6f}".format(valid_loss.avg)) t.update(count) def model_device(): """Please call after model_setenv.""" return torch.device(os.environ["DEVICE"]) def model_setenv(): """Setup environ ...""" # random init ... import random random.seed(42) torch.manual_seed(42) # Set default device to avoid exceptions if os.environ.get("DEVICE") != "cuda" and os.environ.get("DEVICE") != "cpu": os.environ["DEVICE"] = "cuda" if torch.cuda.is_available() else "cpu" if os.environ["DEVICE"] == "cuda": torch.backends.cudnn.enabled = True torch.backends.cudnn.benchmark = True print("Running Environment:") print("----------------------------------------------") print(" PWD: ", os.environ["PWD"]) print(" DEVICE: ", os.environ["DEVICE"])
[]
[]
[ "DEVICE", "PWD" ]
[]
["DEVICE", "PWD"]
python
2
0
aion/mysql/query.py
# coding: utf-8 # Copyright (c) 2019-2020 Latona. All rights reserved. import os import traceback import MySQLdb from aion.logger import lprint, lprint_exception class BaseMysqlAccess(): cursor = None default_mysql_host = "mysql" default_mysql_port = "3306" default_mysql_user = "latona" def __init__(self, db_name): self._db_name = db_name def __enter__(self): try: self.connection = MySQLdb.connect( host=os.environ.get('MY_MYSQL_HOST', self.default_mysql_host), port=int(os.environ.get('MY_MYSQL_PORT', self.default_mysql_port)), user=os.environ.get('MYSQL_USER', self.default_mysql_user), passwd=os.environ.get('MYSQL_PASSWORD'), db=self._db_name, charset='utf8') self.cursor = self.connection.cursor(MySQLdb.cursors.DictCursor) except MySQLdb.Error as e: lprint("cant connect mysql") lprint_exception(e) self.cursor = None raise e return self def __exit__(self, exc_type, exc_value, tb): if exc_type is not None: for message in traceback.format_exception(exc_type, exc_value, tb): lprint(message.rstrip('\n')) if self.cursor: self.cursor.close() self.connection.close() return True def get_query(self, sql, args=None): if not self.cursor: return None self.cursor.execute(sql, args) return self.cursor.fetchone() def get_query_list(self, size, sql, args=None): if not self.cursor: return None self.cursor.execute(sql, args) return self.cursor.fetchmany(size) def set_query(self, sql, args=None): if not self.cursor: return False self.cursor.execute(sql, args) return True def commit_query(self): self.connection.commit() def is_connect(self): return bool(self.cursor)
[]
[]
[ "MYSQL_USER", "MY_MYSQL_PORT", "MY_MYSQL_HOST", "MYSQL_PASSWORD" ]
[]
["MYSQL_USER", "MY_MYSQL_PORT", "MY_MYSQL_HOST", "MYSQL_PASSWORD"]
python
4
0
educate/manage.py
#!/usr/bin/env python import os import sys if __name__ == "__main__": os.environ.setdefault("DJANGO_SETTINGS_MODULE", "educate.settings") try: from django.core.management import execute_from_command_line except ImportError as exc: raise ImportError( "Couldn't import Django. Are you sure it's installed and " "available on your PYTHONPATH environment variable? Did you " "forget to activate a virtual environment?" ) from exc execute_from_command_line(sys.argv)
[]
[]
[]
[]
[]
python
0
0
pkg/start/start.go
// package start initializes and launches the core cluster version operator // loops. package start import ( "context" "crypto/tls" "fmt" "io/ioutil" "math/rand" "os" "os/signal" "syscall" "time" "github.com/google/uuid" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/client-go/informers" "k8s.io/client-go/kubernetes" "k8s.io/client-go/kubernetes/scheme" coreclientsetv1 "k8s.io/client-go/kubernetes/typed/core/v1" "k8s.io/client-go/rest" "k8s.io/client-go/tools/clientcmd" "k8s.io/client-go/tools/leaderelection" "k8s.io/client-go/tools/leaderelection/resourcelock" "k8s.io/client-go/tools/record" "k8s.io/client-go/util/flowcontrol" "k8s.io/klog/v2" clientset "github.com/openshift/client-go/config/clientset/versioned" externalversions "github.com/openshift/client-go/config/informers/externalversions" "github.com/openshift/cluster-version-operator/pkg/autoupdate" "github.com/openshift/cluster-version-operator/pkg/cvo" "github.com/openshift/cluster-version-operator/pkg/internal" "github.com/openshift/cluster-version-operator/pkg/payload" "github.com/openshift/library-go/pkg/crypto" ) const ( defaultComponentName = "version" defaultComponentNamespace = "openshift-cluster-version" minResyncPeriod = 2 * time.Minute leaseDuration = 137 * time.Second renewDeadline = 107 * time.Second retryPeriod = 26 * time.Second ) // Options are the valid inputs to starting the CVO. 
type Options struct { ReleaseImage string ServingCertFile string ServingKeyFile string Kubeconfig string NodeName string ListenAddr string EnableAutoUpdate bool EnableDefaultClusterVersion bool // Exclude is used to determine whether to exclude // certain manifests based on an annotation: // exclude.release.openshift.io/<identifier>=true Exclude string ClusterProfile string // for testing only Name string Namespace string PayloadOverride string ResyncInterval time.Duration } type asyncResult struct { name string error error } func defaultEnv(name, defaultValue string) string { env, ok := os.LookupEnv(name) if !ok { return defaultValue } return env } // NewOptions creates the default options for the CVO and loads any environment // variable overrides. func NewOptions() *Options { return &Options{ ListenAddr: "0.0.0.0:9099", NodeName: os.Getenv("NODE_NAME"), // exposed only for testing Namespace: defaultEnv("CVO_NAMESPACE", defaultComponentNamespace), Name: defaultEnv("CVO_NAME", defaultComponentName), PayloadOverride: os.Getenv("PAYLOAD_OVERRIDE"), ResyncInterval: minResyncPeriod, Exclude: os.Getenv("EXCLUDE_MANIFESTS"), ClusterProfile: defaultEnv("CLUSTER_PROFILE", payload.DefaultClusterProfile), } } func (o *Options) Run(ctx context.Context) error { if o.NodeName == "" { return fmt.Errorf("node-name is required") } if o.ReleaseImage == "" { return fmt.Errorf("missing --release-image flag, it is required") } if o.ListenAddr != "" && o.ServingCertFile == "" { return fmt.Errorf("--listen was not set empty, so --serving-cert-file must be set") } if o.ListenAddr != "" && o.ServingKeyFile == "" { return fmt.Errorf("--listen was not set empty, so --serving-key-file must be set") } if len(o.PayloadOverride) > 0 { klog.Warningf("Using an override payload directory for testing only: %s", o.PayloadOverride) } if len(o.Exclude) > 0 { klog.Infof("Excluding manifests for %q", o.Exclude) } // initialize the core objects cb, err := newClientBuilder(o.Kubeconfig) if err != nil { 
return fmt.Errorf("error creating clients: %v", err) } lock, err := createResourceLock(cb, o.Namespace, o.Name) if err != nil { return err } // initialize the controllers and attempt to load the payload information controllerCtx := o.NewControllerContext(cb) if err := controllerCtx.CVO.InitializeFromPayload(cb.RestConfig(defaultQPS), cb.RestConfig(highQPS)); err != nil { return err } o.run(ctx, controllerCtx, lock) return nil } func (o *Options) makeTLSConfig() (*tls.Config, error) { // Load the initial certificate contents. certBytes, err := ioutil.ReadFile(o.ServingCertFile) if err != nil { return nil, err } keyBytes, err := ioutil.ReadFile(o.ServingKeyFile) if err != nil { return nil, err } certificate, err := tls.X509KeyPair(certBytes, keyBytes) if err != nil { return nil, err } return crypto.SecureTLSConfig(&tls.Config{ GetCertificate: func(_ *tls.ClientHelloInfo) (*tls.Certificate, error) { return &certificate, nil }, }), nil } // run launches a number of goroutines to handle manifest application, // metrics serving, etc. It continues operating until ctx.Done(), // and then attempts a clean shutdown limited by an internal context // with a two-minute cap. It returns after it successfully collects all // launched goroutines. 
func (o *Options) run(ctx context.Context, controllerCtx *Context, lock *resourcelock.ConfigMapLock) { runContext, runCancel := context.WithCancel(ctx) // so we can cancel internally on errors or TERM defer runCancel() shutdownContext, shutdownCancel := context.WithCancel(context.Background()) // extends beyond ctx defer shutdownCancel() postMainContext, postMainCancel := context.WithCancel(context.Background()) // extends beyond ctx defer postMainCancel() launchedMain := false ch := make(chan os.Signal, 1) defer func() { signal.Stop(ch) }() signal.Notify(ch, os.Interrupt, syscall.SIGTERM) go func() { defer utilruntime.HandleCrash() sig := <-ch klog.Infof("Shutting down due to %s", sig) runCancel() sig = <-ch klog.Fatalf("Received shutdown signal twice, exiting: %s", sig) }() resultChannel := make(chan asyncResult, 1) resultChannelCount := 0 var tlsConfig *tls.Config if o.ListenAddr != "" { var err error tlsConfig, err = o.makeTLSConfig() if err != nil { klog.Fatalf("Failed to create TLS config: %v", err) } } informersDone := postMainContext.Done() // FIXME: would be nice if there was a way to collect these. 
controllerCtx.CVInformerFactory.Start(informersDone) controllerCtx.OpenshiftConfigInformerFactory.Start(informersDone) controllerCtx.OpenshiftConfigManagedInformerFactory.Start(informersDone) controllerCtx.InformerFactory.Start(informersDone) resultChannelCount++ go func() { defer utilruntime.HandleCrash() leaderelection.RunOrDie(postMainContext, leaderelection.LeaderElectionConfig{ Lock: lock, ReleaseOnCancel: true, LeaseDuration: leaseDuration, RenewDeadline: renewDeadline, RetryPeriod: retryPeriod, Callbacks: leaderelection.LeaderCallbacks{ OnStartedLeading: func(_ context.Context) { // no need for this passed-through postMainContext, because goroutines we launch inside will use runContext launchedMain = true if o.ListenAddr != "" { resultChannelCount++ go func() { defer utilruntime.HandleCrash() err := cvo.RunMetrics(postMainContext, shutdownContext, o.ListenAddr, tlsConfig) resultChannel <- asyncResult{name: "metrics server", error: err} }() } resultChannelCount++ go func() { defer utilruntime.HandleCrash() err := controllerCtx.CVO.Run(runContext, shutdownContext, 2) resultChannel <- asyncResult{name: "main operator", error: err} }() if controllerCtx.AutoUpdate != nil { resultChannelCount++ go func() { defer utilruntime.HandleCrash() err := controllerCtx.AutoUpdate.Run(runContext, 2) resultChannel <- asyncResult{name: "auto-update controller", error: err} }() } }, OnStoppedLeading: func() { klog.Info("Stopped leading; shutting down.") runCancel() }, }, }) resultChannel <- asyncResult{name: "leader controller", error: nil} }() var shutdownTimer *time.Timer for resultChannelCount > 0 { klog.Infof("Waiting on %d outstanding goroutines.", resultChannelCount) if shutdownTimer == nil { // running select { case <-runContext.Done(): klog.Info("Run context completed; beginning two-minute graceful shutdown period.") shutdownTimer = time.NewTimer(2 * time.Minute) if !launchedMain { // no need to give post-main extra time if main never ran postMainCancel() } case result 
:= <-resultChannel: resultChannelCount-- if result.error == nil { klog.Infof("Collected %s goroutine.", result.name) } else { klog.Errorf("Collected %s goroutine: %v", result.name, result.error) runCancel() // this will cause shutdownTimer initialization in the next loop } if result.name == "main operator" { postMainCancel() } } } else { // shutting down select { case <-shutdownTimer.C: // never triggers after the channel is stopped, although it would not matter much if it did because subsequent cancel calls do nothing. postMainCancel() shutdownCancel() shutdownTimer.Stop() case result := <-resultChannel: resultChannelCount-- if result.error == nil { klog.Infof("Collected %s goroutine.", result.name) } else { klog.Errorf("Collected %s goroutine: %v", result.name, result.error) } if result.name == "main operator" { postMainCancel() } } } } klog.Info("Finished collecting operator goroutines.") } // createResourceLock initializes the lock. func createResourceLock(cb *ClientBuilder, namespace, name string) (*resourcelock.ConfigMapLock, error) { client := cb.KubeClientOrDie("leader-election") eventBroadcaster := record.NewBroadcaster() eventBroadcaster.StartLogging(klog.Infof) eventBroadcaster.StartRecordingToSink(&coreclientsetv1.EventSinkImpl{Interface: client.CoreV1().Events(namespace)}) id, err := os.Hostname() if err != nil { return nil, fmt.Errorf("error creating lock: %v", err) } uuid, err := uuid.NewRandom() if err != nil { return nil, fmt.Errorf("Failed to generate UUID: %v", err) } // add a uniquifier so that two processes on the same host don't accidentally both become active id = id + "_" + uuid.String() return &resourcelock.ConfigMapLock{ ConfigMapMeta: metav1.ObjectMeta{ Namespace: namespace, Name: name, }, Client: client.CoreV1(), LockConfig: resourcelock.ResourceLockConfig{ Identity: id, EventRecorder: eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: namespace}), }, }, nil } func resyncPeriod(minResyncPeriod time.Duration) func() 
time.Duration { return func() time.Duration { factor := rand.Float64() + 1 return time.Duration(float64(minResyncPeriod.Nanoseconds()) * factor) } } // ClientBuilder simplifies returning Kubernetes client and client configs with // an appropriate user agent. type ClientBuilder struct { config *rest.Config } // RestConfig returns a copy of the ClientBuilder's rest.Config with any overrides // from the provided configFns applied. func (cb *ClientBuilder) RestConfig(configFns ...func(*rest.Config)) *rest.Config { c := rest.CopyConfig(cb.config) for _, fn := range configFns { fn(c) } return c } func (cb *ClientBuilder) ClientOrDie(name string, configFns ...func(*rest.Config)) clientset.Interface { return clientset.NewForConfigOrDie(rest.AddUserAgent(cb.RestConfig(configFns...), name)) } func (cb *ClientBuilder) KubeClientOrDie(name string, configFns ...func(*rest.Config)) kubernetes.Interface { return kubernetes.NewForConfigOrDie(rest.AddUserAgent(cb.RestConfig(configFns...), name)) } func newClientBuilder(kubeconfig string) (*ClientBuilder, error) { clientCfg := clientcmd.NewDefaultClientConfigLoadingRules() clientCfg.ExplicitPath = kubeconfig kcfg := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(clientCfg, &clientcmd.ConfigOverrides{}) config, err := kcfg.ClientConfig() if err != nil { return nil, err } return &ClientBuilder{ config: config, }, nil } func defaultQPS(config *rest.Config) { config.RateLimiter = flowcontrol.NewTokenBucketRateLimiter(20, 40) } func highQPS(config *rest.Config) { config.RateLimiter = flowcontrol.NewTokenBucketRateLimiter(40, 80) } func useProtobuf(config *rest.Config) { config.AcceptContentTypes = "application/vnd.kubernetes.protobuf,application/json" config.ContentType = "application/vnd.kubernetes.protobuf" } // Context holds the controllers for this operator and exposes a unified start command. 
type Context struct { CVO *cvo.Operator AutoUpdate *autoupdate.Controller CVInformerFactory externalversions.SharedInformerFactory OpenshiftConfigInformerFactory informers.SharedInformerFactory OpenshiftConfigManagedInformerFactory informers.SharedInformerFactory InformerFactory externalversions.SharedInformerFactory } // NewControllerContext initializes the default Context for the current Options. It does // not start any background processes. func (o *Options) NewControllerContext(cb *ClientBuilder) *Context { client := cb.ClientOrDie("shared-informer") kubeClient := cb.KubeClientOrDie(internal.ConfigNamespace, useProtobuf) cvInformer := externalversions.NewFilteredSharedInformerFactory(client, resyncPeriod(o.ResyncInterval)(), "", func(opts *metav1.ListOptions) { opts.FieldSelector = fmt.Sprintf("metadata.name=%s", o.Name) }) openshiftConfigInformer := informers.NewSharedInformerFactoryWithOptions(kubeClient, resyncPeriod(o.ResyncInterval)(), informers.WithNamespace(internal.ConfigNamespace)) openshiftConfigManagedInformer := informers.NewSharedInformerFactoryWithOptions(kubeClient, resyncPeriod(o.ResyncInterval)(), informers.WithNamespace(internal.ConfigManagedNamespace)) sharedInformers := externalversions.NewSharedInformerFactory(client, resyncPeriod(o.ResyncInterval)()) coInformer := sharedInformers.Config().V1().ClusterOperators() ctx := &Context{ CVInformerFactory: cvInformer, OpenshiftConfigInformerFactory: openshiftConfigInformer, OpenshiftConfigManagedInformerFactory: openshiftConfigManagedInformer, InformerFactory: sharedInformers, CVO: cvo.New( o.NodeName, o.Namespace, o.Name, o.ReleaseImage, o.EnableDefaultClusterVersion, o.PayloadOverride, resyncPeriod(o.ResyncInterval)(), cvInformer.Config().V1().ClusterVersions(), coInformer, openshiftConfigInformer.Core().V1().ConfigMaps(), openshiftConfigManagedInformer.Core().V1().ConfigMaps(), sharedInformers.Config().V1().Proxies(), cb.ClientOrDie(o.Namespace), cb.KubeClientOrDie(o.Namespace, useProtobuf), 
o.Exclude, o.ClusterProfile, ), } if o.EnableAutoUpdate { ctx.AutoUpdate = autoupdate.New( o.Namespace, o.Name, cvInformer.Config().V1().ClusterVersions(), sharedInformers.Config().V1().ClusterOperators(), cb.ClientOrDie(o.Namespace), cb.KubeClientOrDie(o.Namespace), ) } if o.ListenAddr != "" { if err := ctx.CVO.RegisterMetrics(coInformer.Informer()); err != nil { panic(err) } } return ctx }
[ "\"NODE_NAME\"", "\"PAYLOAD_OVERRIDE\"", "\"EXCLUDE_MANIFESTS\"" ]
[]
[ "EXCLUDE_MANIFESTS", "PAYLOAD_OVERRIDE", "NODE_NAME" ]
[]
["EXCLUDE_MANIFESTS", "PAYLOAD_OVERRIDE", "NODE_NAME"]
go
3
0
tools/tiny-test-fw/CIAssignUnitTest.py
""" Command line tool to assign unit tests to CI test jobs. """ import re import os import sys import argparse import yaml test_fw_path = os.getenv("TEST_FW_PATH") if test_fw_path: sys.path.insert(0, test_fw_path) from Utility import CIAssignTest class Group(CIAssignTest.Group): SORT_KEYS = ["Test App", "SDK", "test environment", "multi_device", "multi_stage"] CI_JOB_MATCH_KEYS = ["Test App", "SDK", "test environment"] MAX_CASE = 30 ATTR_CONVERT_TABLE = { "execution_time": "execution time" } @staticmethod def _get_case_attr(case, attr): if attr in Group.ATTR_CONVERT_TABLE: attr = Group.ATTR_CONVERT_TABLE[attr] return case[attr] @staticmethod def _get_ut_config(test_app): # we format test app "UT_ + config" when parsing test cases # now we need to extract config assert test_app[:3] == "UT_" return test_app[3:] def _create_extra_data(self, test_function): case_data = [] for case in self.case_list: one_case_data = { "config": self._get_ut_config(self._get_case_attr(case, "Test App")), "name": self._get_case_attr(case, "summary"), "reset": self._get_case_attr(case, "reset"), } if test_function in ["run_multiple_devices_cases", "run_multiple_stage_cases"]: try: one_case_data["child case num"] = self._get_case_attr(case, "child case num") except KeyError as e: print("multiple devices/stages cases must contains at least two test functions") print("case name: {}".format(one_case_data["name"])) raise e case_data.append(one_case_data) return case_data def _map_test_function(self): """ determine which test function to use according to current test case :return: test function name to use """ if self.filters["multi_device"] == "Yes": test_function = "run_multiple_devices_cases" elif self.filters["multi_stage"] == "Yes": test_function = "run_multiple_stage_cases" else: test_function = "run_unit_test_cases" return test_function def output(self): """ output data for job configs :return: {"Filter": case filter, "CaseConfig": list of case configs for cases in this group} """ 
test_function = self._map_test_function() output_data = { # we don't need filter for test function, as UT uses a few test functions for all cases "CaseConfig": [ { "name": test_function, "extra_data": self._create_extra_data(test_function), } ] } return output_data class UnitTestAssignTest(CIAssignTest.AssignTest): CI_TEST_JOB_PATTERN = re.compile(r"^UT_.+") def __init__(self, test_case_path, ci_config_file): CIAssignTest.AssignTest.__init__(self, test_case_path, ci_config_file, case_group=Group) @staticmethod def _search_cases(test_case_path, case_filter=None): """ For unit test case, we don't search for test functions. The unit test cases is stored in a yaml file which is created in job build-idf-test. """ with open(test_case_path, "r") as f: raw_data = yaml.load(f) test_cases = raw_data["test cases"] if case_filter: for key in case_filter: filtered_cases = [] for case in test_cases: try: # bot converts string to lower case if isinstance(case[key], str): _value = case[key].lower() else: _value = case[key] if _value in case_filter[key]: filtered_cases.append(case) except KeyError: # case don't have this key, regard as filter success filtered_cases.append(case) test_cases = filtered_cases return test_cases if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument("test_case", help="test case folder or file") parser.add_argument("ci_config_file", help="gitlab ci config file") parser.add_argument("output_path", help="output path of config files") args = parser.parse_args() assign_test = UnitTestAssignTest(args.test_case, args.ci_config_file) assign_test.assign_cases() assign_test.output_configs(args.output_path)
[]
[]
[ "TEST_FW_PATH" ]
[]
["TEST_FW_PATH"]
python
1
0
test/extended/util/framework.go
package util import ( "bufio" "bytes" "context" "encoding/json" "errors" "fmt" "io/ioutil" "net" "os" "path" "path/filepath" "regexp" "strconv" "strings" "sync" "time" g "github.com/onsi/ginkgo" o "github.com/onsi/gomega" authorizationapi "k8s.io/api/authorization/v1" batchv1 "k8s.io/api/batch/v1" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/apitesting" kapierrs "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/fields" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/util/uuid" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/client-go/kubernetes" batchv1client "k8s.io/client-go/kubernetes/typed/batch/v1" corev1client "k8s.io/client-go/kubernetes/typed/core/v1" quotav1 "k8s.io/kubernetes/pkg/quota/v1" e2e "k8s.io/kubernetes/test/e2e/framework" e2epod "k8s.io/kubernetes/test/e2e/framework/pod" "k8s.io/kubernetes/test/e2e/framework/statefulset" "k8s.io/kubernetes/test/utils/image" buildv1 "github.com/openshift/api/build/v1" configv1 "github.com/openshift/api/config/v1" imagev1 "github.com/openshift/api/image/v1" operatorv1 "github.com/openshift/api/operator/v1" securityv1 "github.com/openshift/api/security/v1" buildv1clienttyped "github.com/openshift/client-go/build/clientset/versioned/typed/build/v1" imagev1typedclient "github.com/openshift/client-go/image/clientset/versioned/typed/image/v1" projectv1typedclient "github.com/openshift/client-go/project/clientset/versioned/typed/project/v1" "github.com/openshift/library-go/pkg/build/naming" "github.com/openshift/library-go/pkg/git" "github.com/openshift/library-go/pkg/image/imageutil" "github.com/openshift/origin/test/extended/testdata" "github.com/openshift/origin/test/extended/util/ibmcloud" ) // WaitForInternalRegistryHostname waits for the internal registry hostname to be made available to the cluster. 
func WaitForInternalRegistryHostname(oc *CLI) (string, error) { ctx := context.Background() e2e.Logf("Waiting up to 2 minutes for the internal registry hostname to be published") var registryHostname string foundOCMLogs := false isOCMProgressing := true podLogs := map[string]string{} isIBMCloud := e2e.TestContext.Provider == ibmcloud.ProviderName testImageStreamName := "" if isIBMCloud { is := &imagev1.ImageStream{} is.GenerateName = "internal-registry-test" is, err := oc.AdminImageClient().ImageV1().ImageStreams("openshift").Create(context.Background(), is, metav1.CreateOptions{}) if err != nil { e2e.Logf("Error creating internal registry test imagestream: %v", err) return "", err } testImageStreamName = is.Name defer func() { err := oc.AdminImageClient().ImageV1().ImageStreams("openshift").Delete(context.Background(), is.Name, metav1.DeleteOptions{}) if err != nil { e2e.Logf("Failed to cleanup internal-registry-test imagestream") } }() } err := wait.Poll(2*time.Second, 2*time.Minute, func() (bool, error) { imageConfig, err := oc.AsAdmin().AdminConfigClient().ConfigV1().Images().Get(ctx, "cluster", metav1.GetOptions{}) if err != nil { if kapierrs.IsNotFound(err) { e2e.Logf("Image config object not found") return false, nil } e2e.Logf("Error accessing image config object: %#v", err) return false, err } if imageConfig == nil { e2e.Logf("Image config object nil") return false, nil } registryHostname = imageConfig.Status.InternalRegistryHostname if len(registryHostname) == 0 { e2e.Logf("Internal Registry Hostname is not set in image config object") return false, nil } if len(testImageStreamName) > 0 { is, err := oc.AdminImageClient().ImageV1().ImageStreams("openshift").Get(context.Background(), testImageStreamName, metav1.GetOptions{}) if err != nil { e2e.Logf("Failed to fetch test imagestream openshift/%s: %v", testImageStreamName, err) return false, err } if len(is.Status.DockerImageRepository) == 0 { return false, nil } imgRef, err := 
imageutil.ParseDockerImageReference(is.Status.DockerImageRepository) if err != nil { e2e.Logf("Failed to parse dockerimage repository in test imagestream (%s): %v", is.Status.DockerImageRepository, err) return false, err } if imgRef.Registry != registryHostname { return false, nil } return true, nil } // verify that the OCM config's internal registry hostname matches // the image config's internal registry hostname ocm, err := oc.AdminOperatorClient().OperatorV1().OpenShiftControllerManagers().Get(ctx, "cluster", metav1.GetOptions{}) if err != nil { if kapierrs.IsNotFound(err) { return false, nil } return false, err } observedConfig := map[string]interface{}{} err = json.Unmarshal(ocm.Spec.ObservedConfig.Raw, &observedConfig) if err != nil { return false, nil } internalRegistryHostnamePath := []string{"dockerPullSecret", "internalRegistryHostname"} currentRegistryHostname, _, err := unstructured.NestedString(observedConfig, internalRegistryHostnamePath...) if err != nil { e2e.Logf("error procesing observed config %#v", err) return false, nil } if currentRegistryHostname != registryHostname { e2e.Logf("OCM observed config hostname %s does not match image config hostname %s", currentRegistryHostname, registryHostname) return false, nil } // check pod logs for messages around image config's internal registry hostname has been observed and // and that the build controller was started after that observation pods, err := oc.AdminKubeClient().CoreV1().Pods("openshift-controller-manager").List(ctx, metav1.ListOptions{}) if err != nil { if kapierrs.IsNotFound(err) { return false, nil } return false, err } for _, pod := range pods.Items { req := oc.AdminKubeClient().CoreV1().Pods("openshift-controller-manager").GetLogs(pod.Name, &corev1.PodLogOptions{}) readCloser, err := req.Stream(ctx) if err == nil { b, err := ioutil.ReadAll(readCloser) if err == nil { podLog := string(b) podLogs[pod.Name] = podLog scanner := bufio.NewScanner(strings.NewReader(podLog)) firstLog := false 
for scanner.Scan() { line := scanner.Text() if strings.Contains(line, "build_controller.go") && strings.Contains(line, "Starting build controller") { firstLog = true continue } if firstLog && strings.Contains(line, "build_controller.go") && strings.Contains(line, registryHostname) { e2e.Logf("the OCM pod logs indicate the build controller was started after the internal registry hostname has been set in the OCM config") foundOCMLogs = true break } } } } else { e2e.Logf("error getting pod logs: %#v", err) } } if !foundOCMLogs { e2e.Logf("did not find the sequence in the OCM pod logs around the build controller getting started after the internal registry hostname has been set in the OCM config") return false, nil } if !isOCMProgressing { return true, nil } // now cycle through the OCM operator conditions and make sure the Progressing condition is done for _, condition := range ocm.Status.Conditions { if condition.Type != operatorv1.OperatorStatusTypeProgressing { continue } if condition.Status != operatorv1.ConditionFalse { e2e.Logf("OCM rollout still progressing or in error: %v", condition.Status) return false, nil } e2e.Logf("OCM rollout progressing status reports complete") isOCMProgressing = true return true, nil } e2e.Logf("OCM operator progressing condition not present yet") return false, nil }) if !foundOCMLogs && !isIBMCloud { e2e.Logf("dumping OCM pod logs since we never found the internal registry hostname and start build controller sequence") for podName, podLog := range podLogs { e2e.Logf("pod %s logs:\n%s", podName, podLog) } } if err == wait.ErrWaitTimeout { return "", fmt.Errorf("Timed out waiting for Openshift Controller Manager to be rolled out with updated internal registry hostname") } if err != nil { return "", err } return registryHostname, nil } func processScanError(log string) error { e2e.Logf(log) return fmt.Errorf(log) } // WaitForOpenShiftNamespaceImageStreams waits for the standard set of imagestreams to be imported func 
WaitForOpenShiftNamespaceImageStreams(oc *CLI) error { ctx := context.Background() // First wait for the internal registry hostname to be published registryHostname, err := WaitForInternalRegistryHostname(oc) if err != nil { return err } langs := []string{"ruby", "nodejs", "perl", "php", "python", "mysql", "postgresql", "mongodb", "jenkins"} scan := func() error { // check the samples operator to see about imagestream import status samplesOperatorConfig, err := oc.AdminConfigClient().ConfigV1().ClusterOperators().Get(ctx, "openshift-samples", metav1.GetOptions{}) if err != nil { return processScanError(fmt.Sprintf("Samples Operator ClusterOperator Error: %#v", err)) } for _, condition := range samplesOperatorConfig.Status.Conditions { switch { case condition.Type == configv1.OperatorDegraded && condition.Status == configv1.ConditionTrue: // if degraded, bail ... unexpected results can ensue return processScanError("SamplesOperator degraded!!!") case condition.Type == configv1.OperatorProgressing: // if the imagestreams for one of our langs above failed, we abort, // but if it is for say only EAP streams, we allow if condition.Reason == "FailedImageImports" { msg := condition.Message for _, lang := range langs { if strings.Contains(msg, " "+lang+" ") || strings.HasSuffix(msg, " "+lang) { e2e.Logf("SamplesOperator detected error during imagestream import: %s with details %s", condition.Reason, condition.Message) stream, err := oc.AsAdmin().ImageClient().ImageV1().ImageStreams("openshift").Get(ctx, lang, metav1.GetOptions{}) if err != nil { return processScanError(fmt.Sprintf("after seeing FailedImageImports for %s retrieval failed with %s", lang, err.Error())) } isi := &imagev1.ImageStreamImport{} isi.Name = lang isi.Namespace = "openshift" isi.ResourceVersion = stream.ResourceVersion isi.Spec = imagev1.ImageStreamImportSpec{ Import: true, Images: []imagev1.ImageImportSpec{}, } for _, tag := range stream.Spec.Tags { if tag.From != nil && tag.From.Kind == 
"DockerImage" { iis := imagev1.ImageImportSpec{} iis.From = *tag.From iis.To = &corev1.LocalObjectReference{Name: tag.Name} isi.Spec.Images = append(isi.Spec.Images, iis) } } _, err = oc.AsAdmin().ImageClient().ImageV1().ImageStreamImports("openshift").Create(ctx, isi, metav1.CreateOptions{}) if err != nil { return processScanError(fmt.Sprintf("after seeing FailedImageImports for %s the manual image import failed with %s", lang, err.Error())) } return processScanError(fmt.Sprintf("after seeing FailedImageImports for %s a manual image-import was submitted", lang)) } } } if condition.Status == configv1.ConditionTrue { // updates still in progress ... not "ready" return processScanError(fmt.Sprintf("SamplesOperator still in progress")) } case condition.Type == configv1.OperatorAvailable && condition.Status == configv1.ConditionFalse: return processScanError(fmt.Sprintf("SamplesOperator not available")) default: e2e.Logf("SamplesOperator at steady state") } } for _, lang := range langs { e2e.Logf("Checking language %v \n", lang) is, err := oc.ImageClient().ImageV1().ImageStreams("openshift").Get(ctx, lang, metav1.GetOptions{}) if err != nil { return processScanError(fmt.Sprintf("ImageStream Error: %#v \n", err)) } if !strings.HasPrefix(is.Status.DockerImageRepository, registryHostname) { return processScanError(fmt.Sprintf("ImageStream repository %s does not match expected host %s \n", is.Status.DockerImageRepository, registryHostname)) } for _, tag := range is.Spec.Tags { e2e.Logf("Checking tag %v \n", tag) if _, found := imageutil.StatusHasTag(is, tag.Name); !found { return processScanError(fmt.Sprintf("Tag Error: %#v \n", tag)) } } } return nil } // with the move to ocp/rhel as the default for the samples in 4.0, there are alot more imagestreams; // if by some chance this path runs very soon after the cluster has come up, the original time out would // not be sufficient; // so we've bumped what was 30 seconds to 2 min 30 seconds or 150 seconds (manual perf testing 
shows typical times of // 1 to 2 minutes, assuming registry.access.redhat.com / registry.redhat.io are behaving ... they // have proven less reliable that docker.io) // we've also determined that e2e-aws-image-ecosystem can be started before all the operators have completed; while // that is getting sorted out, the longer time will help there as well e2e.Logf("Scanning openshift ImageStreams \n") var scanErr error pollErr := wait.Poll(10*time.Second, 150*time.Second, func() (bool, error) { scanErr = scan() if scanErr != nil { return false, nil } return true, nil }) if pollErr == nil { e2e.Logf("Success! \n") return nil } DumpImageStreams(oc) DumpSampleOperator(oc) errorString := "" if strings.Contains(scanErr.Error(), "FailedImageImports") { strbuf := bytes.Buffer{} strbuf.WriteString(fmt.Sprintf("Issues exist pulling images from registry.redhat.io: %s\n", scanErr.Error())) strbuf.WriteString(" - check status at https://status.redhat.com (catalog.redhat.com) for reported outages\n") strbuf.WriteString(" - if no outages reported there, email [email protected] with a report of the error\n") strbuf.WriteString(" and prepare to work with the test platform team to get the current set of tokens for CI\n") errorString = strbuf.String() } else { errorString = fmt.Sprintf("Failed to import expected imagestreams, latest error status: %s", scanErr.Error()) } return fmt.Errorf(errorString) } //DumpImageStreams will dump both the openshift namespace and local namespace imagestreams // as part of debugging when the language imagestreams in the openshift namespace seem to disappear func DumpImageStreams(oc *CLI) { out, err := oc.AsAdmin().Run("get").Args("is", "-n", "openshift", "-o", "yaml").Output() if err == nil { e2e.Logf("\n imagestreams in openshift namespace: \n%s\n", out) } else { e2e.Logf("\n error on getting imagestreams in openshift namespace: %+v\n%#v\n", err, out) } out, err = oc.AsAdmin().Run("get").Args("is", "-o", "yaml").Output() if err == nil { e2e.Logf("\n 
imagestreams in dynamic test namespace: \n%s\n", out) } else { e2e.Logf("\n error on getting imagestreams in dynamic test namespace: %+v\n%#v\n", err, out) } ids, err := ListImages() if err != nil { e2e.Logf("\n got error on container images %+v\n", err) } else { for _, id := range ids { e2e.Logf(" found local image %s\n", id) } } } func DumpSampleOperator(oc *CLI) { out, err := oc.AsAdmin().Run("get").Args("configs.samples.operator.openshift.io", "cluster", "-o", "yaml").Output() if err == nil { e2e.Logf("\n samples operator CR: \n%s\n", out) } else { e2e.Logf("\n error on getting samples operator CR: %+v\n%#v\n", err, out) } DumpPodLogsStartingWithInNamespace("cluster-samples-operator", "openshift-cluster-samples-operator", oc) } // DumpBuildLogs will dump the latest build logs for a BuildConfig for debug purposes func DumpBuildLogs(bc string, oc *CLI) { buildOutput, err := oc.AsAdmin().Run("logs").Args("-f", "bc/"+bc, "--timestamps").Output() if err == nil { e2e.Logf("\n\n build logs : %s\n\n", buildOutput) } else { e2e.Logf("\n\n got error on build logs %+v\n\n", err) } // if we suspect that we are filling up the registry file system, call ExamineDiskUsage / ExaminePodDiskUsage // also see if manipulations of the quota around /mnt/openshift-xfs-vol-dir exist in the extended test set up scripts ExamineDiskUsage() ExaminePodDiskUsage(oc) } // DumpBuilds will dump the yaml for every build in the test namespace; remember, pipeline builds // don't have build pods so a generic framework dump won't cat our pipeline builds objs in openshift func DumpBuilds(oc *CLI) { buildOutput, err := oc.AsAdmin().Run("get").Args("builds", "-o", "yaml").Output() if err == nil { e2e.Logf("\n\n builds yaml:\n%s\n\n", buildOutput) } else { e2e.Logf("\n\n got error on build yaml dump: %#v\n\n", err) } } func GetStatefulSetPods(oc *CLI, setName string) (*corev1.PodList, error) { return oc.AdminKubeClient().CoreV1().Pods(oc.Namespace()).List(context.Background(), 
metav1.ListOptions{LabelSelector: ParseLabelsOrDie(fmt.Sprintf("name=%s", setName)).String()}) } // DumpPodStates dumps the state of all pods in the CLI's current namespace. func DumpPodStates(oc *CLI) { e2e.Logf("Dumping pod state for namespace %s", oc.Namespace()) out, err := oc.AsAdmin().Run("get").Args("pods", "-o", "yaml").Output() if err != nil { e2e.Logf("Error dumping pod states: %v", err) return } e2e.Logf(out) } // DumpPodStatesInNamespace dumps the state of all pods in the provided namespace. func DumpPodStatesInNamespace(namespace string, oc *CLI) { e2e.Logf("Dumping pod state for namespace %s", namespace) out, err := oc.AsAdmin().Run("get").Args("pods", "-n", namespace, "-o", "yaml").Output() if err != nil { e2e.Logf("Error dumping pod states: %v", err) return } e2e.Logf(out) } // DumpPodLogsStartingWith will dump any pod starting with the name prefix provided func DumpPodLogsStartingWith(prefix string, oc *CLI) { podsToDump := []corev1.Pod{} podList, err := oc.AdminKubeClient().CoreV1().Pods(oc.Namespace()).List(context.Background(), metav1.ListOptions{}) if err != nil { e2e.Logf("Error listing pods: %v", err) return } for _, pod := range podList.Items { if strings.HasPrefix(pod.Name, prefix) { podsToDump = append(podsToDump, pod) } } if len(podsToDump) > 0 { DumpPodLogs(podsToDump, oc) } } // DumpPodLogsStartingWith will dump any pod starting with the name prefix provided func DumpPodLogsStartingWithInNamespace(prefix, namespace string, oc *CLI) { podsToDump := []corev1.Pod{} podList, err := oc.AdminKubeClient().CoreV1().Pods(namespace).List(context.Background(), metav1.ListOptions{}) if err != nil { e2e.Logf("Error listing pods: %v", err) return } for _, pod := range podList.Items { if strings.HasPrefix(pod.Name, prefix) { podsToDump = append(podsToDump, pod) } } if len(podsToDump) > 0 { DumpPodLogs(podsToDump, oc) } } func DumpPodLogs(pods []corev1.Pod, oc *CLI) { for _, pod := range pods { descOutput, err := 
oc.AsAdmin().Run("describe").WithoutNamespace().Args("pod/"+pod.Name, "-n", pod.Namespace).Output() if err == nil { e2e.Logf("Describing pod %q\n%s\n\n", pod.Name, descOutput) } else { e2e.Logf("Error retrieving description for pod %q: %v\n\n", pod.Name, err) } dumpContainer := func(container *corev1.Container) { depOutput, err := oc.AsAdmin().Run("logs").WithoutNamespace().Args("pod/"+pod.Name, "-c", container.Name, "-n", pod.Namespace).Output() if err == nil { e2e.Logf("Log for pod %q/%q\n---->\n%s\n<----end of log for %[1]q/%[2]q\n", pod.Name, container.Name, depOutput) } else { e2e.Logf("Error retrieving logs for pod %q/%q: %v\n\n", pod.Name, container.Name, err) } } for _, c := range pod.Spec.InitContainers { dumpContainer(&c) } for _, c := range pod.Spec.Containers { dumpContainer(&c) } } } // DumpPodsCommand runs the provided command in every pod identified by selector in the provided namespace. func DumpPodsCommand(c kubernetes.Interface, ns string, selector labels.Selector, cmd string) { podList, err := c.CoreV1().Pods(ns).List(context.Background(), metav1.ListOptions{LabelSelector: selector.String()}) o.Expect(err).NotTo(o.HaveOccurred()) values := make(map[string]string) for _, pod := range podList.Items { stdout, err := e2e.RunHostCmdWithRetries(pod.Namespace, pod.Name, cmd, statefulset.StatefulSetPoll, statefulset.StatefulPodTimeout) o.Expect(err).NotTo(o.HaveOccurred()) values[pod.Name] = stdout } for name, stdout := range values { stdout = strings.TrimSuffix(stdout, "\n") e2e.Logf(name + ": " + strings.Join(strings.Split(stdout, "\n"), fmt.Sprintf("\n%s: ", name))) } } // DumpConfigMapStates dumps the state of all ConfigMaps in the CLI's current namespace. 
func DumpConfigMapStates(oc *CLI) { e2e.Logf("Dumping configMap state for namespace %s", oc.Namespace()) out, err := oc.AsAdmin().Run("get").Args("configmaps", "-o", "yaml").Output() if err != nil { e2e.Logf("Error dumping configMap states: %v", err) return } e2e.Logf(out) } // GetMasterThreadDump will get a golang thread stack dump func GetMasterThreadDump(oc *CLI) { out, err := oc.AsAdmin().Run("get").Args("--raw", "/debug/pprof/goroutine?debug=2").Output() if err == nil { e2e.Logf("\n\n Master thread stack dump:\n\n%s\n\n", string(out)) return } e2e.Logf("\n\n got error on oc get --raw /debug/pprof/goroutine?godebug=2: %v\n\n", err) } func PreTestDump() { // dump any state we want to know prior to running tests } // ExamineDiskUsage will dump df output on the testing system; leveraging this as part of diagnosing // the registry's disk filling up during external tests on jenkins func ExamineDiskUsage() { // disabling this for now, easier to do it here than everywhere that's calling it. return /* out, err := exec.Command("/bin/df", "-m").Output() if err == nil { e2e.Logf("\n\n df -m output: %s\n\n", string(out)) } else { e2e.Logf("\n\n got error on df %v\n\n", err) } DumpDockerInfo() */ } // ExaminePodDiskUsage will dump df/du output on registry pod; leveraging this as part of diagnosing // the registry's disk filling up during external tests on jenkins func ExaminePodDiskUsage(oc *CLI) { // disabling this for now, easier to do it here than everywhere that's calling it. 
return /* out, err := oc.Run("get").Args("pods", "-o", "json", "-n", "default").Output() var podName string if err == nil { b := []byte(out) var list corev1.PodList err = json.Unmarshal(b, &list) if err == nil { for _, pod := range list.Items { e2e.Logf("\n\n looking at pod %s \n\n", pod.ObjectMeta.Name) if strings.Contains(pod.ObjectMeta.Name, "docker-registry-") && !strings.Contains(pod.ObjectMeta.Name, "deploy") { podName = pod.ObjectMeta.Name break } } } else { e2e.Logf("\n\n got json unmarshal err: %v\n\n", err) } } else { e2e.Logf("\n\n got error on get pods: %v\n\n", err) } if len(podName) == 0 { e2e.Logf("Unable to determine registry pod name, so we can't examine its disk usage.") return } out, err = oc.Run("exec").Args("-n", "default", podName, "df").Output() if err == nil { e2e.Logf("\n\n df from registry pod: \n%s\n\n", out) } else { e2e.Logf("\n\n got error on reg pod df: %v\n", err) } out, err = oc.Run("exec").Args("-n", "default", podName, "du", "/registry").Output() if err == nil { e2e.Logf("\n\n du from registry pod: \n%s\n\n", out) } else { e2e.Logf("\n\n got error on reg pod du: %v\n", err) } */ } // VarSubOnFile reads in srcFile, finds instances of ${key} from the map // and replaces them with their associated values. func VarSubOnFile(srcFile string, destFile string, vars map[string]string) error { srcData, err := ioutil.ReadFile(srcFile) if err == nil { srcString := string(srcData) for k, v := range vars { k = "${" + k + "}" srcString = strings.Replace(srcString, k, v, -1) // -1 means unlimited replacements } err = ioutil.WriteFile(destFile, []byte(srcString), 0644) } return err } // StartBuild executes OC start-build with the specified arguments. StdOut and StdErr from the process // are returned as separate strings. 
func StartBuild(oc *CLI, args ...string) (stdout, stderr string, err error) {
	stdout, stderr, err = oc.Run("start-build").Args(args...).Outputs()
	e2e.Logf("\n\nstart-build output with args %v:\nError>%v\nStdOut>\n%s\nStdErr>\n%s\n\n", args, err, stdout, stderr)
	return stdout, stderr, err
}

// buildPathPattern matches the "-o=name" output of start-build,
// e.g. "build.build.openshift.io/test-1"; capture group 1 is the build name.
var buildPathPattern = regexp.MustCompile(`^build\.build\.openshift\.io/([\w\-\._]+)$`)

// LogDumperFunc is an alternate strategy for retrieving the logs of a build
// (used instead of 'oc logs' when set on a BuildResult).
type LogDumperFunc func(oc *CLI, br *BuildResult) (string, error)

// NewBuildResult constructs a BuildResult for an already-created build object.
func NewBuildResult(oc *CLI, build *buildv1.Build) *BuildResult {
	return &BuildResult{
		Oc:        oc,
		BuildName: build.Name,
		BuildPath: "builds/" + build.Name,
	}
}

// BuildResult records the outcome of starting a build and waiting for it to finish.
type BuildResult struct {
	// BuildPath is a resource qualified name (e.g. "build/test-1").
	BuildPath string
	// BuildName is the non-resource qualified name.
	BuildName string
	// StartBuildStdErr is the StdErr output generated by oc start-build.
	StartBuildStdErr string
	// StartBuildStdOut is the StdOut output generated by oc start-build.
	StartBuildStdOut string
	// StartBuildErr is the error, if any, returned by the direct invocation of the start-build command.
	StartBuildErr error
	// BuildConfigName is the buildconfig which generated this build.
	BuildConfigName string
	// Build is the resource created. May be nil if there was a timeout.
	Build *buildv1.Build
	// BuildAttempt represents that a Build resource was created.
	// false indicates a severe error unrelated to Build success or failure.
	BuildAttempt bool
	// BuildSuccess is true if the build was finished successfully.
	BuildSuccess bool
	// BuildFailure is true if the build was finished with an error.
	BuildFailure bool
	// BuildCancelled is true if the build was canceled.
	BuildCancelled bool
	// BuildTimeout is true if there was a timeout waiting for the build to finish.
	BuildTimeout bool
	// LogDumper is an alternate log dumper function. If set, this is called instead of 'oc logs'.
	LogDumper LogDumperFunc
	// Oc is the openshift client which created this build.
	Oc *CLI
}

// DumpLogs sends logs associated with this BuildResult to the GinkgoWriter.
func (t *BuildResult) DumpLogs() { e2e.Logf("\n\n*****************************************\n") e2e.Logf("Dumping Build Result: %#v\n", *t) if t == nil { e2e.Logf("No build result available!\n\n") return } desc, err := t.Oc.Run("describe").Args(t.BuildPath).Output() e2e.Logf("\n** Build Description:\n") if err != nil { e2e.Logf("Error during description retrieval: %+v\n", err) } else { e2e.Logf("%s\n", desc) } e2e.Logf("\n** Build Logs:\n") buildOuput, err := t.Logs() if err != nil { e2e.Logf("Error during log retrieval: %+v\n", err) } else { e2e.Logf("%s\n", buildOuput) } e2e.Logf("\n\n") t.dumpRegistryLogs() // if we suspect that we are filling up the registry file system, call ExamineDiskUsage / ExaminePodDiskUsage // also see if manipulations of the quota around /mnt/openshift-xfs-vol-dir exist in the extended test set up scripts /* ExamineDiskUsage() ExaminePodDiskUsage(t.oc) e2e.Logf( "\n\n") */ } func (t *BuildResult) dumpRegistryLogs() { var buildStarted *time.Time oc := t.Oc e2e.Logf("\n** Registry Logs:\n") if t.Build != nil && !t.Build.CreationTimestamp.IsZero() { buildStarted = &t.Build.CreationTimestamp.Time } else { proj, err := oc.ProjectClient().ProjectV1().Projects().Get(context.Background(), oc.Namespace(), metav1.GetOptions{}) if err != nil { e2e.Logf("Failed to get project %s: %v\n", oc.Namespace(), err) } else { buildStarted = &proj.CreationTimestamp.Time } } if buildStarted == nil { e2e.Logf("Could not determine test' start time\n\n\n") return } since := time.Now().Sub(*buildStarted) // Changing the namespace on the derived client still changes it on the original client // because the kubeFramework field is only copied by reference. Saving the original namespace // here so we can restore it when done with registry logs // TODO remove the default/docker-registry log retrieval when we are fully migrated to 4.0 for our test env. 
savedNamespace := t.Oc.Namespace() oadm := t.Oc.AsAdmin().SetNamespace("default") out, err := oadm.Run("logs").Args("dc/docker-registry", "--since="+since.String()).Output() if err != nil { e2e.Logf("Error during log retrieval: %+v\n", err) } else { e2e.Logf("%s\n", out) } oadm = t.Oc.AsAdmin().SetNamespace("openshift-image-registry") out, err = oadm.Run("logs").Args("deployment/image-registry", "--since="+since.String()).Output() if err != nil { e2e.Logf("Error during log retrieval: %+v\n", err) } else { e2e.Logf("%s\n", out) } t.Oc.SetNamespace(savedNamespace) e2e.Logf("\n\n") } // Logs returns the logs associated with this build. func (t *BuildResult) Logs() (string, error) { if t == nil || t.BuildPath == "" { return "", fmt.Errorf("Not enough information to retrieve logs for %#v", *t) } if t.LogDumper != nil { return t.LogDumper(t.Oc, t) } buildOuput, buildErr, err := t.Oc.Run("logs").Args("-f", t.BuildPath, "--timestamps", "--v", "10").Outputs() if err != nil { return "", fmt.Errorf("Error retrieving logs for build %q: (%s) %v", t.BuildName, buildErr, err) } return buildOuput, nil } // LogsNoTimestamp returns the logs associated with this build. func (t *BuildResult) LogsNoTimestamp() (string, error) { if t == nil || t.BuildPath == "" { return "", fmt.Errorf("Not enough information to retrieve logs for %#v", *t) } if t.LogDumper != nil { return t.LogDumper(t.Oc, t) } buildOuput, buildErr, err := t.Oc.Run("logs").Args("-f", t.BuildPath).Outputs() if err != nil { return "", fmt.Errorf("Error retrieving logs for build %q: (%s) %v", t.BuildName, buildErr, err) } return buildOuput, nil } // Dumps logs and triggers a Ginkgo assertion if the build did NOT succeed. 
func (t *BuildResult) AssertSuccess() *BuildResult {
	if !t.BuildSuccess {
		t.DumpLogs()
	}
	// offset 1 so the Ginkgo failure points at the caller, not this helper
	o.ExpectWithOffset(1, t.BuildSuccess).To(o.BeTrue())
	return t
}

// Dumps logs and triggers a Ginkgo assertion if the build did NOT have an error (this will not assert on timeouts)
func (t *BuildResult) AssertFailure() *BuildResult {
	if !t.BuildFailure {
		t.DumpLogs()
	}
	// offset 1 so the Ginkgo failure points at the caller, not this helper
	o.ExpectWithOffset(1, t.BuildFailure).To(o.BeTrue())
	return t
}

// StartBuildResult runs start-build with "-o=name" appended and returns a BuildResult
// capturing the command's stdout/stderr and the parsed build name.
func StartBuildResult(oc *CLI, args ...string) (result *BuildResult, err error) {
	args = append(args, "-o=name") // ensure that the build name is the only thing send to stdout
	stdout, stderr, err := StartBuild(oc, args...)
	// Usually, with -o=name, we only expect the build path.
	// However, the caller may have added --follow which can add
	// content to stdout. So just grab the first line.
	buildPath := strings.TrimSpace(strings.Split(stdout, "\n")[0])
	result = &BuildResult{
		Build:            nil,
		BuildPath:        buildPath,
		StartBuildStdOut: stdout,
		StartBuildStdErr: stderr,
		StartBuildErr:    nil,
		BuildAttempt:     false,
		BuildSuccess:     false,
		BuildFailure:     false,
		BuildCancelled:   false,
		BuildTimeout:     false,
		Oc:               oc,
	}
	// An error here does not necessarily mean we could not run start-build. For example
	// when --wait is specified, start-build returns an error if the build fails. Therefore,
	// we continue to collect build information even if we see an error.
	result.StartBuildErr = err
	matches := buildPathPattern.FindStringSubmatch(buildPath)
	if len(matches) != 2 {
		return result, fmt.Errorf("Build path output did not match expected format 'build/name' : %q", buildPath)
	}
	result.BuildName = matches[1]
	return result, nil
}

// StartBuildAndWait executes OC start-build with the specified arguments on an existing buildconfig.
// Note that start-build will be run with "-o=name" as a parameter when using this method.
// If no error is returned from this method, it means that the build attempted successfully, NOT that
// the build completed. For completion information, check the BuildResult object.
func StartBuildAndWait(oc *CLI, args ...string) (result *BuildResult, err error) {
	result, err = StartBuildResult(oc, args...)
	if err != nil {
		return result, err
	}
	return result, WaitForBuildResult(oc.BuildClient().BuildV1().Builds(oc.Namespace()), result)
}

// WaitForBuildResult updates result with the state of the build
func WaitForBuildResult(c buildv1clienttyped.BuildInterface, result *BuildResult) error {
	e2e.Logf("Waiting for %s to complete\n", result.BuildName)
	err := WaitForABuild(c, result.BuildName,
		func(b *buildv1.Build) bool {
			result.Build = b
			result.BuildSuccess = CheckBuildSuccess(b)
			return result.BuildSuccess
		},
		func(b *buildv1.Build) bool {
			result.Build = b
			result.BuildFailure = CheckBuildFailed(b)
			return result.BuildFailure
		},
		func(b *buildv1.Build) bool {
			result.Build = b
			result.BuildCancelled = CheckBuildCancelled(b)
			return result.BuildCancelled
		},
	)
	if result.Build == nil {
		// We only abort here if the build progress was unobservable. Only known cause would be severe, non-build related error in WaitForABuild.
		return fmt.Errorf("Severe error waiting for build: %v", err)
	}
	result.BuildAttempt = true
	result.BuildTimeout = !(result.BuildFailure || result.BuildSuccess || result.BuildCancelled)
	// NOTE: err from WaitForABuild is intentionally swallowed here (only logged);
	// callers inspect result.Build* flags for failure/timeout information.
	e2e.Logf("Done waiting for %s: %#v\n with error: %v\n", result.BuildName, *result, err)
	return nil
}

// WaitForABuild waits for a Build object to match either isOK or isFailed conditions.
func WaitForABuild(c buildv1clienttyped.BuildInterface, name string, isOK, isFailed, isCanceled func(*buildv1.Build) bool) error {
	// default to the standard phase-based predicates when none are supplied
	if isOK == nil {
		isOK = CheckBuildSuccess
	}
	if isFailed == nil {
		isFailed = CheckBuildFailed
	}
	if isCanceled == nil {
		isCanceled = CheckBuildCancelled
	}
	// wait 2 minutes for build to exist
	err := wait.Poll(1*time.Second, 2*time.Minute, func() (bool, error) {
		if _, err := c.Get(context.Background(), name, metav1.GetOptions{}); err != nil {
			return false, nil
		}
		return true, nil
	})
	if err == wait.ErrWaitTimeout {
		return fmt.Errorf("Timed out waiting for build %q to be created", name)
	}
	if err != nil {
		return err
	}
	// wait longer for the build to run to completion
	err = wait.Poll(5*time.Second, 10*time.Minute, func() (bool, error) {
		// list with a field selector on metadata.name rather than Get so a deleted
		// build simply yields an empty list instead of an error
		list, err := c.List(context.Background(), metav1.ListOptions{FieldSelector: fields.Set{"metadata.name": name}.AsSelector().String()})
		if err != nil {
			e2e.Logf("error listing builds: %v", err)
			return false, err
		}
		for i := range list.Items {
			if name == list.Items[i].Name && (isOK(&list.Items[i]) || isCanceled(&list.Items[i])) {
				return true, nil
			}
			if name != list.Items[i].Name {
				return false, fmt.Errorf("While listing builds named %s, found unexpected build %#v", name, list.Items[i])
			}
			if isFailed(&list.Items[i]) {
				return false, fmt.Errorf("The build %q status is %q", name, list.Items[i].Status.Phase)
			}
		}
		return false, nil
	})
	if err != nil {
		e2e.Logf("WaitForABuild returning with error: %v", err)
	}
	if err == wait.ErrWaitTimeout {
		return fmt.Errorf("Timed out waiting for build %q to complete", name)
	}
	return err
}

// CheckBuildSuccess returns true if the build succeeded
func CheckBuildSuccess(b *buildv1.Build) bool {
	return b.Status.Phase == buildv1.BuildPhaseComplete
}

// CheckBuildFailed return true if the build failed
func CheckBuildFailed(b *buildv1.Build) bool {
	return b.Status.Phase == buildv1.BuildPhaseFailed || b.Status.Phase == buildv1.BuildPhaseError
}

// CheckBuildCancelled return true if the build was canceled
func CheckBuildCancelled(b *buildv1.Build) bool {
	return b.Status.Phase == buildv1.BuildPhaseCancelled
}

// WaitForServiceAccount waits until the named service account gets fully
// provisioned
func WaitForServiceAccount(c corev1client.ServiceAccountInterface, name string) error {
	waitFn := func() (bool, error) {
		sc, err := c.Get(context.Background(), name, metav1.GetOptions{})
		if err != nil {
			// If we can't access the service accounts, let's wait till the controller
			// create it.
			if kapierrs.IsNotFound(err) || kapierrs.IsForbidden(err) {
				e2e.Logf("Waiting for service account %q to be available: %v (will retry) ...", name, err)
				return false, nil
			}
			return false, fmt.Errorf("Failed to get service account %q: %v", name, err)
		}
		secretNames := []string{}
		for _, s := range sc.Secrets {
			// a "dockercfg" secret is the signal that provisioning completed
			if strings.Contains(s.Name, "dockercfg") {
				return true, nil
			}
			secretNames = append(secretNames, s.Name)
		}
		e2e.Logf("Waiting for service account %q secrets (%s) to include dockercfg ...", name, strings.Join(secretNames, ","))
		return false, nil
	}
	return wait.Poll(100*time.Millisecond, 3*time.Minute, waitFn)
}

// WaitForNamespaceSCCAnnotations waits up to 2 minutes for the cluster-policy-controller to add the SCC related
// annotations to the provided namespace.
func WaitForNamespaceSCCAnnotations(c projectv1typedclient.ProjectV1Interface, name string) error { waitFn := func() (bool, error) { proj, err := c.Projects().Get(context.Background(), name, metav1.GetOptions{}) if err != nil { // it is assumed the project was created prior to calling this, so we // do not distinguish not found errors return false, err } if proj.Annotations == nil { return false, nil } for k := range proj.Annotations { // annotations to check based off of // https://github.com/openshift/cluster-policy-controller/blob/master/pkg/security/controller/namespace_scc_allocation_controller.go#L112 if k == securityv1.UIDRangeAnnotation { return true, nil } } e2e.Logf("project %s current annotation set: %#v", name, proj.Annotations) return false, nil } return wait.Poll(time.Duration(15*time.Second), 2*time.Minute, waitFn) } // WaitForAnImageStream waits for an ImageStream to fulfill the isOK function func WaitForAnImageStream(client imagev1typedclient.ImageStreamInterface, name string, isOK, isFailed func(*imagev1.ImageStream) bool) error { for { list, err := client.List(context.Background(), metav1.ListOptions{FieldSelector: fields.Set{"metadata.name": name}.AsSelector().String()}) if err != nil { return err } for i := range list.Items { if isOK(&list.Items[i]) { return nil } if isFailed(&list.Items[i]) { return fmt.Errorf("The image stream %q status is %q", name, list.Items[i].Annotations[imagev1.DockerImageRepositoryCheckAnnotation]) } } rv := list.ResourceVersion w, err := client.Watch(context.Background(), metav1.ListOptions{FieldSelector: fields.Set{"metadata.name": name}.AsSelector().String(), ResourceVersion: rv}) if err != nil { return err } defer w.Stop() for { val, ok := <-w.ResultChan() if !ok { // reget and re-watch break } if e, ok := val.Object.(*imagev1.ImageStream); ok { if isOK(e) { return nil } if isFailed(e) { return fmt.Errorf("The image stream %q status is %q", name, e.Annotations[imagev1.DockerImageRepositoryCheckAnnotation]) } } } } 
} // WaitForAnImageStreamTag waits until an image stream with given name has non-empty history for given tag. // Defaults to waiting for 300 seconds func WaitForAnImageStreamTag(oc *CLI, namespace, name, tag string) error { waitTimeout := time.Second * 300 g.By(fmt.Sprintf("waiting for an is importer to import a tag %s into a stream %s", tag, name)) start := time.Now() c := make(chan error) go func() { err := WaitForAnImageStream( oc.ImageClient().ImageV1().ImageStreams(namespace), name, func(is *imagev1.ImageStream) bool { statusTag, exists := imageutil.StatusHasTag(is, tag) if !exists || len(statusTag.Items) == 0 { return false } return true }, func(is *imagev1.ImageStream) bool { return time.Now().After(start.Add(waitTimeout)) }) c <- err }() select { case e := <-c: return e case <-time.After(waitTimeout): return fmt.Errorf("timed out while waiting of an image stream tag %s/%s:%s", namespace, name, tag) } } // CheckImageStreamLatestTagPopulated returns true if the imagestream has a ':latest' tag filed func CheckImageStreamLatestTagPopulated(i *imagev1.ImageStream) bool { _, ok := imageutil.StatusHasTag(i, "latest") return ok } // CheckImageStreamTagNotFound return true if the imagestream update was not successful func CheckImageStreamTagNotFound(i *imagev1.ImageStream) bool { return strings.Contains(i.Annotations[imagev1.DockerImageRepositoryCheckAnnotation], "not") || strings.Contains(i.Annotations[imagev1.DockerImageRepositoryCheckAnnotation], "error") } func isUsageSynced(received, expected corev1.ResourceList, expectedIsUpperLimit bool) bool { resourceNames := quotav1.ResourceNames(expected) masked := quotav1.Mask(received, resourceNames) if len(masked) != len(expected) { return false } if expectedIsUpperLimit { if le, _ := quotav1.LessThanOrEqual(masked, expected); !le { return false } } else { if le, _ := quotav1.LessThanOrEqual(expected, masked); !le { return false } } return true } // WaitForResourceQuotaSync watches given resource quota until its usage 
is updated to desired level or a // timeout occurs. If successful, used quota values will be returned for expected resources. Otherwise an // ErrWaitTimeout will be returned. If expectedIsUpperLimit is true, given expected usage must compare greater // or equal to quota's usage, which is useful for expected usage increment. Otherwise expected usage must // compare lower or equal to quota's usage, which is useful for expected usage decrement. func WaitForResourceQuotaSync( client corev1client.ResourceQuotaInterface, name string, expectedUsage corev1.ResourceList, expectedIsUpperLimit bool, timeout time.Duration, ) (corev1.ResourceList, error) { startTime := time.Now() endTime := startTime.Add(timeout) expectedResourceNames := quotav1.ResourceNames(expectedUsage) list, err := client.List(context.Background(), metav1.ListOptions{FieldSelector: fields.Set{"metadata.name": name}.AsSelector().String()}) if err != nil { return nil, err } for i := range list.Items { used := quotav1.Mask(list.Items[i].Status.Used, expectedResourceNames) if isUsageSynced(used, expectedUsage, expectedIsUpperLimit) { return used, nil } } rv := list.ResourceVersion w, err := client.Watch(context.Background(), metav1.ListOptions{FieldSelector: fields.Set{"metadata.name": name}.AsSelector().String(), ResourceVersion: rv}) if err != nil { return nil, err } defer w.Stop() for time.Now().Before(endTime) { select { case val, ok := <-w.ResultChan(): if !ok { // reget and re-watch continue } if rq, ok := val.Object.(*corev1.ResourceQuota); ok { used := quotav1.Mask(rq.Status.Used, expectedResourceNames) if isUsageSynced(used, expectedUsage, expectedIsUpperLimit) { return used, nil } } case <-time.After(endTime.Sub(time.Now())): return nil, wait.ErrWaitTimeout } } return nil, wait.ErrWaitTimeout } // GetPodNamesByFilter looks up pods that satisfy the predicate and returns their names. 
func GetPodNamesByFilter(c corev1client.PodInterface, label labels.Selector, predicate func(corev1.Pod) bool) (podNames []string, err error) { podList, err := c.List(context.Background(), metav1.ListOptions{LabelSelector: label.String()}) if err != nil { return nil, err } for _, pod := range podList.Items { if predicate(pod) { podNames = append(podNames, pod.Name) } } return podNames, nil } func WaitForAJob(c batchv1client.JobInterface, name string, timeout time.Duration) error { return wait.Poll(1*time.Second, timeout, func() (bool, error) { j, e := c.Get(context.Background(), name, metav1.GetOptions{}) if e != nil { return true, e } // TODO soltysh: replace this with a function once such exist, currently // it's private in the controller for _, c := range j.Status.Conditions { if (c.Type == batchv1.JobComplete || c.Type == batchv1.JobFailed) && c.Status == corev1.ConditionTrue { return true, nil } } return false, nil }) } // WaitForPods waits until given number of pods that match the label selector and // satisfy the predicate are found func WaitForPods(c corev1client.PodInterface, label labels.Selector, predicate func(corev1.Pod) bool, count int, timeout time.Duration) ([]string, error) { var podNames []string err := wait.Poll(1*time.Second, timeout, func() (bool, error) { p, e := GetPodNamesByFilter(c, label, predicate) if e != nil { return true, e } if len(p) != count { return false, nil } podNames = p return true, nil }) return podNames, err } // CheckPodIsRunning returns true if the pod is running func CheckPodIsRunning(pod corev1.Pod) bool { return pod.Status.Phase == corev1.PodRunning } // CheckPodIsSucceeded returns true if the pod status is "Succdeded" func CheckPodIsSucceeded(pod corev1.Pod) bool { return pod.Status.Phase == corev1.PodSucceeded } // CheckPodIsReady returns true if the pod's ready probe determined that the pod is ready. 
func CheckPodIsReady(pod corev1.Pod) bool { if pod.Status.Phase != corev1.PodRunning { return false } for _, cond := range pod.Status.Conditions { if cond.Type != corev1.PodReady { continue } return cond.Status == corev1.ConditionTrue } return false } // CheckPodNoOp always returns true func CheckPodNoOp(pod corev1.Pod) bool { return true } // WaitUntilPodIsGone waits until the named Pod will disappear func WaitUntilPodIsGone(c corev1client.PodInterface, podName string, timeout time.Duration) error { return wait.Poll(1*time.Second, timeout, func() (bool, error) { _, err := c.Get(context.Background(), podName, metav1.GetOptions{}) if err != nil { if strings.Contains(err.Error(), "not found") { return true, nil } return true, err } return false, nil }) } // GetDockerImageReference retrieves the full Docker pull spec from the given ImageStream // and tag func GetDockerImageReference(c imagev1typedclient.ImageStreamInterface, name, tag string) (string, error) { imageStream, err := c.Get(context.Background(), name, metav1.GetOptions{}) if err != nil { return "", err } isTag, ok := imageutil.StatusHasTag(imageStream, tag) if !ok { return "", fmt.Errorf("ImageStream %q does not have tag %q", name, tag) } return isTag.Items[0].DockerImageReference, nil } // GetPodForContainer creates a new Pod that runs specified container func GetPodForContainer(container corev1.Container) *corev1.Pod { name := naming.GetPodName("test-pod", string(uuid.NewUUID())) return &corev1.Pod{ TypeMeta: metav1.TypeMeta{ Kind: "Pod", APIVersion: "v1", }, ObjectMeta: metav1.ObjectMeta{ Name: name, Labels: map[string]string{"name": name}, }, Spec: corev1.PodSpec{ Containers: []corev1.Container{container}, RestartPolicy: corev1.RestartPolicyNever, }, } } // KubeConfigPath returns the value of KUBECONFIG environment variable func KubeConfigPath() string { // can't use gomega in this method since it is used outside of It() return os.Getenv("KUBECONFIG") } //ArtifactDirPath returns the value of 
ARTIFACT_DIR environment variable func ArtifactDirPath() string { path := os.Getenv("ARTIFACT_DIR") o.Expect(path).NotTo(o.BeNil()) o.Expect(path).NotTo(o.BeEmpty()) return path } //ArtifactPath returns the absolute path to the fix artifact file //The path is relative to ARTIFACT_DIR func ArtifactPath(elem ...string) string { return filepath.Join(append([]string{ArtifactDirPath()}, elem...)...) } func prefixFixturePath(elem []string) []string { switch { case len(elem) == 0: panic("must specify path") case len(elem) > 3 && elem[0] == ".." && elem[1] == ".." && elem[2] == "examples": elem = elem[2:] case len(elem) > 3 && elem[0] == ".." && elem[1] == ".." && elem[2] == "install": elem = elem[2:] case len(elem) > 3 && elem[0] == ".." && elem[1] == "integration": elem = append([]string{"test"}, elem[1:]...) case elem[0] == "testdata": elem = append([]string{"test", "extended"}, elem...) default: panic(fmt.Sprintf("Fixtures must be in test/extended/testdata or examples not %s", path.Join(elem...))) } return elem } // FixturePaths returns the set of paths within the provided fixture directory. func FixturePaths(elem ...string) []string { var paths []string elem = prefixFixturePath(elem) prefix := path.Join(elem...) items, _ := testdata.AssetDir(prefix) for _, item := range items { paths = append(paths, item) } return paths } var ( internalFixtureOnce sync.Once // callers should use fixtureDirectory() instead internalFixtureDir string ) // fixtureDirectory returns the fixture directory for use within this process. // It returns true if the current process was the one to initialize the directory. 
func fixtureDirectory() (string, bool) { // load or allocate fixture directory var init bool internalFixtureOnce.Do(func() { // reuse fixture directories across child processes for efficiency internalFixtureDir = os.Getenv("OS_TEST_FIXTURE_DIR") if len(internalFixtureDir) == 0 { dir, err := ioutil.TempDir("", "fixture-testdata-dir") if err != nil { panic(err) } internalFixtureDir = dir init = true } }) return internalFixtureDir, init } // FixturePath returns an absolute path to a fixture file in test/extended/testdata/, // test/integration/, or examples/. The contents of the path will not exist until the // test is started. func FixturePath(elem ...string) string { // normalize the element array originalElem := elem elem = prefixFixturePath(elem) relativePath := path.Join(elem...) fixtureDir, _ := fixtureDirectory() fullPath := path.Join(fixtureDir, relativePath) absPath, err := filepath.Abs(fullPath) if err != nil { panic(err) } if testsStarted { // extract the contents to disk if err := testdata.RestoreAssets(fixtureDir, relativePath); err != nil { panic(err) } if err := filepath.Walk(fullPath, func(path string, info os.FileInfo, err error) error { if err := os.Chmod(path, 0640); err != nil { return err } if stat, err := os.Lstat(path); err == nil && stat.IsDir() { return os.Chmod(path, 0755) } return nil }); err != nil { panic(err) } } else { // defer extraction of content to a BeforeEach when called before tests start g.BeforeEach(func() { FixturePath(originalElem...) }) } return absPath } // FetchURL grabs the output from the specified url and returns it. // It will retry once per second for duration retryTimeout if an error occurs during the request. 
func FetchURL(oc *CLI, url string, retryTimeout time.Duration) (string, error) { ns := oc.KubeFramework().Namespace.Name execPodName := CreateExecPodOrFail(oc.AdminKubeClient().CoreV1(), ns, string(uuid.NewUUID())) defer func() { oc.AdminKubeClient().CoreV1().Pods(ns).Delete(context.Background(), execPodName, *metav1.NewDeleteOptions(1)) }() execPod, err := oc.AdminKubeClient().CoreV1().Pods(ns).Get(context.Background(), execPodName, metav1.GetOptions{}) if err != nil { return "", err } var response string waitFn := func() (bool, error) { e2e.Logf("Waiting up to %v to wget %s", retryTimeout, url) //cmd := fmt.Sprintf("wget -T 30 -O- %s", url) cmd := fmt.Sprintf("curl -vvv %s", url) response, err = e2e.RunHostCmd(execPod.Namespace, execPod.Name, cmd) if err != nil { e2e.Logf("got err: %v, retry until timeout", err) return false, nil } // Need to check output because wget -q might omit the error. if strings.TrimSpace(response) == "" { e2e.Logf("got empty stdout, retry until timeout") return false, nil } return true, nil } pollErr := wait.Poll(time.Duration(1*time.Second), retryTimeout, waitFn) if pollErr == wait.ErrWaitTimeout { return "", fmt.Errorf("Timed out while fetching url %q", url) } if pollErr != nil { return "", pollErr } return response, nil } // ParseLabelsOrDie turns the given string into a label selector or // panics; for tests or other cases where you know the string is valid. // TODO: Move this to the upstream labels package. func ParseLabelsOrDie(str string) labels.Selector { ret, err := labels.Parse(str) if err != nil { panic(fmt.Sprintf("cannot parse '%v': %v", str, err)) } return ret } // LaunchWebserverPod launches a pod serving http on port 8080 to act // as the target for networking connectivity checks. The ip address // of the created pod will be returned if the pod is launched // successfully. 
func LaunchWebserverPod(f *e2e.Framework, podName, nodeName string) (ip string) { containerName := fmt.Sprintf("%s-container", podName) port := 8080 pod := &corev1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: podName, }, Spec: corev1.PodSpec{ Containers: []corev1.Container{ { Name: containerName, Image: image.GetE2EImage(image.Agnhost), Args: []string{"netexec", "--http-port", fmt.Sprintf("%d", port)}, Ports: []corev1.ContainerPort{{ContainerPort: int32(port)}}, }, }, NodeName: nodeName, RestartPolicy: corev1.RestartPolicyNever, }, } podClient := f.ClientSet.CoreV1().Pods(f.Namespace.Name) _, err := podClient.Create(context.Background(), pod, metav1.CreateOptions{}) e2e.ExpectNoError(err) e2e.ExpectNoError(e2epod.WaitForPodNameRunningInNamespace(f.ClientSet, podName, f.Namespace.Name)) createdPod, err := podClient.Get(context.Background(), podName, metav1.GetOptions{}) e2e.ExpectNoError(err) ip = net.JoinHostPort(createdPod.Status.PodIP, strconv.Itoa(port)) e2e.Logf("Target pod IP:port is %s", ip) return } func WaitForEndpoint(c kubernetes.Interface, ns, name string) error { for t := time.Now(); time.Since(t) < 3*time.Minute; time.Sleep(5 * time.Second) { endpoint, err := c.CoreV1().Endpoints(ns).Get(context.Background(), name, metav1.GetOptions{}) if kapierrs.IsNotFound(err) { e2e.Logf("Endpoint %s/%s is not ready yet", ns, name) continue } o.Expect(err).NotTo(o.HaveOccurred()) if len(endpoint.Subsets) == 0 || len(endpoint.Subsets[0].Addresses) == 0 { e2e.Logf("Endpoint %s/%s is not ready yet", ns, name) continue } else { return nil } } return fmt.Errorf("Failed to get endpoints for %s/%s", ns, name) } // GetEndpointAddress will return an "ip:port" string for the endpoint. 
func GetEndpointAddress(oc *CLI, name string) (string, error) { err := WaitForEndpoint(oc.KubeFramework().ClientSet, oc.Namespace(), name) if err != nil { return "", err } endpoint, err := oc.KubeClient().CoreV1().Endpoints(oc.Namespace()).Get(context.Background(), name, metav1.GetOptions{}) if err != nil { return "", err } return fmt.Sprintf("%s:%d", endpoint.Subsets[0].Addresses[0].IP, endpoint.Subsets[0].Ports[0].Port), nil } // CreateExecPodOrFail creates a simple busybox pod in a sleep loop used as a // vessel for kubectl exec commands. // Returns the name of the created pod. // TODO: expose upstream func CreateExecPodOrFail(client corev1client.CoreV1Interface, ns, name string) string { e2e.Logf("Creating new exec pod") execPod := e2epod.NewExecPodSpec(ns, name, false) created, err := client.Pods(ns).Create(context.Background(), execPod, metav1.CreateOptions{}) o.Expect(err).NotTo(o.HaveOccurred()) err = wait.PollImmediate(e2e.Poll, 5*time.Minute, func() (bool, error) { retrievedPod, err := client.Pods(execPod.Namespace).Get(context.Background(), created.Name, metav1.GetOptions{}) if err != nil { return false, nil } return retrievedPod.Status.Phase == corev1.PodRunning, nil }) o.Expect(err).NotTo(o.HaveOccurred()) return created.Name } // CheckForBuildEvent will poll a build for up to 1 minute looking for an event with // the specified reason and message template. 
func CheckForBuildEvent(client corev1client.CoreV1Interface, build *buildv1.Build, reason, message string) { scheme, _ := apitesting.SchemeForOrDie(buildv1.Install) var expectedEvent *corev1.Event err := wait.PollImmediate(e2e.Poll, 1*time.Minute, func() (bool, error) { events, err := client.Events(build.Namespace).Search(scheme, build) if err != nil { return false, err } for _, event := range events.Items { e2e.Logf("Found event %#v", event) if reason == event.Reason { expectedEvent = &event return true, nil } } return false, nil }) o.ExpectWithOffset(1, err).NotTo(o.HaveOccurred(), "Should be able to get events from the build") o.ExpectWithOffset(1, expectedEvent).NotTo(o.BeNil(), "Did not find a %q event on build %s/%s", reason, build.Namespace, build.Name) o.ExpectWithOffset(1, expectedEvent.Message).To(o.Equal(fmt.Sprintf(message, build.Namespace, build.Name))) } type podExecutor struct { client *CLI podName string } // NewPodExecutor returns an executor capable of running commands in a Pod. func NewPodExecutor(oc *CLI, name, image string) (*podExecutor, error) { out, err := oc.Run("run").Args(name, "--labels", "name="+name, "--image", image, "--restart", "Never", "--command", "--", "/bin/bash", "-c", "sleep infinity").Output() if err != nil { return nil, fmt.Errorf("error: %v\n(%s)", err, out) } _, err = WaitForPods(oc.KubeClient().CoreV1().Pods(oc.Namespace()), ParseLabelsOrDie("name="+name), CheckPodIsReady, 1, 3*time.Minute) if err != nil { return nil, err } return &podExecutor{client: oc, podName: name}, nil } // Exec executes a single command or a bash script in the running pod. It returns the // command output and error if the command finished with non-zero status code or the // command took longer then 3 minutes to run. 
func (r *podExecutor) Exec(script string) (string, error) { var out string waitErr := wait.PollImmediate(1*time.Second, 3*time.Minute, func() (bool, error) { var err error out, err = r.client.Run("exec").Args(r.podName, "--", "/bin/bash", "-c", script).Output() return true, err }) return out, waitErr } func (r *podExecutor) CopyFromHost(local, remote string) error { _, err := r.client.Run("cp").Args(local, fmt.Sprintf("%s:%s", r.podName, remote)).Output() return err } // RunOneShotCommandPod runs the given command in a pod and waits for completion and log output for the given timeout // duration, returning the command output or an error. // TODO: merge with the PodExecutor above func RunOneShotCommandPod( oc *CLI, name, image, command string, volumeMounts []corev1.VolumeMount, volumes []corev1.Volume, env []corev1.EnvVar, timeout time.Duration, ) (string, []error) { errs := []error{} cmd := strings.Split(command, " ") args := cmd[1:] var output string pod, err := oc.AdminKubeClient().CoreV1().Pods(oc.Namespace()).Create(context.Background(), newCommandPod(name, image, cmd[0], args, volumeMounts, volumes, env), metav1.CreateOptions{}) if err != nil { return "", []error{err} } // Wait for command completion. 
err = wait.PollImmediate(1*time.Second, timeout, func() (done bool, err error) { cmdPod, getErr := oc.AdminKubeClient().CoreV1().Pods(oc.Namespace()).Get(context.Background(), pod.Name, metav1.GetOptions{}) if getErr != nil { e2e.Logf("failed to get pod %q: %v", pod.Name, err) return false, nil } if err := podHasErrored(cmdPod); err != nil { e2e.Logf("pod %q errored trying to run the command: %v", pod.Name, err) return false, err } return podHasCompleted(cmdPod), nil }) if err != nil { errs = append(errs, fmt.Errorf("error waiting for the pod '%s' to complete: %v", pod.Name, err)) } // Gather pod log output err = wait.PollImmediate(1*time.Second, timeout, func() (done bool, err error) { logs, logErr := getPodLogs(oc, pod) if logErr != nil { return false, logErr } if len(logs) == 0 { return false, nil } output = logs return true, nil }) if err != nil { errs = append(errs, fmt.Errorf("command pod %s did not complete: %v", pod.Name, err)) } return output, errs } func podHasCompleted(pod *corev1.Pod) bool { return len(pod.Status.ContainerStatuses) > 0 && pod.Status.ContainerStatuses[0].State.Terminated != nil && pod.Status.ContainerStatuses[0].State.Terminated.Reason == "Completed" } func podHasErrored(pod *corev1.Pod) error { if len(pod.Status.ContainerStatuses) > 0 && pod.Status.ContainerStatuses[0].State.Terminated != nil && pod.Status.ContainerStatuses[0].State.Terminated.Reason == "Error" { return errors.New(pod.Status.ContainerStatuses[0].State.Terminated.Message) } return nil } func getPodLogs(oc *CLI, pod *corev1.Pod) (string, error) { reader, err := oc.AdminKubeClient().CoreV1().Pods(oc.Namespace()).GetLogs(pod.Name, &corev1.PodLogOptions{}).Stream(context.Background()) if err != nil { return "", err } logs, err := ioutil.ReadAll(reader) if err != nil { return "", err } return string(logs), nil } func newCommandPod(name, image, command string, args []string, volumeMounts []corev1.VolumeMount, volumes []corev1.Volume, env []corev1.EnvVar) *corev1.Pod { return 
&corev1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: name, }, Spec: corev1.PodSpec{ Volumes: volumes, RestartPolicy: corev1.RestartPolicyOnFailure, Containers: []corev1.Container{ { Name: name, Image: image, Command: []string{command}, Args: args, VolumeMounts: volumeMounts, ImagePullPolicy: "Always", Env: env, TerminationMessagePolicy: corev1.TerminationMessageFallbackToLogsOnError, }, }, }, } } type GitRepo struct { baseTempDir string upstream git.Repository upstreamPath string repo git.Repository RepoPath string } // AddAndCommit commits a file with its content to local repo func (r GitRepo) AddAndCommit(file, content string) error { dir := filepath.Dir(file) if err := os.MkdirAll(filepath.Join(r.RepoPath, dir), 0777); err != nil { return err } if err := ioutil.WriteFile(filepath.Join(r.RepoPath, file), []byte(content), 0666); err != nil { return err } if err := r.repo.Add(r.RepoPath, file); err != nil { return err } if err := r.repo.Commit(r.RepoPath, "added file "+file); err != nil { return err } return nil } // Remove performs cleanup of no longer needed directories with local and "remote" git repo func (r GitRepo) Remove() { if r.baseTempDir != "" { os.RemoveAll(r.baseTempDir) } } // NewGitRepo creates temporary test directories with local and "remote" git repo func NewGitRepo(repoName string) (GitRepo, error) { testDir, err := ioutil.TempDir(os.TempDir(), repoName) if err != nil { return GitRepo{}, err } repoPath := filepath.Join(testDir, repoName) upstreamPath := repoPath + `.git` upstream := git.NewRepository() if err = upstream.Init(upstreamPath, true); err != nil { return GitRepo{baseTempDir: testDir}, err } repo := git.NewRepository() if err = repo.Clone(repoPath, upstreamPath); err != nil { return GitRepo{baseTempDir: testDir}, err } return GitRepo{testDir, upstream, upstreamPath, repo, repoPath}, nil } // WaitForUserBeAuthorized waits a minute until the cluster bootstrap roles are available // and the provided user is authorized to perform the action on 
the resource. func WaitForUserBeAuthorized(oc *CLI, user, verb, resource string) error { sar := &authorizationapi.SubjectAccessReview{ Spec: authorizationapi.SubjectAccessReviewSpec{ ResourceAttributes: &authorizationapi.ResourceAttributes{ Namespace: oc.Namespace(), Verb: verb, Resource: resource, }, User: user, }, } return wait.PollImmediate(1*time.Second, 1*time.Minute, func() (bool, error) { e2e.Logf("Waiting for user '%v' to be authorized to %v the %v resource", user, verb, resource) resp, err := oc.AdminKubeClient().AuthorizationV1().SubjectAccessReviews().Create(context.Background(), sar, metav1.CreateOptions{}) if err == nil && resp != nil && resp.Status.Allowed { return true, nil } if err != nil { e2e.Logf("Error creating SubjectAccessReview: %v", err) } if resp != nil { e2e.Logf("SubjectAccessReview.Status: %#v", resp.Status) } return false, err }) } // GetRouterPodTemplate finds the router pod template across different namespaces, // helping to mitigate the transition from the default namespace to an operator // namespace. 
func GetRouterPodTemplate(oc *CLI) (*corev1.PodTemplateSpec, string, error) { appsclient := oc.AdminAppsClient().AppsV1() k8sappsclient := oc.AdminKubeClient().AppsV1() for _, ns := range []string{"default", "openshift-ingress", "tectonic-ingress"} { dc, err := appsclient.DeploymentConfigs(ns).Get(context.Background(), "router", metav1.GetOptions{}) if err == nil { return dc.Spec.Template, ns, nil } if !kapierrs.IsNotFound(err) { return nil, "", err } deploy, err := k8sappsclient.Deployments(ns).Get(context.Background(), "router", metav1.GetOptions{}) if err == nil { return &deploy.Spec.Template, ns, nil } if !kapierrs.IsNotFound(err) { return nil, "", err } deploy, err = k8sappsclient.Deployments(ns).Get(context.Background(), "router-default", metav1.GetOptions{}) if err == nil { return &deploy.Spec.Template, ns, nil } if !kapierrs.IsNotFound(err) { return nil, "", err } } return nil, "", kapierrs.NewNotFound(schema.GroupResource{Group: "apps.openshift.io", Resource: "deploymentconfigs"}, "router") } // FindImageFormatString returns a format string for components on the cluster. It returns false // if no format string could be inferred from the cluster. OpenShift 4.0 clusters will not be able // to infer an image format string, so you must wrap this method in one that can locate your specific // image. 
func FindImageFormatString(oc *CLI) (string, bool) { // legacy support for 3.x clusters template, _, err := GetRouterPodTemplate(oc) if err == nil { if strings.Contains(template.Spec.Containers[0].Image, "haproxy-router") { return strings.Replace(template.Spec.Containers[0].Image, "haproxy-router", "${component}", -1), true } } // in openshift 4.0, no image format can be calculated on cluster return "openshift/origin-${component}:latest", false } func FindCLIImage(oc *CLI) (string, bool) { // look up image stream is, err := oc.AdminImageClient().ImageV1().ImageStreams("openshift").Get(context.Background(), "cli", metav1.GetOptions{}) if err == nil { for _, tag := range is.Spec.Tags { if tag.Name == "latest" && tag.From != nil && tag.From.Kind == "DockerImage" { return tag.From.Name, true } } } format, ok := FindImageFormatString(oc) return strings.Replace(format, "${component}", "cli", -1), ok } func FindRouterImage(oc *CLI) (string, error) { configclient := oc.AdminConfigClient().ConfigV1() o, err := configclient.ClusterOperators().Get(context.Background(), "ingress", metav1.GetOptions{}) if err != nil { return "", err } for _, v := range o.Status.Versions { if v.Name == "ingress-controller" { return v.Version, nil } } return "", fmt.Errorf("expected to find ingress-controller version on clusteroperators/ingress") } func IsClusterOperated(oc *CLI) bool { configclient := oc.AdminConfigClient().ConfigV1() o, err := configclient.Images().Get(context.Background(), "cluster", metav1.GetOptions{}) if o == nil || err != nil { e2e.Logf("Could not find image config object, assuming non-4.0 installed cluster: %v", err) return false } return true }
[ "\"KUBECONFIG\"", "\"ARTIFACT_DIR\"", "\"OS_TEST_FIXTURE_DIR\"" ]
[]
[ "OS_TEST_FIXTURE_DIR", "ARTIFACT_DIR", "KUBECONFIG" ]
[]
["OS_TEST_FIXTURE_DIR", "ARTIFACT_DIR", "KUBECONFIG"]
go
3
0
dev/tasks/crossbow.py
#!/usr/bin/env python # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import os import re import time import click import hashlib import gnupg import toolz import pygit2 import github3 import jira.client from io import StringIO from pathlib import Path from textwrap import dedent from datetime import datetime from jinja2 import Template, StrictUndefined from setuptools_scm.git import parse as parse_git_version from ruamel.yaml import YAML CWD = Path(__file__).parent.absolute() NEW_FEATURE = 'New Features and Improvements' BUGFIX = 'Bug Fixes' def md(template, *args, **kwargs): """Wraps string.format with naive markdown escaping""" def escape(s): for char in ('*', '#', '_', '~', '`', '>'): s = s.replace(char, '\\' + char) return s return template.format(*map(escape, args), **toolz.valmap(escape, kwargs)) def unflatten(mapping): result = {} for path, value in mapping.items(): parents, leaf = path[:-1], path[-1] # create the hierarchy until we reach the leaf value temp = result for parent in parents: temp.setdefault(parent, {}) temp = temp[parent] # set the leaf value temp[leaf] = value return result # configurations for setting up branch skipping # - appveyor has a feature to skip builds without an appveyor.yml # - travis reads from the master branch 
and applies the rules # - circle requires the configuration to be present on all branch, even ones # that are configured to be skipped _default_travis_yml = """ branches: only: - master - /.*-travis-.*/ os: linux dist: trusty language: generic """ _default_circle_yml = """ version: 2 jobs: build: machine: true workflows: version: 2 build: jobs: - build: filters: branches: only: - /.*-circle-.*/ """ _default_tree = { '.travis.yml': _default_travis_yml, '.circleci/config.yml': _default_circle_yml } class JiraChangelog: def __init__(self, version, username, password, server='https://issues.apache.org/jira'): self.server = server # clean version to the first numbers self.version = '.'.join(version.split('.')[:3]) query = ("project=ARROW " "AND fixVersion='{0}' " "AND status = Resolved " "AND resolution in (Fixed, Done) " "ORDER BY issuetype DESC").format(self.version) self.client = jira.client.JIRA({'server': server}, basic_auth=(username, password)) self.issues = self.client.search_issues(query, maxResults=9999) def format_markdown(self): out = StringIO() issues_by_type = toolz.groupby(lambda i: i.fields.issuetype.name, self.issues) for typename, issues in sorted(issues_by_type.items()): issues.sort(key=lambda x: x.key) out.write(md('## {}\n\n', typename)) for issue in issues: out.write(md('* {} - {}\n', issue.key, issue.fields.summary)) out.write('\n') return out.getvalue() def format_website(self): # jira category => website category mapping categories = { 'New Feature': 'feature', 'Improvement': 'feature', 'Wish': 'feature', 'Task': 'feature', 'Test': 'bug', 'Bug': 'bug', 'Sub-task': 'feature' } titles = { 'feature': 'New Features and Improvements', 'bugfix': 'Bug Fixes' } issues_by_category = toolz.groupby( lambda issue: categories[issue.fields.issuetype.name], self.issues ) out = StringIO() for category in ('feature', 'bug'): title = titles[category] issues = issues_by_category[category] issues.sort(key=lambda x: x.key) out.write(md('## {}\n\n', title)) for issue 
in issues: link = md('[{0}]({1}/browse/{0})', issue.key, self.server) out.write(md('* {} - {}\n', link, issue.fields.summary)) out.write('\n') return out.getvalue() def render(self, old_changelog, website=False): old_changelog = old_changelog.splitlines() if website: new_changelog = self.format_website() else: new_changelog = self.format_markdown() out = StringIO() # Apache license header out.write('\n'.join(old_changelog[:18])) # Newly generated changelog today = datetime.today().strftime('%d %B %Y') out.write(md('\n\n# Apache Arrow {} ({})\n\n', self.version, today)) out.write(new_changelog) out.write('\n'.join(old_changelog[19:])) return out.getvalue().strip() class GitRemoteCallbacks(pygit2.RemoteCallbacks): def __init__(self, token): self.token = token self.attempts = 0 super().__init__() def push_update_reference(self, refname, message): pass def update_tips(self, refname, old, new): pass def credentials(self, url, username_from_url, allowed_types): # its a libgit2 bug, that it infinitly retries the authentication self.attempts += 1 if self.attempts >= 5: # pygit2 doesn't propagate the exception properly msg = 'Wrong oauth personal access token' print(msg) raise ValueError(msg) if allowed_types & pygit2.credentials.GIT_CREDTYPE_USERPASS_PLAINTEXT: return pygit2.UserPass(self.token, 'x-oauth-basic') else: return None class Repo: """Base class for interaction with local git repositories A high level wrapper used for both reading revision information from arrow's repository and pushing continuous integration tasks to the queue repository. 
Parameters ---------- require_https : boolean, default False Raise exception for SSH origin URLs """ def __init__(self, path, github_token=None, require_https=False): self.path = Path(path) self.repo = pygit2.Repository(str(self.path)) self.github_token = github_token self.require_https = require_https self._updated_refs = [] def __str__(self): tpl = dedent(''' Repo: {remote}@{branch} Commit: {head} ''') return tpl.format( remote=self.remote_url, branch=self.branch.branch_name, head=self.head ) @property def origin(self): remote = self.repo.remotes['origin'] if self.require_https and remote.url.startswith('[email protected]'): raise ValueError("Change SSH origin URL to HTTPS to use " "Crossbow: {}".format(remote.url)) return remote def fetch(self): refspec = '+refs/heads/*:refs/remotes/origin/*' self.origin.fetch([refspec]) def push(self): callbacks = GitRemoteCallbacks(self.github_token) try: self.origin.push(self._updated_refs, callbacks=callbacks) except pygit2.GitError: raise RuntimeError('Failed to push updated references, ' 'potentially because of credential issues: {}' .format(self._updated_refs)) else: self.updated_refs = [] @property def head(self): """Currently checked out commit's sha""" return self.repo.head @property def branch(self): """Currently checked out branch""" return self.repo.branches[self.repo.head.shorthand] @property def remote(self): """Currently checked out branch's remote counterpart""" if self.branch.upstream is None: raise RuntimeError('Cannot determine git remote to push to, try ' 'to push the branch first to have a remote ' 'tracking counterpart.') else: return self.repo.remotes[self.branch.upstream.remote_name] @property def remote_url(self): """Currently checked out branch's remote counterpart URL If an SSH github url is set, it will be replaced by the https equivalent usable with Github OAuth token. 
""" return _git_ssh_to_https(self.remote.url) @property def user_name(self): try: return next(self.repo.config.get_multivar('user.name')) except StopIteration: return os.environ.get('GIT_COMMITTER_NAME', 'unkown') @property def user_email(self): try: return next(self.repo.config.get_multivar('user.email')) except StopIteration: return os.environ.get('GIT_COMMITTER_EMAIL', 'unkown') @property def signature(self): return pygit2.Signature(self.user_name, self.user_email, int(time.time())) def create_tree(self, files): builder = self.repo.TreeBuilder() for filename, content in files.items(): if isinstance(content, dict): # create a subtree tree_id = self.create_tree(content) builder.insert(filename, tree_id, pygit2.GIT_FILEMODE_TREE) else: # create a file blob_id = self.repo.create_blob(content) builder.insert(filename, blob_id, pygit2.GIT_FILEMODE_BLOB) tree_id = builder.write() return tree_id def create_branch(self, branch_name, files, parents=[], message='', signature=None): # 1. create tree files = toolz.keymap(lambda path: tuple(path.split('/')), files) files = unflatten(files) tree_id = self.create_tree(files) # 2. create commit with the tree created above # TODO(kszucs): pass signature explicitly author = committer = self.signature commit_id = self.repo.create_commit(None, author, committer, message, tree_id, parents) commit = self.repo[commit_id] # 3. 
create branch pointing to the previously created commit branch = self.repo.create_branch(branch_name, commit) # append to the pushable references self._updated_refs.append('refs/heads/{}'.format(branch_name)) return branch def create_tag(self, tag_name, commit_id, message=''): tag_id = self.repo.create_tag(tag_name, commit_id, pygit2.GIT_OBJ_COMMIT, self.signature, message) # append to the pushable references self._updated_refs.append('refs/tags/{}'.format(tag_name)) return self.repo[tag_id] def file_contents(self, commit_id, file): commit = self.repo[commit_id] entry = commit.tree[file] blob = self.repo[entry.id] return blob.data def _parse_github_user_repo(self): m = re.match(r'.*\/([^\/]+)\/([^\/\.]+)(\.git)?$', self.remote_url) user, repo = m.group(1), m.group(2) return user, repo def as_github_repo(self): """Converts it to a repository object which wraps the GitHub API""" username, reponame = self._parse_github_user_repo() gh = github3.login(token=self.github_token) return gh.repository(username, reponame) def _git_ssh_to_https(url): return url.replace('[email protected]:', 'https://github.com/') class Queue(Repo): def _next_job_id(self, prefix): """Auto increments the branch's identifier based on the prefix""" pattern = re.compile(r'[\w\/-]*{}-(\d+)'.format(prefix)) matches = list(filter(None, map(pattern.match, self.repo.branches))) if matches: latest = max(int(m.group(1)) for m in matches) else: latest = 0 return '{}-{}'.format(prefix, latest + 1) def get(self, job_name): branch_name = 'origin/{}'.format(job_name) branch = self.repo.branches[branch_name] content = self.file_contents(branch.target, 'job.yml') buffer = StringIO(content.decode('utf-8')) return yaml.load(buffer) def put(self, job, prefix='build'): if not isinstance(job, Job): raise ValueError('`job` must be an instance of Job') if job.branch is not None: raise ValueError('`job.branch` is automatically generated, thus ' 'it must be blank') # auto increment and set next job id, e.g. 
build-85 job.branch = self._next_job_id(prefix) # create tasks' branches for task_name, task in job.tasks.items(): # adding CI's name to the end of the branch in order to use skip # patterns on travis and circleci task.branch = '{}-{}-{}'.format(job.branch, task.ci, task_name) files = task.render_files(job=job, arrow=job.target) branch = self.create_branch(task.branch, files=files) self.create_tag(task.tag, branch.target) task.commit = str(branch.target) # create job's branch with its description return self.create_branch(job.branch, files=job.render_files()) def github_statuses(self, job): repo = self.as_github_repo() return {name: repo.commit(task.commit).status() for name, task in job.tasks.items()} def github_assets(self, task): repo = self.as_github_repo() try: release = repo.release_from_tag(task.tag) except github3.exceptions.NotFoundError: return {} assets = {a.name: a for a in release.assets()} artifacts = {} for artifact in task.artifacts: # artifact can be a regex pattern pattern = re.compile(artifact) matches = list(filter(None, map(pattern.match, assets.keys()))) num_matches = len(matches) # validate artifact pattern matches single asset if num_matches > 1: raise ValueError( 'Only a single asset should match pattern `{}`, there are ' 'multiple ones: {}'.format(', '.join(matches)) ) elif num_matches == 1: artifacts[artifact] = assets[matches[0].group(0)] return artifacts def upload_assets(self, job, files, content_type): repo = self.as_github_repo() release = repo.release_from_tag(job.branch) assets = {a.name: a for a in release.assets()} for path in files: if path.name in assets: # remove already uploaded asset assets[path.name].delete() with path.open('rb') as fp: release.upload_asset(name=path.name, asset=fp, content_type=content_type) def get_version(root, **kwargs): """ Parse function for setuptools_scm that ignores tags for non-C++ subprojects, e.g. apache-arrow-js-XXX tags. 
""" kwargs['describe_command'] =\ 'git describe --dirty --tags --long --match "apache-arrow-[0-9].*"' return parse_git_version(root, **kwargs) class Target: """Describes target repository and revision the builds run against This serializable data container holding information about arrow's git remote, branch, sha and version number as well as some metadata (currently only an email address where the notification should be sent). """ def __init__(self, head, branch, remote, version, email=None): self.head = head self.email = email self.branch = branch self.remote = remote self.version = version self.no_rc_version = re.sub(r'-rc\d+\Z', '', version) @classmethod def from_repo(cls, repo, head=None, branch=None, remote=None, version=None, email=None): """Initialize from a repository Optionally override detected remote, branch, head, and/or version. """ assert isinstance(repo, Repo) if head is None: head = str(repo.head.target) if branch is None: branch = repo.branch.branch_name if remote is None: remote = repo.remote_url if version is None: version = get_version(repo.path).format_with('{tag}.dev{distance}') if email is None: email = repo.user_email return cls(head=head, email=email, branch=branch, remote=remote, version=version) class Task: """Describes a build task and metadata required to render CI templates A task is represented as a single git commit and branch containing jinja2 rendered files (currently appveyor.yml or .travis.yml configurations). A task can't be directly submitted to a queue, must belong to a job. Each task's unique identifier is its branch name, which is generated after submitting the job to a queue. 
""" def __init__(self, platform, ci, template, artifacts=None, params=None): assert platform in {'win', 'osx', 'linux'} assert ci in {'circle', 'travis', 'appveyor'} self.ci = ci self.platform = platform self.template = template self.artifacts = artifacts or [] self.params = params or {} self.branch = None # filled after adding to a queue self.commit = None def render_files(self, **extra_params): path = CWD / self.template params = toolz.merge(self.params, extra_params) template = Template(path.read_text(), undefined=StrictUndefined) rendered = template.render(task=self, **params) return toolz.merge(_default_tree, {self.filename: rendered}) @property def tag(self): return self.branch @property def filename(self): config_files = { 'circle': '.circleci/config.yml', 'travis': '.travis.yml', 'appveyor': 'appveyor.yml' } return config_files[self.ci] class Job: """Describes multiple tasks against a single target repository""" def __init__(self, target, tasks): if not tasks: raise ValueError('no tasks were provided for the job') if not all(isinstance(task, Task) for task in tasks.values()): raise ValueError('each `tasks` mus be an instance of Task') if not isinstance(target, Target): raise ValueError('`target` must be an instance of Target') self.target = target self.tasks = tasks self.branch = None # filled after adding to a queue def render_files(self): with StringIO() as buf: yaml.dump(self, buf) content = buf.getvalue() return toolz.merge(_default_tree, {'job.yml': content}) @property def email(self): return os.environ.get('CROSSBOW_EMAIL', self.target.email) # configure yaml serializer yaml = YAML() yaml.register_class(Job) yaml.register_class(Task) yaml.register_class(Target) # state color mapping to highlight console output COLORS = {'ok': 'green', 'error': 'red', 'missing': 'red', 'failure': 'red', 'pending': 'yellow', 'success': 'green'} # define default paths DEFAULT_CONFIG_PATH = CWD / 'tasks.yml' DEFAULT_ARROW_PATH = CWD.parents[1] DEFAULT_QUEUE_PATH = 
CWD.parents[2] / 'crossbow' @click.group() @click.option('--github-token', '-t', default=None, help='OAuth token for GitHub authentication') @click.option('--arrow-path', '-a', type=click.Path(exists=True), default=DEFAULT_ARROW_PATH, help='Arrow\'s repository path. Defaults to the repository of ' 'this script') @click.option('--queue-path', '-q', type=click.Path(exists=True), default=DEFAULT_QUEUE_PATH, help='The repository path used for scheduling the tasks. ' 'Defaults to crossbow directory placed next to arrow') @click.pass_context def crossbow(ctx, github_token, arrow_path, queue_path): if github_token is None: raise click.ClickException( 'Could not determine GitHub token. Please set the ' 'CROSSBOW_GITHUB_TOKEN environment variable to a ' 'valid GitHub access token or pass one to --github-token.' ) ctx.obj['arrow'] = Repo(arrow_path) ctx.obj['queue'] = Queue(queue_path, github_token=github_token, require_https=True) @crossbow.command() @click.option('--changelog-path', '-c', type=click.Path(exists=True), default=DEFAULT_ARROW_PATH / 'CHANGELOG.md', help='Path of changelog to update') @click.option('--arrow-version', '-v', default=None, help='Set target version explicitly') @click.option('--is-website', '-w', default=False) @click.option('--jira-username', '-u', default=None, help='JIRA username') @click.option('--jira-password', '-P', default=None, help='JIRA password') @click.option('--dry-run/--write', default=False, help='Just display the new changelog, don\'t write it') @click.pass_context def changelog(ctx, changelog_path, arrow_version, is_website, jira_username, jira_password, dry_run): changelog_path = Path(changelog_path) target = Target.from_repo(ctx.obj['arrow']) version = arrow_version or target.version changelog = JiraChangelog(version, username=jira_username, password=jira_password) new_content = changelog.render(changelog_path.read_text(), website=is_website) if dry_run: click.echo(new_content) else: changelog_path.write_text(new_content) 
click.echo('New changelog successfully generated, see git diff for the' 'changes') def load_tasks_from_config(config_path, task_names, group_names): with Path(config_path).open() as fp: config = yaml.load(fp) groups = config['groups'] tasks = config['tasks'] valid_groups = set(groups.keys()) valid_tasks = set(tasks.keys()) requested_groups = set(group_names) invalid_groups = requested_groups - valid_groups if invalid_groups: raise click.ClickException('Invalid group(s) {!r}. Must be one of {!r}' .format(invalid_groups, valid_groups)) requested_tasks = [list(groups[name]) for name in group_names] requested_tasks = set(sum(requested_tasks, list(task_names))) invalid_tasks = requested_tasks - valid_tasks if invalid_tasks: raise click.ClickException('Invalid task(s) {!r}. Must be one of {!r}' .format(invalid_tasks, valid_tasks)) return {t: config['tasks'][t] for t in requested_tasks} @crossbow.command() @click.argument('task', nargs=-1, required=False) @click.option('--group', '-g', multiple=True, help='Submit task groups as defined in task.yml') @click.option('--job-prefix', default='build', help='Arbitrary prefix for branch names, e.g. nightly') @click.option('--config-path', '-c', type=click.Path(exists=True), default=DEFAULT_CONFIG_PATH, help='Task configuration yml. Defaults to tasks.yml') @click.option('--arrow-version', '-v', default=None, help='Set target version explicitly.') @click.option('--arrow-remote', '-r', default=None, help='Set Github remote explicitly, which is going to be cloned ' 'on the CI services. Note, that no validation happens ' 'locally. Examples: https://github.com/apache/arrow or ' 'https://github.com/kszucs/arrow.') @click.option('--arrow-branch', '-b', default=None, help='Give the branch name explicitly, e.g. master, ARROW-1949.') @click.option('--arrow-sha', '-t', default=None, help='Set commit SHA or Tag name explicitly, e.g. 
f67a515, ' 'apache-arrow-0.11.1.') @click.option('--dry-run/--push', default=False, help='Just display the rendered CI configurations without ' 'submitting them') @click.option('--output', metavar='<output>', type=click.File('w', encoding='utf8'), default='-', help='Capture output result into file.') @click.pass_context def submit(ctx, task, group, job_prefix, config_path, arrow_version, arrow_remote, arrow_branch, arrow_sha, dry_run, output): queue, arrow = ctx.obj['queue'], ctx.obj['arrow'] # Override the detected repo url / remote, branch and sha - this aims to # make release procedure a bit simpler. # Note, that the target resivion's crossbow templates must be # compatible with the locally checked out version of crossbow (which is # in case of the release procedure), because the templates still # contain some business logic (dependency installation, deployments) # which will be reduced to a single command in the future. target = Target.from_repo(arrow, remote=arrow_remote, branch=arrow_branch, head=arrow_sha, version=arrow_version) params = { 'version': target.version, 'no_rc_version': target.no_rc_version, } # task and group variables are lists, containing multiple values tasks = {} task_configs = load_tasks_from_config(config_path, task, group) for name, task in task_configs.items(): # replace version number and create task instance from configuration artifacts = task.pop('artifacts', None) or [] # because of yaml artifacts = [fn.format(**params) for fn in artifacts] tasks[name] = Task(artifacts=artifacts, **task) # create job instance, doesn't mutate git data yet job = Job(target=target, tasks=tasks) if dry_run: yaml.dump(job, output) else: queue.fetch() queue.put(job, prefix=job_prefix) queue.push() yaml.dump(job, output) click.echo('Pushed job identifier is: `{}`'.format(job.branch)) @crossbow.command() @click.argument('job-name', required=True) @click.option('--output', metavar='<output>', type=click.File('w', encoding='utf8'), default='-', help='Capture 
output result into file.') @click.pass_context def status(ctx, job_name, output): queue = ctx.obj['queue'] queue.fetch() tpl = '[{:>7}] {:<49} {:>20}' header = tpl.format('status', 'branch', 'artifacts') click.echo(header, file=output) click.echo('-' * len(header), file=output) job = queue.get(job_name) statuses = queue.github_statuses(job) for task_name, task in sorted(job.tasks.items()): status = statuses[task_name] assets = queue.github_assets(task) uploaded = 'uploaded {} / {}'.format( sum(a in assets for a in task.artifacts), len(task.artifacts) ) leadline = tpl.format(status.state.upper(), task.branch, uploaded) click.echo(click.style(leadline, fg=COLORS[status.state]), file=output) for artifact in task.artifacts: try: asset = assets[artifact] except KeyError: state = 'pending' if status.state == 'pending' else 'missing' filename = '{:>70} '.format(artifact) else: state = 'ok' filename = '{:>70} '.format(asset.name) statemsg = '[{:>7}]'.format(state.upper()) click.echo(filename + click.style(statemsg, fg=COLORS[state]), file=output) def hashbytes(bytes, algoname): """Hash `bytes` using the algorithm named `algoname`. Parameters ---------- bytes : bytes The bytes to hash algoname : str The name of class in the hashlib standard library module Returns ------- str Hexadecimal digest of `bytes` hashed using `algoname` """ algo = getattr(hashlib, algoname)() algo.update(bytes) result = algo.hexdigest() return result @crossbow.command() @click.argument('job-name', required=True) @click.option('-g', '--gpg-homedir', default=None, type=click.Path(exists=True, file_okay=False, dir_okay=True), help=('Full pathname to directory containing the public and ' 'private keyrings. 
Default is whatever GnuPG defaults to')) @click.option('-t', '--target-dir', default=DEFAULT_ARROW_PATH / 'packages', type=click.Path(file_okay=False, dir_okay=True), help='Directory to download the build artifacts') @click.option('-a', '--algorithm', default=['sha256', 'sha512'], show_default=True, type=click.Choice(sorted(hashlib.algorithms_guaranteed)), multiple=True, help=('Algorithm(s) used to generate checksums. Pass multiple ' 'algorithms by passing -a/--algorithm multiple times')) @click.pass_context def sign(ctx, job_name, gpg_homedir, target_dir, algorithm): """Download and sign build artifacts from github releases""" gpg = gnupg.GPG(gnupghome=gpg_homedir) # fetch the queue repository queue = ctx.obj['queue'] queue.fetch() # query the job's artifacts job = queue.get(job_name) target_dir = Path(target_dir).absolute() / job_name target_dir.mkdir(parents=True, exist_ok=True) click.echo('Download {}\'s artifacts to {}'.format(job_name, target_dir)) tpl = '{:<10} {:>73}' task_items = sorted(job.tasks.items()) ntasks = len(task_items) for i, (task_name, task) in enumerate(task_items, start=1): assets = queue.github_assets(task) artifact_dir = target_dir / task_name artifact_dir.mkdir(exist_ok=True) basemsg = 'Downloading and signing assets for task {}'.format( click.style(task_name, bold=True) ) click.echo( '\n{} {:>{size}}' .format( basemsg, click.style('{}/{}'.format(i, ntasks), bold=True), size=89 - (len(basemsg) + 1) + 2 * len( click.style('', bold=True)) ) ) click.echo('-' * 89) for artifact in task.artifacts: try: asset = assets[artifact] except KeyError: msg = click.style('[{:>13}]'.format('MISSING'), fg=COLORS['missing']) click.echo(tpl.format(msg, artifact)) else: click.echo(click.style(artifact, bold=True)) # download artifact artifact_path = artifact_dir / asset.name asset.download(artifact_path) # sign the artifact signature_path = Path(str(artifact_path) + '.asc') with artifact_path.open('rb') as fp: gpg.sign_file(fp, detach=True, clearsign=False, 
binary=False, output=str(signature_path)) # compute checksums for the artifact artifact_bytes = artifact_path.read_bytes() for algo in algorithm: suffix = '.{}'.format(algo) checksum_path = Path(str(artifact_path) + suffix) checksum = '{} {}'.format( hashbytes(artifact_bytes, algo), artifact_path.name ) checksum_path.write_text(checksum) msg = click.style( '[{:>13}]'.format('{} HASHED'.format(algo)), fg='blue' ) click.echo(tpl.format(msg, checksum_path.name)) msg = click.style('[{:>13}]'.format('SIGNED'), fg=COLORS['ok']) click.echo(tpl.format(msg, str(signature_path.name))) if __name__ == '__main__': crossbow(obj={}, auto_envvar_prefix='CROSSBOW')
[]
[]
[ "GIT_COMMITTER_EMAIL", "GIT_COMMITTER_NAME", "CROSSBOW_EMAIL" ]
[]
["GIT_COMMITTER_EMAIL", "GIT_COMMITTER_NAME", "CROSSBOW_EMAIL"]
python
3
0
tests/bugs/core_6089_test.py
#coding:utf-8 # # id: bugs.core_6089 # title: BLOBs are unnecessarily copied during UPDATE after a table format change # decription: # It's not easy to obtain BLOB_ID using only fdb. Rather in ISQL blob_id will be shown always (even if we do not want this :)). # This test runs ISQL with commands that were provided in the ticket and parses its result by extracting only column BLOB_ID. # Each BLOB_ID is added to set(), so eventually we can get total number of UNIQUE blob IDs that were generated during test. # This number must be equal to number of records in the table (three in this test). # # Confirmed bug on: 4.0.0.1535; 3.0.5.33142. # Works fine on: # 4.0.0.1556: OK, 3.384s. # 3.0.5.33152: OK, 2.617s. # # # tracker_id: CORE-6089 # min_versions: ['3.0.5'] # versions: 3.0.5 # qmid: None import pytest from firebird.qa import db_factory, isql_act, Action # version: 3.0.5 # resources: None substitutions_1 = [] init_script_1 = """""" db_1 = db_factory(sql_dialect=3, init=init_script_1) # test_script_1 #--- # # import os # import re # import subprocess # import time # import fdb # # os.environ["ISC_USER"] = user_name # os.environ["ISC_PASSWORD"] = user_password # db_conn.close() # # #-------------------------------------------- # # def flush_and_close( file_handle ): # # https://docs.python.org/2/library/os.html#os.fsync # # If you're starting with a Python file object f, # # first do f.flush(), and # # then do os.fsync(f.fileno()), to ensure that all internal buffers associated with f are written to disk. # global os # # file_handle.flush() # if file_handle.mode not in ('r', 'rb') and file_handle.name != os.devnull: # # otherwise: "OSError: [Errno 9] Bad file descriptor"! 
# os.fsync(file_handle.fileno()) # file_handle.close() # # #-------------------------------------------- # # def cleanup( f_names_list ): # global os # for i in range(len( f_names_list )): # if type(f_names_list[i]) == file: # del_name = f_names_list[i].name # elif type(f_names_list[i]) == str: # del_name = f_names_list[i] # else: # print('Unrecognized type of element:', f_names_list[i], ' - can not be treated as file.') # del_name = None # # if del_name and os.path.isfile( del_name ): # os.remove( del_name ) # # #-------------------------------------------- # # allowed_patterns = ( re.compile('COL2_BLOB_ID\\s+\\S+', re.IGNORECASE), ) # # sql_txt=''' # set bail on; # set list on; # set blob off; # recreate table t (col1 int, col2 blob); # recreate view v as select col2 as col2_blob_id from t; -- NB: alias for column have to be matched to re.compile() argument # commit; # # insert into t values (1, '1'); # insert into t values (2, '2'); # insert into t values (3, '3'); # commit; # # select v.* from v; # update t set col1 = -col1; # select v.* from v; # # # rollback; # alter table t add col3 date; # select v.* from v; # update t set col1 = -col1; # select v.* from v; -- bug was here # quit; # ''' # # f_isql_cmd=open( os.path.join(context['temp_directory'],'tmp_6089.sql'), 'w') # f_isql_cmd.write( sql_txt ) # flush_and_close( f_isql_cmd ) # # f_isql_log=open( os.path.join(context['temp_directory'],'tmp_6089.log'), 'w') # # subprocess.call([context['isql_path'], dsn, "-q", "-i", f_isql_cmd.name], stdout=f_isql_log, stderr=subprocess.STDOUT) # flush_and_close( f_isql_log ) # # blob_id_set=set() # with open( f_isql_log.name,'r') as f: # for line in f: # match2some = filter( None, [ p.search(line) for p in allowed_patterns ] ) # if match2some: # blob_id_set.add( line.split()[1] ) # # print( 'Number of unique blob IDs: ' + str(len(blob_id_set)) ) # # # Cleanup. 
# ########## # cleanup( (f_isql_cmd, f_isql_log) ) # #--- #act_1 = python_act('db_1', test_script_1, substitutions=substitutions_1) expected_stdout_1 = """ Number of unique blob IDs: 3 """ @pytest.mark.version('>=3.0.5') @pytest.mark.xfail def test_1(db_1): pytest.fail("Test not IMPLEMENTED")
[]
[]
[ "ISC_USER", "ISC_PASSWORD" ]
[]
["ISC_USER", "ISC_PASSWORD"]
python
2
0
src/manage.py
#!/usr/bin/env python import os import sys if __name__ == "__main__": os.environ.setdefault("DJANGO_SETTINGS_MODULE", "settings") try: from django.core.management import execute_from_command_line except ImportError as exc: raise ImportError("ops!") from exc execute_from_command_line(sys.argv)
[]
[]
[]
[]
[]
python
0
0
vendor/github.com/joho/godotenv/godotenv.go
// Package godotenv is a go port of the ruby dotenv library (https://github.com/bkeepers/dotenv) // // Examples/readme can be found on the github page at https://github.com/joho/godotenv // // The TL;DR is that you make a .env file that looks something like // // SOME_ENV_VAR=somevalue // // and then in your go code you can call // // godotenv.Load() // // and all the env vars declared in .env will be available through os.Getenv("SOME_ENV_VAR") package godotenv import ( "bufio" "errors" "fmt" "io" "os" "os/exec" "regexp" "sort" "strings" ) const doubleQuoteSpecialChars = "\\\n\r\"!$`" // Load will read your env file(s) and load them into ENV for this process. // // Call this function as close as possible to the start of your program (ideally in main) // // If you call Load without any args it will default to loading .env in the current path // // You can otherwise tell it which files to load (there can be more than one) like // // godotenv.Load("fileone", "filetwo") // // It's important to note that it WILL NOT OVERRIDE an env variable that already exists - consider the .env file to set dev vars or sensible defaults func Load(filenames ...string) (err error) { filenames = filenamesOrDefault(filenames) for _, filename := range filenames { err = loadFile(filename, false) if err != nil { return // return early on a spazout } } return } // Overload will read your env file(s) and load them into ENV for this process. // // Call this function as close as possible to the start of your program (ideally in main) // // If you call Overload without any args it will default to loading .env in the current path // // You can otherwise tell it which files to load (there can be more than one) like // // godotenv.Overload("fileone", "filetwo") // // It's important to note this WILL OVERRIDE an env variable that already exists - consider the .env file to forcefilly set all vars. 
func Overload(filenames ...string) (err error) { filenames = filenamesOrDefault(filenames) for _, filename := range filenames { err = loadFile(filename, true) if err != nil { return // return early on a spazout } } return } // Read all env (with same file loading semantics as Load) but return values as // a map rather than automatically writing values into env func Read(filenames ...string) (envMap map[string]string, err error) { filenames = filenamesOrDefault(filenames) envMap = make(map[string]string) for _, filename := range filenames { individualEnvMap, individualErr := readFile(filename) if individualErr != nil { err = individualErr return // return early on a spazout } for key, value := range individualEnvMap { envMap[key] = value } } return } // Parse reads an env file from io.Reader, returning a map of keys and values. func Parse(r io.Reader) (envMap map[string]string, err error) { envMap = make(map[string]string) var lines []string scanner := bufio.NewScanner(r) for scanner.Scan() { lines = append(lines, scanner.Text()) } if err = scanner.Err(); err != nil { return } for _, fullLine := range lines { if !isIgnoredLine(fullLine) { var key, value string key, value, err = parseLine(fullLine, envMap) if err != nil { return } envMap[key] = value } } return } //Unmarshal reads an env file from a string, returning a map of keys and values. func Unmarshal(str string) (envMap map[string]string, err error) { return Parse(strings.NewReader(str)) } // Exec loads env vars from the specified filenames (empty map falls back to default) // then executes the cmd specified. // // Simply hooks up os.Stdin/err/out to the command and calls Run() // // If you want more fine grained control over your command it's recommended // that you use `Load()` or `Read()` and the `os/exec` package yourself. func Exec(filenames []string, cmd string, cmdArgs []string) error { Load(filenames...) command := exec.Command(cmd, cmdArgs...) 
command.Stdin = os.Stdin command.Stdout = os.Stdout command.Stderr = os.Stderr return command.Run() } // Write serializes the given environment and writes it to a file func Write(envMap map[string]string, filename string) error { content, error := Marshal(envMap) if error != nil { return error } file, error := os.Create(filename) if error != nil { return error } _, err := file.WriteString(content) return err } // Marshal outputs the given environment as a dotenv-formatted environment file. // Each line is in the format: KEY="VALUE" where VALUE is backslash-escaped. func Marshal(envMap map[string]string) (string, error) { lines := make([]string, 0, len(envMap)) for k, v := range envMap { lines = append(lines, fmt.Sprintf(`%s="%s"`, k, doubleQuoteEscape(v))) } sort.Strings(lines) return strings.Join(lines, "\n"), nil } func filenamesOrDefault(filenames []string) []string { if len(filenames) == 0 { return []string{".env"} } return filenames } func loadFile(filename string, overload bool) error { envMap, err := readFile(filename) if err != nil { return err } currentEnv := map[string]bool{} rawEnv := os.Environ() for _, rawEnvLine := range rawEnv { key := strings.Split(rawEnvLine, "=")[0] currentEnv[key] = true } for key, value := range envMap { if !currentEnv[key] || overload { os.Setenv(key, value) } } return nil } func readFile(filename string) (envMap map[string]string, err error) { file, err := os.Open(filename) if err != nil { return } defer file.Close() return Parse(file) } var exportRegex = regexp.MustCompile(`^\s*(?:export\s+)?(.*?)\s*$`) func parseLine(line string, envMap map[string]string) (key string, value string, err error) { if len(line) == 0 { err = errors.New("zero length string") return } // ditch the comments (but keep quoted hashes) if strings.Contains(line, "#") { segmentsBetweenHashes := strings.Split(line, "#") quotesAreOpen := false var segmentsToKeep []string for _, segment := range segmentsBetweenHashes { if strings.Count(segment, "\"") == 1 || 
strings.Count(segment, "'") == 1 { if quotesAreOpen { quotesAreOpen = false segmentsToKeep = append(segmentsToKeep, segment) } else { quotesAreOpen = true } } if len(segmentsToKeep) == 0 || quotesAreOpen { segmentsToKeep = append(segmentsToKeep, segment) } } line = strings.Join(segmentsToKeep, "#") } firstEquals := strings.Index(line, "=") firstColon := strings.Index(line, ":") splitString := strings.SplitN(line, "=", 2) if firstColon != -1 && (firstColon < firstEquals || firstEquals == -1) { //this is a yaml-style line splitString = strings.SplitN(line, ":", 2) } if len(splitString) != 2 { err = errors.New("Can't separate key from value") return } // Parse the key key = splitString[0] if strings.HasPrefix(key, "export") { key = strings.TrimPrefix(key, "export") } key = strings.TrimSpace(key) key = exportRegex.ReplaceAllString(splitString[0], "$1") // Parse the value value = parseValue(splitString[1], envMap) return } var ( singleQuotesRegex = regexp.MustCompile(`\A'(.*)'\z`) doubleQuotesRegex = regexp.MustCompile(`\A"(.*)"\z`) escapeRegex = regexp.MustCompile(`\\.`) unescapeCharsRegex = regexp.MustCompile(`\\([^$])`) ) func parseValue(value string, envMap map[string]string) string { // trim value = strings.Trim(value, " ") // check if we've got quoted values or possible escapes if len(value) > 1 { singleQuotes := singleQuotesRegex.FindStringSubmatch(value) doubleQuotes := doubleQuotesRegex.FindStringSubmatch(value) if singleQuotes != nil || doubleQuotes != nil { // pull the quotes off the edges value = value[1 : len(value)-1] } if doubleQuotes != nil { // expand newlines value = escapeRegex.ReplaceAllStringFunc(value, func(match string) string { c := strings.TrimPrefix(match, `\`) switch c { case "n": return "\n" case "r": return "\r" default: return match } }) // unescape characters value = unescapeCharsRegex.ReplaceAllString(value, "$1") } if singleQuotes == nil { value = expandVariables(value, envMap) } } return value } var expandVarRegex = 
regexp.MustCompile(`(\\)?(\$)(\()?\{?([A-Z0-9_]+)?\}?`) func expandVariables(v string, m map[string]string) string { return expandVarRegex.ReplaceAllStringFunc(v, func(s string) string { submatch := expandVarRegex.FindStringSubmatch(s) if submatch == nil { return s } if submatch[1] == "\\" || submatch[2] == "(" { return submatch[0][1:] } else if submatch[4] != "" { return m[submatch[4]] } return s }) } func isIgnoredLine(line string) bool { trimmedLine := strings.TrimSpace(line) return len(trimmedLine) == 0 || strings.HasPrefix(trimmedLine, "#") } func doubleQuoteEscape(line string) string { for _, c := range doubleQuoteSpecialChars { toReplace := "\\" + string(c) if c == '\n' { toReplace = `\n` } if c == '\r' { toReplace = `\r` } line = strings.Replace(line, string(c), toReplace, -1) } return line }
[ "\"SOME_ENV_VAR\"" ]
[]
[ "SOME_ENV_VAR" ]
[]
["SOME_ENV_VAR"]
go
1
0
app/config.py
import os


class Config:
    """Application configuration sourced from environment variables."""

    # API key for the News API client; ``None`` when the NEWSAPIKEY
    # environment variable is not set — callers must handle that case.
    NEWSAPIKEY = os.environ.get('NEWSAPIKEY')
[]
[]
[ "NEWSAPIKEY" ]
[]
["NEWSAPIKEY"]
python
1
0
tests/acceptance/cli_test.py
from __future__ import absolute_import from __future__ import unicode_literals import datetime import json import os import signal import subprocess import time from collections import Counter from collections import namedtuple from operator import attrgetter import py import yaml from docker import errors from .. import mock from compose.cli.command import get_project from compose.container import Container from compose.project import OneOffFilter from tests.integration.testcases import DockerClientTestCase from tests.integration.testcases import get_links from tests.integration.testcases import pull_busybox from tests.integration.testcases import v2_only ProcessResult = namedtuple('ProcessResult', 'stdout stderr') BUILD_CACHE_TEXT = 'Using cache' BUILD_PULL_TEXT = 'Status: Image is up to date for busybox:latest' def start_process(base_dir, options): proc = subprocess.Popen( ['docker-compose'] + options, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=base_dir) print("Running process: %s" % proc.pid) return proc def wait_on_process(proc, returncode=0): stdout, stderr = proc.communicate() if proc.returncode != returncode: print("Stderr: {}".format(stderr)) print("Stdout: {}".format(stdout)) assert proc.returncode == returncode return ProcessResult(stdout.decode('utf-8'), stderr.decode('utf-8')) def wait_on_condition(condition, delay=0.1, timeout=40): start_time = time.time() while not condition(): if time.time() - start_time > timeout: raise AssertionError("Timeout: %s" % condition) time.sleep(delay) def kill_service(service): for container in service.containers(): container.kill() class ContainerCountCondition(object): def __init__(self, project, expected): self.project = project self.expected = expected def __call__(self): return len(self.project.containers()) == self.expected def __str__(self): return "waiting for counter count == %s" % self.expected class ContainerStateCondition(object): def __init__(self, client, name, status): self.client = client 
self.name = name self.status = status def __call__(self): try: container = self.client.inspect_container(self.name) return container['State']['Status'] == self.status except errors.APIError: return False def __str__(self): return "waiting for container to be %s" % self.status class CLITestCase(DockerClientTestCase): def setUp(self): super(CLITestCase, self).setUp() self.base_dir = 'tests/fixtures/simple-composefile' def tearDown(self): if self.base_dir: self.project.kill() self.project.remove_stopped() for container in self.project.containers(stopped=True, one_off=OneOffFilter.only): container.remove(force=True) networks = self.client.networks() for n in networks: if n['Name'].startswith('{}_'.format(self.project.name)): self.client.remove_network(n['Name']) super(CLITestCase, self).tearDown() @property def project(self): # Hack: allow project to be overridden if not hasattr(self, '_project'): self._project = get_project(self.base_dir) return self._project def dispatch(self, options, project_options=None, returncode=0): project_options = project_options or [] proc = start_process(self.base_dir, project_options + options) return wait_on_process(proc, returncode=returncode) def execute(self, container, cmd): # Remove once Hijack and CloseNotifier sign a peace treaty self.client.close() exc = self.client.exec_create(container.id, cmd) self.client.exec_start(exc) return self.client.exec_inspect(exc)['ExitCode'] def lookup(self, container, hostname): return self.execute(container, ["nslookup", hostname]) == 0 def test_help(self): self.base_dir = 'tests/fixtures/no-composefile' result = self.dispatch(['help', 'up'], returncode=0) assert 'Usage: up [options] [SERVICE...]' in result.stdout # Prevent tearDown from trying to create a project self.base_dir = None def test_shorthand_host_opt(self): self.dispatch( ['-H={0}'.format(os.environ.get('DOCKER_HOST', 'unix://')), 'up', '-d'], returncode=0 ) def test_config_list_services(self): self.base_dir = 'tests/fixtures/v2-full' 
result = self.dispatch(['config', '--services']) assert set(result.stdout.rstrip().split('\n')) == {'web', 'other'} def test_config_quiet_with_error(self): self.base_dir = None result = self.dispatch([ '-f', 'tests/fixtures/invalid-composefile/invalid.yml', 'config', '-q' ], returncode=1) assert "'notaservice' must be a mapping" in result.stderr def test_config_quiet(self): self.base_dir = 'tests/fixtures/v2-full' assert self.dispatch(['config', '-q']).stdout == '' def test_config_default(self): self.base_dir = 'tests/fixtures/v2-full' result = self.dispatch(['config']) # assert there are no python objects encoded in the output assert '!!' not in result.stdout output = yaml.load(result.stdout) expected = { 'version': '2.0', 'volumes': {'data': {'driver': 'local'}}, 'networks': {'front': {}}, 'services': { 'web': { 'build': { 'context': os.path.abspath(self.base_dir), }, 'networks': {'front': None, 'default': None}, 'volumes_from': ['service:other:rw'], }, 'other': { 'image': 'busybox:latest', 'command': 'top', 'volumes': ['/data:rw'], }, }, } assert output == expected def test_config_restart(self): self.base_dir = 'tests/fixtures/restart' result = self.dispatch(['config']) assert yaml.load(result.stdout) == { 'version': '2.0', 'services': { 'never': { 'image': 'busybox', 'restart': 'no', }, 'always': { 'image': 'busybox', 'restart': 'always', }, 'on-failure': { 'image': 'busybox', 'restart': 'on-failure', }, 'on-failure-5': { 'image': 'busybox', 'restart': 'on-failure:5', }, }, 'networks': {}, 'volumes': {}, } def test_config_external_network(self): self.base_dir = 'tests/fixtures/networks' result = self.dispatch(['-f', 'external-networks.yml', 'config']) json_result = yaml.load(result.stdout) assert 'networks' in json_result assert json_result['networks'] == { 'networks_foo': { 'external': True # {'name': 'networks_foo'} }, 'bar': { 'external': {'name': 'networks_bar'} } } def test_config_v1(self): self.base_dir = 'tests/fixtures/v1-config' result = 
self.dispatch(['config']) assert yaml.load(result.stdout) == { 'version': '2.0', 'services': { 'net': { 'image': 'busybox', 'network_mode': 'bridge', }, 'volume': { 'image': 'busybox', 'volumes': ['/data:rw'], 'network_mode': 'bridge', }, 'app': { 'image': 'busybox', 'volumes_from': ['service:volume:rw'], 'network_mode': 'service:net', }, }, 'networks': {}, 'volumes': {}, } def test_ps(self): self.project.get_service('simple').create_container() result = self.dispatch(['ps']) assert 'simplecomposefile_simple_1' in result.stdout def test_ps_default_composefile(self): self.base_dir = 'tests/fixtures/multiple-composefiles' self.dispatch(['up', '-d']) result = self.dispatch(['ps']) self.assertIn('multiplecomposefiles_simple_1', result.stdout) self.assertIn('multiplecomposefiles_another_1', result.stdout) self.assertNotIn('multiplecomposefiles_yetanother_1', result.stdout) def test_ps_alternate_composefile(self): config_path = os.path.abspath( 'tests/fixtures/multiple-composefiles/compose2.yml') self._project = get_project(self.base_dir, [config_path]) self.base_dir = 'tests/fixtures/multiple-composefiles' self.dispatch(['-f', 'compose2.yml', 'up', '-d']) result = self.dispatch(['-f', 'compose2.yml', 'ps']) self.assertNotIn('multiplecomposefiles_simple_1', result.stdout) self.assertNotIn('multiplecomposefiles_another_1', result.stdout) self.assertIn('multiplecomposefiles_yetanother_1', result.stdout) def test_pull(self): result = self.dispatch(['pull']) assert sorted(result.stderr.split('\n'))[1:] == [ 'Pulling another (busybox:latest)...', 'Pulling simple (busybox:latest)...', ] def test_pull_with_digest(self): result = self.dispatch(['-f', 'digest.yml', 'pull']) assert 'Pulling simple (busybox:latest)...' 
in result.stderr assert ('Pulling digest (busybox@' 'sha256:38a203e1986cf79639cfb9b2e1d6e773de84002feea2d4eb006b520' '04ee8502d)...') in result.stderr def test_pull_with_ignore_pull_failures(self): result = self.dispatch([ '-f', 'ignore-pull-failures.yml', 'pull', '--ignore-pull-failures']) assert 'Pulling simple (busybox:latest)...' in result.stderr assert 'Pulling another (nonexisting-image:latest)...' in result.stderr assert 'Error: image library/nonexisting-image' in result.stderr assert 'not found' in result.stderr def test_build_plain(self): self.base_dir = 'tests/fixtures/simple-dockerfile' self.dispatch(['build', 'simple']) result = self.dispatch(['build', 'simple']) assert BUILD_CACHE_TEXT in result.stdout assert BUILD_PULL_TEXT not in result.stdout def test_build_no_cache(self): self.base_dir = 'tests/fixtures/simple-dockerfile' self.dispatch(['build', 'simple']) result = self.dispatch(['build', '--no-cache', 'simple']) assert BUILD_CACHE_TEXT not in result.stdout assert BUILD_PULL_TEXT not in result.stdout def test_build_pull(self): # Make sure we have the latest busybox already pull_busybox(self.client) self.base_dir = 'tests/fixtures/simple-dockerfile' self.dispatch(['build', 'simple'], None) result = self.dispatch(['build', '--pull', 'simple']) assert BUILD_CACHE_TEXT in result.stdout assert BUILD_PULL_TEXT in result.stdout def test_build_no_cache_pull(self): # Make sure we have the latest busybox already pull_busybox(self.client) self.base_dir = 'tests/fixtures/simple-dockerfile' self.dispatch(['build', 'simple']) result = self.dispatch(['build', '--no-cache', '--pull', 'simple']) assert BUILD_CACHE_TEXT not in result.stdout assert BUILD_PULL_TEXT in result.stdout def test_build_failed(self): self.base_dir = 'tests/fixtures/simple-failing-dockerfile' self.dispatch(['build', 'simple'], returncode=1) labels = ["com.docker.compose.test_failing_image=true"] containers = [ Container.from_ps(self.project.client, c) for c in self.project.client.containers( 
all=True, filters={"label": labels}) ] assert len(containers) == 1 def test_build_failed_forcerm(self): self.base_dir = 'tests/fixtures/simple-failing-dockerfile' self.dispatch(['build', '--force-rm', 'simple'], returncode=1) labels = ["com.docker.compose.test_failing_image=true"] containers = [ Container.from_ps(self.project.client, c) for c in self.project.client.containers( all=True, filters={"label": labels}) ] assert not containers def test_bundle_with_digests(self): self.base_dir = 'tests/fixtures/bundle-with-digests/' tmpdir = py.test.ensuretemp('cli_test_bundle') self.addCleanup(tmpdir.remove) filename = str(tmpdir.join('example.dab')) self.dispatch(['bundle', '--output', filename]) with open(filename, 'r') as fh: bundle = json.load(fh) assert bundle == { 'Version': '0.1', 'Services': { 'web': { 'Image': ('dockercloud/hello-world@sha256:fe79a2cfbd17eefc3' '44fb8419420808df95a1e22d93b7f621a7399fd1e9dca1d'), 'Networks': ['default'], }, 'redis': { 'Image': ('redis@sha256:a84cb8f53a70e19f61ff2e1d5e73fb7ae62d' '374b2b7392de1e7d77be26ef8f7b'), 'Networks': ['default'], } }, } def test_create(self): self.dispatch(['create']) service = self.project.get_service('simple') another = self.project.get_service('another') self.assertEqual(len(service.containers()), 0) self.assertEqual(len(another.containers()), 0) self.assertEqual(len(service.containers(stopped=True)), 1) self.assertEqual(len(another.containers(stopped=True)), 1) def test_create_with_force_recreate(self): self.dispatch(['create'], None) service = self.project.get_service('simple') self.assertEqual(len(service.containers()), 0) self.assertEqual(len(service.containers(stopped=True)), 1) old_ids = [c.id for c in service.containers(stopped=True)] self.dispatch(['create', '--force-recreate'], None) self.assertEqual(len(service.containers()), 0) self.assertEqual(len(service.containers(stopped=True)), 1) new_ids = [c.id for c in service.containers(stopped=True)] self.assertNotEqual(old_ids, new_ids) def 
test_create_with_no_recreate(self): self.dispatch(['create'], None) service = self.project.get_service('simple') self.assertEqual(len(service.containers()), 0) self.assertEqual(len(service.containers(stopped=True)), 1) old_ids = [c.id for c in service.containers(stopped=True)] self.dispatch(['create', '--no-recreate'], None) self.assertEqual(len(service.containers()), 0) self.assertEqual(len(service.containers(stopped=True)), 1) new_ids = [c.id for c in service.containers(stopped=True)] self.assertEqual(old_ids, new_ids) def test_create_with_force_recreate_and_no_recreate(self): self.dispatch( ['create', '--force-recreate', '--no-recreate'], returncode=1) def test_down_invalid_rmi_flag(self): result = self.dispatch(['down', '--rmi', 'bogus'], returncode=1) assert '--rmi flag must be' in result.stderr @v2_only() def test_down(self): self.base_dir = 'tests/fixtures/v2-full' self.dispatch(['up', '-d']) wait_on_condition(ContainerCountCondition(self.project, 2)) self.dispatch(['run', 'web', 'true']) self.dispatch(['run', '-d', 'web', 'tail', '-f', '/dev/null']) assert len(self.project.containers(one_off=OneOffFilter.only, stopped=True)) == 2 result = self.dispatch(['down', '--rmi=local', '--volumes']) assert 'Stopping v2full_web_1' in result.stderr assert 'Stopping v2full_other_1' in result.stderr assert 'Stopping v2full_web_run_2' in result.stderr assert 'Removing v2full_web_1' in result.stderr assert 'Removing v2full_other_1' in result.stderr assert 'Removing v2full_web_run_1' in result.stderr assert 'Removing v2full_web_run_2' in result.stderr assert 'Removing volume v2full_data' in result.stderr assert 'Removing image v2full_web' in result.stderr assert 'Removing image busybox' not in result.stderr assert 'Removing network v2full_default' in result.stderr assert 'Removing network v2full_front' in result.stderr def test_up_detached(self): self.dispatch(['up', '-d']) service = self.project.get_service('simple') another = self.project.get_service('another') 
self.assertEqual(len(service.containers()), 1) self.assertEqual(len(another.containers()), 1) # Ensure containers don't have stdin and stdout connected in -d mode container, = service.containers() self.assertFalse(container.get('Config.AttachStderr')) self.assertFalse(container.get('Config.AttachStdout')) self.assertFalse(container.get('Config.AttachStdin')) def test_up_attached(self): self.base_dir = 'tests/fixtures/echo-services' result = self.dispatch(['up', '--no-color']) assert 'simple_1 | simple' in result.stdout assert 'another_1 | another' in result.stdout assert 'simple_1 exited with code 0' in result.stdout assert 'another_1 exited with code 0' in result.stdout @v2_only() def test_up(self): self.base_dir = 'tests/fixtures/v2-simple' self.dispatch(['up', '-d'], None) services = self.project.get_services() network_name = self.project.networks.networks['default'].full_name networks = self.client.networks(names=[network_name]) self.assertEqual(len(networks), 1) self.assertEqual(networks[0]['Driver'], 'bridge') assert 'com.docker.network.bridge.enable_icc' not in networks[0]['Options'] network = self.client.inspect_network(networks[0]['Id']) for service in services: containers = service.containers() self.assertEqual(len(containers), 1) container = containers[0] self.assertIn(container.id, network['Containers']) networks = container.get('NetworkSettings.Networks') self.assertEqual(list(networks), [network['Name']]) self.assertEqual( sorted(networks[network['Name']]['Aliases']), sorted([service.name, container.short_id])) for service in services: assert self.lookup(container, service.name) @v2_only() def test_up_with_default_network_config(self): filename = 'default-network-config.yml' self.base_dir = 'tests/fixtures/networks' self._project = get_project(self.base_dir, [filename]) self.dispatch(['-f', filename, 'up', '-d'], None) network_name = self.project.networks.networks['default'].full_name networks = self.client.networks(names=[network_name]) assert 
networks[0]['Options']['com.docker.network.bridge.enable_icc'] == 'false' @v2_only() def test_up_with_network_aliases(self): filename = 'network-aliases.yml' self.base_dir = 'tests/fixtures/networks' self.dispatch(['-f', filename, 'up', '-d'], None) back_name = '{}_back'.format(self.project.name) front_name = '{}_front'.format(self.project.name) networks = [ n for n in self.client.networks() if n['Name'].startswith('{}_'.format(self.project.name)) ] # Two networks were created: back and front assert sorted(n['Name'] for n in networks) == [back_name, front_name] web_container = self.project.get_service('web').containers()[0] back_aliases = web_container.get( 'NetworkSettings.Networks.{}.Aliases'.format(back_name) ) assert 'web' in back_aliases front_aliases = web_container.get( 'NetworkSettings.Networks.{}.Aliases'.format(front_name) ) assert 'web' in front_aliases assert 'forward_facing' in front_aliases assert 'ahead' in front_aliases @v2_only() def test_up_with_network_static_addresses(self): filename = 'network-static-addresses.yml' ipv4_address = '172.16.100.100' ipv6_address = 'fe80::1001:100' self.base_dir = 'tests/fixtures/networks' self.dispatch(['-f', filename, 'up', '-d'], None) static_net = '{}_static_test'.format(self.project.name) networks = [ n for n in self.client.networks() if n['Name'].startswith('{}_'.format(self.project.name)) ] # One networks was created: front assert sorted(n['Name'] for n in networks) == [static_net] web_container = self.project.get_service('web').containers()[0] ipam_config = web_container.get( 'NetworkSettings.Networks.{}.IPAMConfig'.format(static_net) ) assert ipv4_address in ipam_config.values() assert ipv6_address in ipam_config.values() @v2_only() def test_up_with_networks(self): self.base_dir = 'tests/fixtures/networks' self.dispatch(['up', '-d'], None) back_name = '{}_back'.format(self.project.name) front_name = '{}_front'.format(self.project.name) networks = [ n for n in self.client.networks() if 
n['Name'].startswith('{}_'.format(self.project.name)) ] # Two networks were created: back and front assert sorted(n['Name'] for n in networks) == [back_name, front_name] back_network = [n for n in networks if n['Name'] == back_name][0] front_network = [n for n in networks if n['Name'] == front_name][0] web_container = self.project.get_service('web').containers()[0] app_container = self.project.get_service('app').containers()[0] db_container = self.project.get_service('db').containers()[0] for net_name in [front_name, back_name]: links = app_container.get('NetworkSettings.Networks.{}.Links'.format(net_name)) assert '{}:database'.format(db_container.name) in links # db and app joined the back network assert sorted(back_network['Containers']) == sorted([db_container.id, app_container.id]) # web and app joined the front network assert sorted(front_network['Containers']) == sorted([web_container.id, app_container.id]) # web can see app but not db assert self.lookup(web_container, "app") assert not self.lookup(web_container, "db") # app can see db assert self.lookup(app_container, "db") # app has aliased db to "database" assert self.lookup(app_container, "database") @v2_only() def test_up_missing_network(self): self.base_dir = 'tests/fixtures/networks' result = self.dispatch( ['-f', 'missing-network.yml', 'up', '-d'], returncode=1) assert 'Service "web" uses an undefined network "foo"' in result.stderr @v2_only() def test_up_with_network_mode(self): c = self.client.create_container('busybox', 'top', name='composetest_network_mode_container') self.addCleanup(self.client.remove_container, c, force=True) self.client.start(c) container_mode_source = 'container:{}'.format(c['Id']) filename = 'network-mode.yml' self.base_dir = 'tests/fixtures/networks' self._project = get_project(self.base_dir, [filename]) self.dispatch(['-f', filename, 'up', '-d'], None) networks = [ n for n in self.client.networks() if n['Name'].startswith('{}_'.format(self.project.name)) ] assert not 
networks for name in ['bridge', 'host', 'none']: container = self.project.get_service(name).containers()[0] assert list(container.get('NetworkSettings.Networks')) == [name] assert container.get('HostConfig.NetworkMode') == name service_mode_source = 'container:{}'.format( self.project.get_service('bridge').containers()[0].id) service_mode_container = self.project.get_service('service').containers()[0] assert not service_mode_container.get('NetworkSettings.Networks') assert service_mode_container.get('HostConfig.NetworkMode') == service_mode_source container_mode_container = self.project.get_service('container').containers()[0] assert not container_mode_container.get('NetworkSettings.Networks') assert container_mode_container.get('HostConfig.NetworkMode') == container_mode_source @v2_only() def test_up_external_networks(self): filename = 'external-networks.yml' self.base_dir = 'tests/fixtures/networks' self._project = get_project(self.base_dir, [filename]) result = self.dispatch(['-f', filename, 'up', '-d'], returncode=1) assert 'declared as external, but could not be found' in result.stderr networks = [ n['Name'] for n in self.client.networks() if n['Name'].startswith('{}_'.format(self.project.name)) ] assert not networks network_names = ['{}_{}'.format(self.project.name, n) for n in ['foo', 'bar']] for name in network_names: self.client.create_network(name) self.dispatch(['-f', filename, 'up', '-d']) container = self.project.containers()[0] assert sorted(list(container.get('NetworkSettings.Networks'))) == sorted(network_names) @v2_only() def test_up_with_external_default_network(self): filename = 'external-default.yml' self.base_dir = 'tests/fixtures/networks' self._project = get_project(self.base_dir, [filename]) result = self.dispatch(['-f', filename, 'up', '-d'], returncode=1) assert 'declared as external, but could not be found' in result.stderr networks = [ n['Name'] for n in self.client.networks() if n['Name'].startswith('{}_'.format(self.project.name)) ] 
assert not networks network_name = 'composetest_external_network' self.client.create_network(network_name) self.dispatch(['-f', filename, 'up', '-d']) container = self.project.containers()[0] assert list(container.get('NetworkSettings.Networks')) == [network_name] @v2_only() def test_up_no_services(self): self.base_dir = 'tests/fixtures/no-services' self.dispatch(['up', '-d'], None) network_names = [ n['Name'] for n in self.client.networks() if n['Name'].startswith('{}_'.format(self.project.name)) ] assert network_names == [] def test_up_with_links_v1(self): self.base_dir = 'tests/fixtures/links-composefile' self.dispatch(['up', '-d', 'web'], None) # No network was created network_name = self.project.networks.networks['default'].full_name networks = self.client.networks(names=[network_name]) assert networks == [] web = self.project.get_service('web') db = self.project.get_service('db') console = self.project.get_service('console') # console was not started self.assertEqual(len(web.containers()), 1) self.assertEqual(len(db.containers()), 1) self.assertEqual(len(console.containers()), 0) # web has links web_container = web.containers()[0] self.assertTrue(web_container.get('HostConfig.Links')) def test_up_with_net_is_invalid(self): self.base_dir = 'tests/fixtures/net-container' result = self.dispatch( ['-f', 'v2-invalid.yml', 'up', '-d'], returncode=1) assert "Unsupported config option for services.bar: 'net'" in result.stderr def test_up_with_net_v1(self): self.base_dir = 'tests/fixtures/net-container' self.dispatch(['up', '-d'], None) bar = self.project.get_service('bar') bar_container = bar.containers()[0] foo = self.project.get_service('foo') foo_container = foo.containers()[0] assert foo_container.get('HostConfig.NetworkMode') == \ 'container:{}'.format(bar_container.id) def test_up_with_no_deps(self): self.base_dir = 'tests/fixtures/links-composefile' self.dispatch(['up', '-d', '--no-deps', 'web'], None) web = self.project.get_service('web') db = 
self.project.get_service('db') console = self.project.get_service('console') self.assertEqual(len(web.containers()), 1) self.assertEqual(len(db.containers()), 0) self.assertEqual(len(console.containers()), 0) def test_up_with_force_recreate(self): self.dispatch(['up', '-d'], None) service = self.project.get_service('simple') self.assertEqual(len(service.containers()), 1) old_ids = [c.id for c in service.containers()] self.dispatch(['up', '-d', '--force-recreate'], None) self.assertEqual(len(service.containers()), 1) new_ids = [c.id for c in service.containers()] self.assertNotEqual(old_ids, new_ids) def test_up_with_no_recreate(self): self.dispatch(['up', '-d'], None) service = self.project.get_service('simple') self.assertEqual(len(service.containers()), 1) old_ids = [c.id for c in service.containers()] self.dispatch(['up', '-d', '--no-recreate'], None) self.assertEqual(len(service.containers()), 1) new_ids = [c.id for c in service.containers()] self.assertEqual(old_ids, new_ids) def test_up_with_force_recreate_and_no_recreate(self): self.dispatch( ['up', '-d', '--force-recreate', '--no-recreate'], returncode=1) def test_up_with_timeout(self): self.dispatch(['up', '-d', '-t', '1']) service = self.project.get_service('simple') another = self.project.get_service('another') self.assertEqual(len(service.containers()), 1) self.assertEqual(len(another.containers()), 1) # Ensure containers don't have stdin and stdout connected in -d mode config = service.containers()[0].inspect()['Config'] self.assertFalse(config['AttachStderr']) self.assertFalse(config['AttachStdout']) self.assertFalse(config['AttachStdin']) def test_up_handles_sigint(self): proc = start_process(self.base_dir, ['up', '-t', '2']) wait_on_condition(ContainerCountCondition(self.project, 2)) os.kill(proc.pid, signal.SIGINT) wait_on_condition(ContainerCountCondition(self.project, 0)) def test_up_handles_sigterm(self): proc = start_process(self.base_dir, ['up', '-t', '2']) 
wait_on_condition(ContainerCountCondition(self.project, 2)) os.kill(proc.pid, signal.SIGTERM) wait_on_condition(ContainerCountCondition(self.project, 0)) @v2_only() def test_up_handles_force_shutdown(self): self.base_dir = 'tests/fixtures/sleeps-composefile' proc = start_process(self.base_dir, ['up', '-t', '200']) wait_on_condition(ContainerCountCondition(self.project, 2)) os.kill(proc.pid, signal.SIGTERM) time.sleep(0.1) os.kill(proc.pid, signal.SIGTERM) wait_on_condition(ContainerCountCondition(self.project, 0)) def test_up_handles_abort_on_container_exit(self): start_process(self.base_dir, ['up', '--abort-on-container-exit']) wait_on_condition(ContainerCountCondition(self.project, 2)) self.project.stop(['simple']) wait_on_condition(ContainerCountCondition(self.project, 0)) def test_exec_without_tty(self): self.base_dir = 'tests/fixtures/links-composefile' self.dispatch(['up', '-d', 'console']) self.assertEqual(len(self.project.containers()), 1) stdout, stderr = self.dispatch(['exec', '-T', 'console', 'ls', '-1d', '/']) self.assertEquals(stdout, "/\n") self.assertEquals(stderr, "") def test_exec_custom_user(self): self.base_dir = 'tests/fixtures/links-composefile' self.dispatch(['up', '-d', 'console']) self.assertEqual(len(self.project.containers()), 1) stdout, stderr = self.dispatch(['exec', '-T', '--user=operator', 'console', 'whoami']) self.assertEquals(stdout, "operator\n") self.assertEquals(stderr, "") def test_run_service_without_links(self): self.base_dir = 'tests/fixtures/links-composefile' self.dispatch(['run', 'console', '/bin/true']) self.assertEqual(len(self.project.containers()), 0) # Ensure stdin/out was open container = self.project.containers(stopped=True, one_off=OneOffFilter.only)[0] config = container.inspect()['Config'] self.assertTrue(config['AttachStderr']) self.assertTrue(config['AttachStdout']) self.assertTrue(config['AttachStdin']) def test_run_service_with_links(self): self.base_dir = 'tests/fixtures/links-composefile' 
self.dispatch(['run', 'web', '/bin/true'], None) db = self.project.get_service('db') console = self.project.get_service('console') self.assertEqual(len(db.containers()), 1) self.assertEqual(len(console.containers()), 0) @v2_only() def test_run_service_with_dependencies(self): self.base_dir = 'tests/fixtures/v2-dependencies' self.dispatch(['run', 'web', '/bin/true'], None) db = self.project.get_service('db') console = self.project.get_service('console') self.assertEqual(len(db.containers()), 1) self.assertEqual(len(console.containers()), 0) def test_run_with_no_deps(self): self.base_dir = 'tests/fixtures/links-composefile' self.dispatch(['run', '--no-deps', 'web', '/bin/true']) db = self.project.get_service('db') self.assertEqual(len(db.containers()), 0) def test_run_does_not_recreate_linked_containers(self): self.base_dir = 'tests/fixtures/links-composefile' self.dispatch(['up', '-d', 'db']) db = self.project.get_service('db') self.assertEqual(len(db.containers()), 1) old_ids = [c.id for c in db.containers()] self.dispatch(['run', 'web', '/bin/true'], None) self.assertEqual(len(db.containers()), 1) new_ids = [c.id for c in db.containers()] self.assertEqual(old_ids, new_ids) def test_run_without_command(self): self.base_dir = 'tests/fixtures/commands-composefile' self.check_build('tests/fixtures/simple-dockerfile', tag='composetest_test') self.dispatch(['run', 'implicit']) service = self.project.get_service('implicit') containers = service.containers(stopped=True, one_off=OneOffFilter.only) self.assertEqual( [c.human_readable_command for c in containers], [u'/bin/sh -c echo "success"'], ) self.dispatch(['run', 'explicit']) service = self.project.get_service('explicit') containers = service.containers(stopped=True, one_off=OneOffFilter.only) self.assertEqual( [c.human_readable_command for c in containers], [u'/bin/true'], ) def test_run_service_with_dockerfile_entrypoint(self): self.base_dir = 'tests/fixtures/entrypoint-dockerfile' self.dispatch(['run', 'test']) 
container = self.project.containers(stopped=True, one_off=OneOffFilter.only)[0] assert container.get('Config.Entrypoint') == ['printf'] assert container.get('Config.Cmd') == ['default', 'args'] def test_run_service_with_dockerfile_entrypoint_overridden(self): self.base_dir = 'tests/fixtures/entrypoint-dockerfile' self.dispatch(['run', '--entrypoint', 'echo', 'test']) container = self.project.containers(stopped=True, one_off=OneOffFilter.only)[0] assert container.get('Config.Entrypoint') == ['echo'] assert not container.get('Config.Cmd') def test_run_service_with_dockerfile_entrypoint_and_command_overridden(self): self.base_dir = 'tests/fixtures/entrypoint-dockerfile' self.dispatch(['run', '--entrypoint', 'echo', 'test', 'foo']) container = self.project.containers(stopped=True, one_off=OneOffFilter.only)[0] assert container.get('Config.Entrypoint') == ['echo'] assert container.get('Config.Cmd') == ['foo'] def test_run_service_with_compose_file_entrypoint(self): self.base_dir = 'tests/fixtures/entrypoint-composefile' self.dispatch(['run', 'test']) container = self.project.containers(stopped=True, one_off=OneOffFilter.only)[0] assert container.get('Config.Entrypoint') == ['printf'] assert container.get('Config.Cmd') == ['default', 'args'] def test_run_service_with_compose_file_entrypoint_overridden(self): self.base_dir = 'tests/fixtures/entrypoint-composefile' self.dispatch(['run', '--entrypoint', 'echo', 'test']) container = self.project.containers(stopped=True, one_off=OneOffFilter.only)[0] assert container.get('Config.Entrypoint') == ['echo'] assert not container.get('Config.Cmd') def test_run_service_with_compose_file_entrypoint_and_command_overridden(self): self.base_dir = 'tests/fixtures/entrypoint-composefile' self.dispatch(['run', '--entrypoint', 'echo', 'test', 'foo']) container = self.project.containers(stopped=True, one_off=OneOffFilter.only)[0] assert container.get('Config.Entrypoint') == ['echo'] assert container.get('Config.Cmd') == ['foo'] def 
test_run_service_with_compose_file_entrypoint_and_empty_string_command(self): self.base_dir = 'tests/fixtures/entrypoint-composefile' self.dispatch(['run', '--entrypoint', 'echo', 'test', '']) container = self.project.containers(stopped=True, one_off=OneOffFilter.only)[0] assert container.get('Config.Entrypoint') == ['echo'] assert container.get('Config.Cmd') == [''] def test_run_service_with_user_overridden(self): self.base_dir = 'tests/fixtures/user-composefile' name = 'service' user = 'sshd' self.dispatch(['run', '--user={user}'.format(user=user), name], returncode=1) service = self.project.get_service(name) container = service.containers(stopped=True, one_off=OneOffFilter.only)[0] self.assertEqual(user, container.get('Config.User')) def test_run_service_with_user_overridden_short_form(self): self.base_dir = 'tests/fixtures/user-composefile' name = 'service' user = 'sshd' self.dispatch(['run', '-u', user, name], returncode=1) service = self.project.get_service(name) container = service.containers(stopped=True, one_off=OneOffFilter.only)[0] self.assertEqual(user, container.get('Config.User')) def test_run_service_with_environement_overridden(self): name = 'service' self.base_dir = 'tests/fixtures/environment-composefile' self.dispatch([ 'run', '-e', 'foo=notbar', '-e', 'allo=moto=bobo', '-e', 'alpha=beta', name, '/bin/true', ]) service = self.project.get_service(name) container = service.containers(stopped=True, one_off=OneOffFilter.only)[0] # env overriden self.assertEqual('notbar', container.environment['foo']) # keep environement from yaml self.assertEqual('world', container.environment['hello']) # added option from command line self.assertEqual('beta', container.environment['alpha']) # make sure a value with a = don't crash out self.assertEqual('moto=bobo', container.environment['allo']) def test_run_service_without_map_ports(self): # create one off container self.base_dir = 'tests/fixtures/ports-composefile' self.dispatch(['run', '-d', 'simple']) container = 
self.project.get_service('simple').containers(one_off=OneOffFilter.only)[0] # get port information port_random = container.get_local_port(3000) port_assigned = container.get_local_port(3001) # close all one off containers we just created container.stop() # check the ports self.assertEqual(port_random, None) self.assertEqual(port_assigned, None) def test_run_service_with_map_ports(self): # create one off container self.base_dir = 'tests/fixtures/ports-composefile' self.dispatch(['run', '-d', '--service-ports', 'simple']) container = self.project.get_service('simple').containers(one_off=OneOffFilter.only)[0] # get port information port_random = container.get_local_port(3000) port_assigned = container.get_local_port(3001) port_range = container.get_local_port(3002), container.get_local_port(3003) # close all one off containers we just created container.stop() # check the ports self.assertNotEqual(port_random, None) self.assertIn("0.0.0.0", port_random) self.assertEqual(port_assigned, "0.0.0.0:49152") self.assertEqual(port_range[0], "0.0.0.0:49153") self.assertEqual(port_range[1], "0.0.0.0:49154") def test_run_service_with_explicitly_maped_ports(self): # create one off container self.base_dir = 'tests/fixtures/ports-composefile' self.dispatch(['run', '-d', '-p', '30000:3000', '--publish', '30001:3001', 'simple']) container = self.project.get_service('simple').containers(one_off=OneOffFilter.only)[0] # get port information port_short = container.get_local_port(3000) port_full = container.get_local_port(3001) # close all one off containers we just created container.stop() # check the ports self.assertEqual(port_short, "0.0.0.0:30000") self.assertEqual(port_full, "0.0.0.0:30001") def test_run_service_with_explicitly_maped_ip_ports(self): # create one off container self.base_dir = 'tests/fixtures/ports-composefile' self.dispatch([ 'run', '-d', '-p', '127.0.0.1:30000:3000', '--publish', '127.0.0.1:30001:3001', 'simple' ]) container = 
self.project.get_service('simple').containers(one_off=OneOffFilter.only)[0] # get port information port_short = container.get_local_port(3000) port_full = container.get_local_port(3001) # close all one off containers we just created container.stop() # check the ports self.assertEqual(port_short, "127.0.0.1:30000") self.assertEqual(port_full, "127.0.0.1:30001") def test_run_with_expose_ports(self): # create one off container self.base_dir = 'tests/fixtures/expose-composefile' self.dispatch(['run', '-d', '--service-ports', 'simple']) container = self.project.get_service('simple').containers(one_off=OneOffFilter.only)[0] ports = container.ports self.assertEqual(len(ports), 9) # exposed ports are not mapped to host ports assert ports['3000/tcp'] is None assert ports['3001/tcp'] is None assert ports['3001/udp'] is None assert ports['3002/tcp'] is None assert ports['3003/tcp'] is None assert ports['3004/tcp'] is None assert ports['3005/tcp'] is None assert ports['3006/udp'] is None assert ports['3007/udp'] is None # close all one off containers we just created container.stop() def test_run_with_custom_name(self): self.base_dir = 'tests/fixtures/environment-composefile' name = 'the-container-name' self.dispatch(['run', '--name', name, 'service', '/bin/true']) service = self.project.get_service('service') container, = service.containers(stopped=True, one_off=OneOffFilter.only) self.assertEqual(container.name, name) def test_run_service_with_workdir_overridden(self): self.base_dir = 'tests/fixtures/run-workdir' name = 'service' workdir = '/var' self.dispatch(['run', '--workdir={workdir}'.format(workdir=workdir), name]) service = self.project.get_service(name) container = service.containers(stopped=True, one_off=True)[0] self.assertEqual(workdir, container.get('Config.WorkingDir')) def test_run_service_with_workdir_overridden_short_form(self): self.base_dir = 'tests/fixtures/run-workdir' name = 'service' workdir = '/var' self.dispatch(['run', '-w', workdir, name]) service = 
self.project.get_service(name) container = service.containers(stopped=True, one_off=True)[0] self.assertEqual(workdir, container.get('Config.WorkingDir')) @v2_only() def test_run_interactive_connects_to_network(self): self.base_dir = 'tests/fixtures/networks' self.dispatch(['up', '-d']) self.dispatch(['run', 'app', 'nslookup', 'app']) self.dispatch(['run', 'app', 'nslookup', 'db']) containers = self.project.get_service('app').containers( stopped=True, one_off=OneOffFilter.only) assert len(containers) == 2 for container in containers: networks = container.get('NetworkSettings.Networks') assert sorted(list(networks)) == [ '{}_{}'.format(self.project.name, name) for name in ['back', 'front'] ] for _, config in networks.items(): # TODO: once we drop support for API <1.24, this can be changed to: # assert config['Aliases'] == [container.short_id] aliases = set(config['Aliases'] or []) - set([container.short_id]) assert not aliases @v2_only() def test_run_detached_connects_to_network(self): self.base_dir = 'tests/fixtures/networks' self.dispatch(['up', '-d']) self.dispatch(['run', '-d', 'app', 'top']) container = self.project.get_service('app').containers(one_off=OneOffFilter.only)[0] networks = container.get('NetworkSettings.Networks') assert sorted(list(networks)) == [ '{}_{}'.format(self.project.name, name) for name in ['back', 'front'] ] for _, config in networks.items(): # TODO: once we drop support for API <1.24, this can be changed to: # assert config['Aliases'] == [container.short_id] aliases = set(config['Aliases'] or []) - set([container.short_id]) assert not aliases assert self.lookup(container, 'app') assert self.lookup(container, 'db') def test_run_handles_sigint(self): proc = start_process(self.base_dir, ['run', '-T', 'simple', 'top']) wait_on_condition(ContainerStateCondition( self.project.client, 'simplecomposefile_simple_run_1', 'running')) os.kill(proc.pid, signal.SIGINT) wait_on_condition(ContainerStateCondition( self.project.client, 
'simplecomposefile_simple_run_1', 'exited')) def test_run_handles_sigterm(self): proc = start_process(self.base_dir, ['run', '-T', 'simple', 'top']) wait_on_condition(ContainerStateCondition( self.project.client, 'simplecomposefile_simple_run_1', 'running')) os.kill(proc.pid, signal.SIGTERM) wait_on_condition(ContainerStateCondition( self.project.client, 'simplecomposefile_simple_run_1', 'exited')) @mock.patch.dict(os.environ) def test_run_env_values_from_system(self): os.environ['FOO'] = 'bar' os.environ['BAR'] = 'baz' self.dispatch(['run', '-e', 'FOO', 'simple', 'true'], None) container = self.project.containers(one_off=OneOffFilter.only, stopped=True)[0] environment = container.get('Config.Env') assert 'FOO=bar' in environment assert 'BAR=baz' not in environment def test_rm(self): service = self.project.get_service('simple') service.create_container() kill_service(service) self.assertEqual(len(service.containers(stopped=True)), 1) self.dispatch(['rm', '--force'], None) self.assertEqual(len(service.containers(stopped=True)), 0) service = self.project.get_service('simple') service.create_container() kill_service(service) self.assertEqual(len(service.containers(stopped=True)), 1) self.dispatch(['rm', '-f'], None) self.assertEqual(len(service.containers(stopped=True)), 0) def test_rm_all(self): service = self.project.get_service('simple') service.create_container(one_off=False) service.create_container(one_off=True) kill_service(service) self.assertEqual(len(service.containers(stopped=True)), 1) self.assertEqual(len(service.containers(stopped=True, one_off=OneOffFilter.only)), 1) self.dispatch(['rm', '-f'], None) self.assertEqual(len(service.containers(stopped=True)), 0) self.assertEqual(len(service.containers(stopped=True, one_off=OneOffFilter.only)), 0) service.create_container(one_off=False) service.create_container(one_off=True) kill_service(service) self.assertEqual(len(service.containers(stopped=True)), 1) self.assertEqual(len(service.containers(stopped=True, 
one_off=OneOffFilter.only)), 1) self.dispatch(['rm', '-f', '--all'], None) self.assertEqual(len(service.containers(stopped=True)), 0) self.assertEqual(len(service.containers(stopped=True, one_off=OneOffFilter.only)), 0) def test_stop(self): self.dispatch(['up', '-d'], None) service = self.project.get_service('simple') self.assertEqual(len(service.containers()), 1) self.assertTrue(service.containers()[0].is_running) self.dispatch(['stop', '-t', '1'], None) self.assertEqual(len(service.containers(stopped=True)), 1) self.assertFalse(service.containers(stopped=True)[0].is_running) def test_stop_signal(self): self.base_dir = 'tests/fixtures/stop-signal-composefile' self.dispatch(['up', '-d'], None) service = self.project.get_service('simple') self.assertEqual(len(service.containers()), 1) self.assertTrue(service.containers()[0].is_running) self.dispatch(['stop', '-t', '1'], None) self.assertEqual(len(service.containers(stopped=True)), 1) self.assertFalse(service.containers(stopped=True)[0].is_running) self.assertEqual(service.containers(stopped=True)[0].exit_code, 0) def test_start_no_containers(self): result = self.dispatch(['start'], returncode=1) assert 'No containers to start' in result.stderr @v2_only() def test_up_logging(self): self.base_dir = 'tests/fixtures/logging-composefile' self.dispatch(['up', '-d']) simple = self.project.get_service('simple').containers()[0] log_config = simple.get('HostConfig.LogConfig') self.assertTrue(log_config) self.assertEqual(log_config.get('Type'), 'none') another = self.project.get_service('another').containers()[0] log_config = another.get('HostConfig.LogConfig') self.assertTrue(log_config) self.assertEqual(log_config.get('Type'), 'json-file') self.assertEqual(log_config.get('Config')['max-size'], '10m') def test_up_logging_legacy(self): self.base_dir = 'tests/fixtures/logging-composefile-legacy' self.dispatch(['up', '-d']) simple = self.project.get_service('simple').containers()[0] log_config = 
simple.get('HostConfig.LogConfig') self.assertTrue(log_config) self.assertEqual(log_config.get('Type'), 'none') another = self.project.get_service('another').containers()[0] log_config = another.get('HostConfig.LogConfig') self.assertTrue(log_config) self.assertEqual(log_config.get('Type'), 'json-file') self.assertEqual(log_config.get('Config')['max-size'], '10m') def test_pause_unpause(self): self.dispatch(['up', '-d'], None) service = self.project.get_service('simple') self.assertFalse(service.containers()[0].is_paused) self.dispatch(['pause'], None) self.assertTrue(service.containers()[0].is_paused) self.dispatch(['unpause'], None) self.assertFalse(service.containers()[0].is_paused) def test_pause_no_containers(self): result = self.dispatch(['pause'], returncode=1) assert 'No containers to pause' in result.stderr def test_unpause_no_containers(self): result = self.dispatch(['unpause'], returncode=1) assert 'No containers to unpause' in result.stderr def test_logs_invalid_service_name(self): self.dispatch(['logs', 'madeupname'], returncode=1) def test_logs_follow(self): self.base_dir = 'tests/fixtures/echo-services' self.dispatch(['up', '-d']) result = self.dispatch(['logs', '-f']) assert result.stdout.count('\n') == 5 assert 'simple' in result.stdout assert 'another' in result.stdout assert 'exited with code 0' in result.stdout def test_logs_follow_logs_from_new_containers(self): self.base_dir = 'tests/fixtures/logs-composefile' self.dispatch(['up', '-d', 'simple']) proc = start_process(self.base_dir, ['logs', '-f']) self.dispatch(['up', '-d', 'another']) wait_on_condition(ContainerStateCondition( self.project.client, 'logscomposefile_another_1', 'exited')) self.dispatch(['kill', 'simple']) result = wait_on_process(proc) assert 'hello' in result.stdout assert 'test' in result.stdout assert 'logscomposefile_another_1 exited with code 0' in result.stdout assert 'logscomposefile_simple_1 exited with code 137' in result.stdout def test_logs_default(self): 
self.base_dir = 'tests/fixtures/logs-composefile' self.dispatch(['up', '-d']) result = self.dispatch(['logs']) assert 'hello' in result.stdout assert 'test' in result.stdout assert 'exited with' not in result.stdout def test_logs_on_stopped_containers_exits(self): self.base_dir = 'tests/fixtures/echo-services' self.dispatch(['up']) result = self.dispatch(['logs']) assert 'simple' in result.stdout assert 'another' in result.stdout assert 'exited with' not in result.stdout def test_logs_timestamps(self): self.base_dir = 'tests/fixtures/echo-services' self.dispatch(['up', '-d']) result = self.dispatch(['logs', '-f', '-t']) self.assertRegexpMatches(result.stdout, '(\d{4})-(\d{2})-(\d{2})T(\d{2})\:(\d{2})\:(\d{2})') def test_logs_tail(self): self.base_dir = 'tests/fixtures/logs-tail-composefile' self.dispatch(['up']) result = self.dispatch(['logs', '--tail', '2']) assert result.stdout.count('\n') == 3 def test_kill(self): self.dispatch(['up', '-d'], None) service = self.project.get_service('simple') self.assertEqual(len(service.containers()), 1) self.assertTrue(service.containers()[0].is_running) self.dispatch(['kill'], None) self.assertEqual(len(service.containers(stopped=True)), 1) self.assertFalse(service.containers(stopped=True)[0].is_running) def test_kill_signal_sigstop(self): self.dispatch(['up', '-d'], None) service = self.project.get_service('simple') self.assertEqual(len(service.containers()), 1) self.assertTrue(service.containers()[0].is_running) self.dispatch(['kill', '-s', 'SIGSTOP'], None) self.assertEqual(len(service.containers()), 1) # The container is still running. 
It has only been paused self.assertTrue(service.containers()[0].is_running) def test_kill_stopped_service(self): self.dispatch(['up', '-d'], None) service = self.project.get_service('simple') self.dispatch(['kill', '-s', 'SIGSTOP'], None) self.assertTrue(service.containers()[0].is_running) self.dispatch(['kill', '-s', 'SIGKILL'], None) self.assertEqual(len(service.containers(stopped=True)), 1) self.assertFalse(service.containers(stopped=True)[0].is_running) def test_restart(self): service = self.project.get_service('simple') container = service.create_container() service.start_container(container) started_at = container.dictionary['State']['StartedAt'] self.dispatch(['restart', '-t', '1'], None) container.inspect() self.assertNotEqual( container.dictionary['State']['FinishedAt'], '0001-01-01T00:00:00Z', ) self.assertNotEqual( container.dictionary['State']['StartedAt'], started_at, ) def test_restart_stopped_container(self): service = self.project.get_service('simple') container = service.create_container() container.start() container.kill() self.assertEqual(len(service.containers(stopped=True)), 1) self.dispatch(['restart', '-t', '1'], None) self.assertEqual(len(service.containers(stopped=False)), 1) def test_restart_no_containers(self): result = self.dispatch(['restart'], returncode=1) assert 'No containers to restart' in result.stderr def test_scale(self): project = self.project self.dispatch(['scale', 'simple=1']) self.assertEqual(len(project.get_service('simple').containers()), 1) self.dispatch(['scale', 'simple=3', 'another=2']) self.assertEqual(len(project.get_service('simple').containers()), 3) self.assertEqual(len(project.get_service('another').containers()), 2) self.dispatch(['scale', 'simple=1', 'another=1']) self.assertEqual(len(project.get_service('simple').containers()), 1) self.assertEqual(len(project.get_service('another').containers()), 1) self.dispatch(['scale', 'simple=1', 'another=1']) 
self.assertEqual(len(project.get_service('simple').containers()), 1) self.assertEqual(len(project.get_service('another').containers()), 1) self.dispatch(['scale', 'simple=0', 'another=0']) self.assertEqual(len(project.get_service('simple').containers()), 0) self.assertEqual(len(project.get_service('another').containers()), 0) def test_port(self): self.base_dir = 'tests/fixtures/ports-composefile' self.dispatch(['up', '-d'], None) container = self.project.get_service('simple').get_container() def get_port(number): result = self.dispatch(['port', 'simple', str(number)]) return result.stdout.rstrip() self.assertEqual(get_port(3000), container.get_local_port(3000)) self.assertEqual(get_port(3001), "0.0.0.0:49152") self.assertEqual(get_port(3002), "0.0.0.0:49153") def test_port_with_scale(self): self.base_dir = 'tests/fixtures/ports-composefile-scale' self.dispatch(['scale', 'simple=2'], None) containers = sorted( self.project.containers(service_names=['simple']), key=attrgetter('name')) def get_port(number, index=None): if index is None: result = self.dispatch(['port', 'simple', str(number)]) else: result = self.dispatch(['port', '--index=' + str(index), 'simple', str(number)]) return result.stdout.rstrip() self.assertEqual(get_port(3000), containers[0].get_local_port(3000)) self.assertEqual(get_port(3000, index=1), containers[0].get_local_port(3000)) self.assertEqual(get_port(3000, index=2), containers[1].get_local_port(3000)) self.assertEqual(get_port(3002), "") def test_events_json(self): events_proc = start_process(self.base_dir, ['events', '--json']) self.dispatch(['up', '-d']) wait_on_condition(ContainerCountCondition(self.project, 2)) os.kill(events_proc.pid, signal.SIGINT) result = wait_on_process(events_proc, returncode=1) lines = [json.loads(line) for line in result.stdout.rstrip().split('\n')] assert Counter(e['action'] for e in lines) == {'create': 2, 'start': 2} def test_events_human_readable(self): def has_timestamp(string): str_iso_date, str_iso_time, 
container_info = string.split(' ', 2) try: return isinstance(datetime.datetime.strptime( '%s %s' % (str_iso_date, str_iso_time), '%Y-%m-%d %H:%M:%S.%f'), datetime.datetime) except ValueError: return False events_proc = start_process(self.base_dir, ['events']) self.dispatch(['up', '-d', 'simple']) wait_on_condition(ContainerCountCondition(self.project, 1)) os.kill(events_proc.pid, signal.SIGINT) result = wait_on_process(events_proc, returncode=1) lines = result.stdout.rstrip().split('\n') assert len(lines) == 2 container, = self.project.containers() expected_template = ( ' container {} {} (image=busybox:latest, ' 'name=simplecomposefile_simple_1)') assert expected_template.format('create', container.id) in lines[0] assert expected_template.format('start', container.id) in lines[1] assert has_timestamp(lines[0]) def test_env_file_relative_to_compose_file(self): config_path = os.path.abspath('tests/fixtures/env-file/docker-compose.yml') self.dispatch(['-f', config_path, 'up', '-d'], None) self._project = get_project(self.base_dir, [config_path]) containers = self.project.containers(stopped=True) self.assertEqual(len(containers), 1) self.assertIn("FOO=1", containers[0].get('Config.Env')) @mock.patch.dict(os.environ) def test_home_and_env_var_in_volume_path(self): os.environ['VOLUME_NAME'] = 'my-volume' os.environ['HOME'] = '/tmp/home-dir' self.base_dir = 'tests/fixtures/volume-path-interpolation' self.dispatch(['up', '-d'], None) container = self.project.containers(stopped=True)[0] actual_host_path = container.get_mount('/container-path')['Source'] components = actual_host_path.split('/') assert components[-2:] == ['home-dir', 'my-volume'] def test_up_with_default_override_file(self): self.base_dir = 'tests/fixtures/override-files' self.dispatch(['up', '-d'], None) containers = self.project.containers() self.assertEqual(len(containers), 2) web, db = containers self.assertEqual(web.human_readable_command, 'top') self.assertEqual(db.human_readable_command, 'top') def 
test_up_with_multiple_files(self): self.base_dir = 'tests/fixtures/override-files' config_paths = [ 'docker-compose.yml', 'docker-compose.override.yml', 'extra.yml', ] self._project = get_project(self.base_dir, config_paths) self.dispatch( [ '-f', config_paths[0], '-f', config_paths[1], '-f', config_paths[2], 'up', '-d', ], None) containers = self.project.containers() self.assertEqual(len(containers), 3) web, other, db = containers self.assertEqual(web.human_readable_command, 'top') self.assertTrue({'db', 'other'} <= set(get_links(web))) self.assertEqual(db.human_readable_command, 'top') self.assertEqual(other.human_readable_command, 'top') def test_up_with_extends(self): self.base_dir = 'tests/fixtures/extends' self.dispatch(['up', '-d'], None) self.assertEqual( set([s.name for s in self.project.services]), set(['mydb', 'myweb']), ) # Sort by name so we get [db, web] containers = sorted( self.project.containers(stopped=True), key=lambda c: c.name, ) self.assertEqual(len(containers), 2) web = containers[1] self.assertEqual( set(get_links(web)), set(['db', 'mydb_1', 'extends_mydb_1'])) expected_env = set([ "FOO=1", "BAR=2", "BAZ=2", ]) self.assertTrue(expected_env <= set(web.get('Config.Env')))
[]
[]
[ "FOO", "DOCKER_HOST", "BAR", "VOLUME_NAME", "HOME" ]
[]
["FOO", "DOCKER_HOST", "BAR", "VOLUME_NAME", "HOME"]
python
5
0
config_test.go
package config import ( "os" "testing" ) func TestConfig_Init(t *testing.T) { os.Setenv("TEST_NAME", "helloworld") appDir := os.Getenv("GOPATH") + "/src/go.zhuzi.me/config/example/" fileParser := NewFileParser(true, appDir) fileParser.Debug = true if err := Init(true, fileParser); err != nil { t.Error(err) } version := String(Data("test").Get("app", "version")) if "v2.1" != version { t.Error("debug 模式下读取的 version 不对,version:", version) } fileParser.Debug = false if err := Init(false, fileParser); err != nil { t.Error(err) } version = String(Data("test").Get("app", "version")) if "v2.0" != version { t.Error("release 模式下读取的 version 不对,version:", version) } // test v2 fileParser = NewFileParserV2(true, appDir) Init(true, fileParser) version = String(Data("test_debug").Get("app", "version")) if "v2.1" != version { t.Error("v2模式下读取test_debug.ini的version值不正确,version:", version) } version = String(Data("test").Get("app", "version")) if "v2.0" != version { t.Error("v2模式下读取test.ini的version值不正确,version:", version) } // 测试环境变量替换 fileParser = NewFileParserV1(true, appDir) Init(true, fileParser) if envVal := String(Data("test").Get("app", "env")); envVal != "" { if envVal != os.Getenv("TEST_NAME") { t.Error("环境变量读取失败,env:", envVal, ",right:", os.Getenv("TEST_NAME")) } } os.Clearenv() Init(true, fileParser) if envVal := String(Data("test").Get("app", "env")); envVal != "helloworld" { t.Error("读取环境变量默认值失败,env:", envVal, ",right:helloworld") } if envVal2 := String(Data("test").Get("app", "env2")); envVal2 != "" { t.Error("读取不存在的环境变量失败,env2:", envVal2, ",right:空值") } }
[ "\"GOPATH\"", "\"TEST_NAME\"", "\"TEST_NAME\"" ]
[]
[ "GOPATH", "TEST_NAME" ]
[]
["GOPATH", "TEST_NAME"]
go
2
0
providers/steam/steam_test.go
package steam_test import ( "github.com/viddsee/goth" "github.com/viddsee/goth/providers/steam" "github.com/stretchr/testify/assert" "os" "testing" ) func Test_New(t *testing.T) { t.Parallel() a := assert.New(t) p := provider() a.Equal(p.APIKey, os.Getenv("STEAM_KEY")) a.Equal(p.CallbackURL, "/foo") } func Test_Implements_Provider(t *testing.T) { t.Parallel() a := assert.New(t) a.Implements((*goth.Provider)(nil), provider()) } func Test_BeginAuth(t *testing.T) { t.Parallel() a := assert.New(t) p := provider() session, err := p.BeginAuth("test_state") s := session.(*steam.Session) a.NoError(err) a.Contains(s.AuthURL, "steamcommunity.com/openid/login") a.Contains(s.AuthURL, "foo") } func Test_SessionFromJSON(t *testing.T) { t.Parallel() a := assert.New(t) p := provider() session, err := p.UnmarshalSession(`{"AuthURL":"https://steamcommunity.com/openid/login?openid.claimed_id=http%3A%2F%2Fspecs.openid.net%2Fauth%2F2.0%2Fidentifier_select&openid.identity=http%3A%2F%2Fspecs.openid.net%2Fauth%2F2.0%2Fidentifier_select&openid.mode=checkid_setup&openid.ns=http%3A%2F%2Fspecs.openid.net%2Fauth%2F2.0&openid.realm=%3A%2F%2F&openid.return_to=%2Ffoo","SteamID":"1234567890","CallbackURL":"http://localhost:3030/","ResponseNonce":"2016-03-13T16:56:30ZJ8tlKVquwHi9ZSPV4ElU5PY2dmI="}`) a.NoError(err) s := session.(*steam.Session) a.Equal(s.AuthURL, "https://steamcommunity.com/openid/login?openid.claimed_id=http%3A%2F%2Fspecs.openid.net%2Fauth%2F2.0%2Fidentifier_select&openid.identity=http%3A%2F%2Fspecs.openid.net%2Fauth%2F2.0%2Fidentifier_select&openid.mode=checkid_setup&openid.ns=http%3A%2F%2Fspecs.openid.net%2Fauth%2F2.0&openid.realm=%3A%2F%2F&openid.return_to=%2Ffoo") a.Equal(s.CallbackURL, "http://localhost:3030/") a.Equal(s.SteamID, "1234567890") a.Equal(s.ResponseNonce, "2016-03-13T16:56:30ZJ8tlKVquwHi9ZSPV4ElU5PY2dmI=") } func provider() *steam.Provider { return steam.New(os.Getenv("STEAM_KEY"), "/foo") }
[ "\"STEAM_KEY\"", "\"STEAM_KEY\"" ]
[]
[ "STEAM_KEY" ]
[]
["STEAM_KEY"]
go
1
0
zdemo/zdemo/wsgi.py
""" WSGI config for zdemo project. It exposes the WSGI callable as a module-level variable named ``application``. For more information on this file, see https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/ """ import os from django.core.wsgi import get_wsgi_application os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'zdemo.settings') application = get_wsgi_application()
[]
[]
[]
[]
[]
python
0
0
contrib/spendfrom/spendfrom.py
#!/usr/bin/env python # Copyright (c) 2013 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. # # Use the raw transactions API to spend bitcoins received on particular addresses, # and send any change back to that same address. # # Example usage: # spendfrom.py # Lists available funds # spendfrom.py --from=ADDRESS --to=ADDRESS --amount=11.00 # # Assumes it will talk to a bitcoind or Bitcoin-Qt running # on localhost. # # Depends on jsonrpc # from decimal import * import getpass import math import os import os.path import platform import sys import time from jsonrpc import ServiceProxy, json BASE_FEE=Decimal("0.001") def check_json_precision(): """Make sure json library being used does not lose precision converting BTC values""" n = Decimal("20000000.00000003") satoshis = int(json.loads(json.dumps(float(n)))*1.0e8) if satoshis != 2000000000000003: raise RuntimeError("JSON encode/decode loses precision") def determine_db_dir(): """Return the default location of the zeuscoin data directory""" if platform.system() == "Darwin": return os.path.expanduser("~/Library/Application Support/Zeuscoin/") elif platform.system() == "Windows": return os.path.join(os.environ['APPDATA'], "Zeuscoin") return os.path.expanduser("~/.zeuscoin") def read_bitcoin_config(dbdir): """Read the zeuscoin.conf file from dbdir, returns dictionary of settings""" from ConfigParser import SafeConfigParser class FakeSecHead(object): def __init__(self, fp): self.fp = fp self.sechead = '[all]\n' def readline(self): if self.sechead: try: return self.sechead finally: self.sechead = None else: s = self.fp.readline() if s.find('#') != -1: s = s[0:s.find('#')].strip() +"\n" return s config_parser = SafeConfigParser() config_parser.readfp(FakeSecHead(open(os.path.join(dbdir, "zeuscoin.conf")))) return dict(config_parser.items("all")) def connect_JSON(config): """Connect to a zeuscoin JSON-RPC 
server""" testnet = config.get('testnet', '0') testnet = (int(testnet) > 0) # 0/1 in config file, convert to True/False if not 'rpcport' in config: config['rpcport'] = 19342 if testnet else 9342 connect = "http://%s:%[email protected]:%s"%(config['rpcuser'], config['rpcpassword'], config['rpcport']) try: result = ServiceProxy(connect) # ServiceProxy is lazy-connect, so send an RPC command mostly to catch connection errors, # but also make sure the bitcoind we're talking to is/isn't testnet: if result.getmininginfo()['testnet'] != testnet: sys.stderr.write("RPC server at "+connect+" testnet setting mismatch\n") sys.exit(1) return result except: sys.stderr.write("Error connecting to RPC server at "+connect+"\n") sys.exit(1) def unlock_wallet(bitcoind): info = bitcoind.getinfo() if 'unlocked_until' not in info: return True # wallet is not encrypted t = int(info['unlocked_until']) if t <= time.time(): try: passphrase = getpass.getpass("Wallet is locked; enter passphrase: ") bitcoind.walletpassphrase(passphrase, 5) except: sys.stderr.write("Wrong passphrase\n") info = bitcoind.getinfo() return int(info['unlocked_until']) > time.time() def list_available(bitcoind): address_summary = dict() address_to_account = dict() for info in bitcoind.listreceivedbyaddress(0): address_to_account[info["address"]] = info["account"] unspent = bitcoind.listunspent(0) for output in unspent: # listunspent doesn't give addresses, so: rawtx = bitcoind.getrawtransaction(output['txid'], 1) vout = rawtx["vout"][output['vout']] pk = vout["scriptPubKey"] # This code only deals with ordinary pay-to-bitcoin-address # or pay-to-script-hash outputs right now; anything exotic is ignored. 
if pk["type"] != "pubkeyhash" and pk["type"] != "scripthash": continue address = pk["addresses"][0] if address in address_summary: address_summary[address]["total"] += vout["value"] address_summary[address]["outputs"].append(output) else: address_summary[address] = { "total" : vout["value"], "outputs" : [output], "account" : address_to_account.get(address, "") } return address_summary def select_coins(needed, inputs): # Feel free to improve this, this is good enough for my simple needs: outputs = [] have = Decimal("0.0") n = 0 while have < needed and n < len(inputs): outputs.append({ "txid":inputs[n]["txid"], "vout":inputs[n]["vout"]}) have += inputs[n]["amount"] n += 1 return (outputs, have-needed) def create_tx(bitcoind, fromaddresses, toaddress, amount, fee): all_coins = list_available(bitcoind) total_available = Decimal("0.0") needed = amount+fee potential_inputs = [] for addr in fromaddresses: if addr not in all_coins: continue potential_inputs.extend(all_coins[addr]["outputs"]) total_available += all_coins[addr]["total"] if total_available < needed: sys.stderr.write("Error, only %f BTC available, need %f\n"%(total_available, needed)); sys.exit(1) # # Note: # Python's json/jsonrpc modules have inconsistent support for Decimal numbers. # Instead of wrestling with getting json.dumps() (used by jsonrpc) to encode # Decimals, I'm casting amounts to float before sending them to bitcoind. 
# outputs = { toaddress : float(amount) } (inputs, change_amount) = select_coins(needed, potential_inputs) if change_amount > BASE_FEE: # don't bother with zero or tiny change change_address = fromaddresses[-1] if change_address in outputs: outputs[change_address] += float(change_amount) else: outputs[change_address] = float(change_amount) rawtx = bitcoind.createrawtransaction(inputs, outputs) signed_rawtx = bitcoind.signrawtransaction(rawtx) if not signed_rawtx["complete"]: sys.stderr.write("signrawtransaction failed\n") sys.exit(1) txdata = signed_rawtx["hex"] return txdata def compute_amount_in(bitcoind, txinfo): result = Decimal("0.0") for vin in txinfo['vin']: in_info = bitcoind.getrawtransaction(vin['txid'], 1) vout = in_info['vout'][vin['vout']] result = result + vout['value'] return result def compute_amount_out(txinfo): result = Decimal("0.0") for vout in txinfo['vout']: result = result + vout['value'] return result def sanity_test_fee(bitcoind, txdata_hex, max_fee): class FeeError(RuntimeError): pass try: txinfo = bitcoind.decoderawtransaction(txdata_hex) total_in = compute_amount_in(bitcoind, txinfo) total_out = compute_amount_out(txinfo) if total_in-total_out > max_fee: raise FeeError("Rejecting transaction, unreasonable fee of "+str(total_in-total_out)) tx_size = len(txdata_hex)/2 kb = tx_size/1000 # integer division rounds down if kb > 1 and fee < BASE_FEE: raise FeeError("Rejecting no-fee transaction, larger than 1000 bytes") if total_in < 0.01 and fee < BASE_FEE: raise FeeError("Rejecting no-fee, tiny-amount transaction") # Exercise for the reader: compute transaction priority, and # warn if this is a very-low-priority transaction except FeeError as err: sys.stderr.write((str(err)+"\n")) sys.exit(1) def main(): import optparse parser = optparse.OptionParser(usage="%prog [options]") parser.add_option("--from", dest="fromaddresses", default=None, help="addresses to get zeuscoins from") parser.add_option("--to", dest="to", default=None, help="address 
to get send zeuscoins to") parser.add_option("--amount", dest="amount", default=None, help="amount to send") parser.add_option("--fee", dest="fee", default="0.0", help="fee to include") parser.add_option("--datadir", dest="datadir", default=determine_db_dir(), help="location of zeuscoin.conf file with RPC username/password (default: %default)") parser.add_option("--testnet", dest="testnet", default=False, action="store_true", help="Use the test network") parser.add_option("--dry_run", dest="dry_run", default=False, action="store_true", help="Don't broadcast the transaction, just create and print the transaction data") (options, args) = parser.parse_args() check_json_precision() config = read_bitcoin_config(options.datadir) if options.testnet: config['testnet'] = True bitcoind = connect_JSON(config) if options.amount is None: address_summary = list_available(bitcoind) for address,info in address_summary.iteritems(): n_transactions = len(info['outputs']) if n_transactions > 1: print("%s %.8f %s (%d transactions)"%(address, info['total'], info['account'], n_transactions)) else: print("%s %.8f %s"%(address, info['total'], info['account'])) else: fee = Decimal(options.fee) amount = Decimal(options.amount) while unlock_wallet(bitcoind) == False: pass # Keep asking for passphrase until they get it right txdata = create_tx(bitcoind, options.fromaddresses.split(","), options.to, amount, fee) sanity_test_fee(bitcoind, txdata, amount*Decimal("0.01")) if options.dry_run: print(txdata) else: txid = bitcoind.sendrawtransaction(txdata) print(txid) if __name__ == '__main__': main()
[]
[]
[ "APPDATA" ]
[]
["APPDATA"]
python
1
0
firestore/firestore_snippets/listen_test.go
// Copyright 2020 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // https://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package main import ( "bytes" "context" "io/ioutil" "log" "os" "strings" "testing" "time" "cloud.google.com/go/firestore" "github.com/GoogleCloudPlatform/golang-samples/internal/testutil" ) var duration time.Duration = 15 * time.Second func setup(ctx context.Context, t *testing.T) (*firestore.Client, string, string) { tc := testutil.SystemTest(t) projectID := os.Getenv("GOLANG_SAMPLES_FIRESTORE_PROJECT") if projectID == "" { t.Skip("Skipping firestore test. Set GOLANG_SAMPLES_FIRESTORE_PROJECT.") } collection := tc.ProjectID + "-collection-cities" client, err := firestore.NewClient(ctx, projectID) if err != nil { t.Fatalf("firestore.NewClient: %v", err) } return client, projectID, collection } func TestListen(t *testing.T) { ctx := context.Background() client, projectID, collection := setup(ctx, t) defer client.Close() ctx, cancel := context.WithTimeout(ctx, duration) defer cancel() // Delete all docs first to make sure setup works. 
docs, err := client.Collection(collection).Documents(ctx).GetAll() if err == nil { for _, doc := range docs { doc.Ref.Delete(ctx) } } cityCollection := []struct { city, name, state string }{ {city: "SF", name: "San Francisco", state: "CA"}, {city: "LA", name: "Los Angeles", state: "CA"}, {city: "DC", name: "Washington D.C."}, } for _, c := range cityCollection { if _, err := client.Collection(collection).Doc(c.city).Set(ctx, map[string]string{ "name": c.name, "state": c.state, }); err != nil { t.Fatalf("Set: %v", err) } } if err := listenDocument(ctx, ioutil.Discard, projectID, collection); err != nil { t.Errorf("listenDocument: %v", err) } } func TestListenMultiple(t *testing.T) { ctx := context.Background() client, projectID, collection := setup(ctx, t) defer client.Close() ctx, cancel := context.WithTimeout(ctx, duration) defer cancel() if err := listenMultiple(ctx, ioutil.Discard, projectID, collection); err != nil { t.Errorf("listenMultiple: %v", err) } } func TestListenChanges(t *testing.T) { ctx := context.Background() client, projectID, collection := setup(ctx, t) defer client.Close() ctx, cancel := context.WithTimeout(ctx, duration) defer cancel() buf := &bytes.Buffer{} c := make(chan *bytes.Buffer) go func() { defer close(c) err := listenChanges(ctx, buf, projectID, collection) if err != nil { t.Errorf("listenChanges: %v", err) } c <- buf }() // Add some changes to data in parallel. time.Sleep(time.Second) var pop int64 = 3900000 if _, err := client.Collection(collection).Doc("LA").Update(ctx, []firestore.Update{ {Path: "population", Value: pop}, }); err != nil { log.Fatalf("Doc.Update: %v", err) } <-c testutil.Retry(t, 3, 2*time.Second, func(r *testutil.R) { // While the content is likely here, it is possible the update event // hasn't been observed yet. Retry a few times. 
want := "population:3900000" if got := buf.String(); !strings.Contains(got, want) { r.Errorf("listenChanges got\n----\n%s\n----\nWant to contain:\n----\n%s\n----", got, want) } }) } func TestListenErrors(t *testing.T) { ctx := context.Background() client, projectID, collection := setup(ctx, t) defer client.Close() ctx, cancel := context.WithTimeout(ctx, duration) defer cancel() if err := listenErrors(ctx, ioutil.Discard, projectID, collection); err != nil { t.Errorf("listenErrors: %v", err) } }
[ "\"GOLANG_SAMPLES_FIRESTORE_PROJECT\"" ]
[]
[ "GOLANG_SAMPLES_FIRESTORE_PROJECT" ]
[]
["GOLANG_SAMPLES_FIRESTORE_PROJECT"]
go
1
0
vendor/github.com/varlink/go/cmd/varlink/main.go
package main import ( "encoding/json" "flag" "fmt" "github.com/TylerBrock/colorjson" "github.com/fatih/color" "github.com/varlink/go/varlink" "os" "strings" ) var bold = color.New(color.Bold) var errorBoldRed = bold.Sprint(color.New(color.FgRed).Sprint("Error:")) var bridge string func ErrPrintf(format string, a ...interface{}) { fmt.Fprintf(os.Stderr, "%s ", errorBoldRed) fmt.Fprintf(os.Stderr, format, a...) } func print_usage(set *flag.FlagSet, arg_help string) { if set == nil { fmt.Fprintf(os.Stderr, "Usage: %s [GLOBAL OPTIONS] COMMAND ...\n", os.Args[0]) } else { fmt.Fprintf(os.Stderr, "Usage: %s [GLOBAL OPTIONS] %s [OPTIONS] %s\n", os.Args[0], set.Name(), arg_help) } fmt.Fprintln(os.Stderr, "\nGlobal Options:") flag.PrintDefaults() if set == nil { fmt.Fprintln(os.Stderr, "\nCommands:") fmt.Fprintln(os.Stderr, " info\tPrint information about a service") fmt.Fprintln(os.Stderr, " help\tPrint interface description or service information") fmt.Fprintln(os.Stderr, " call\tCall a method") } else { fmt.Fprintln(os.Stderr, "\nOptions:") set.PrintDefaults() } os.Exit(1) } func varlink_call(args []string) { var err error var oneway bool callFlags := flag.NewFlagSet("help", flag.ExitOnError) callFlags.BoolVar(&oneway, "-oneway", false, "Use bridge for connection") var help bool callFlags.BoolVar(&help, "help", false, "Prints help information") var usage = func() { print_usage(callFlags, "<[ADDRESS/]INTERFACE.METHOD> [ARGUMENTS]") } callFlags.Usage = usage _ = callFlags.Parse(args) if help { usage() } var con *varlink.Connection var address string var methodName string if len(bridge) != 0 { con, err = varlink.NewBridge(bridge) if err != nil { ErrPrintf("Cannot connect with bridge '%s': %v\n", bridge, err) os.Exit(2) } address = "bridge:" + bridge methodName = callFlags.Arg(0) } else { uri := callFlags.Arg(0) if uri == "" { usage() } li := strings.LastIndex(uri, "/") if li == -1 { ErrPrintf("Invalid address '%s'\n", uri) os.Exit(2) } address = uri[:li] methodName = 
uri[li+1:] con, err = varlink.NewConnection(address) if err != nil { ErrPrintf("Cannot connect to '%s': %v\n", address, err) os.Exit(2) } } var parameters string var params json.RawMessage parameters = callFlags.Arg(1) if parameters == "" { params = nil } else { json.Unmarshal([]byte(parameters), &params) } var flags uint64 flags = 0 if oneway { flags |= varlink.Oneway } recv, err := con.Send(methodName, params, flags) var retval map[string]interface{} // FIXME: Use cont _, err = recv(&retval) f := colorjson.NewFormatter() f.Indent = 2 f.KeyColor = color.New(color.FgCyan) f.StringColor = color.New(color.FgMagenta) f.NumberColor = color.New(color.FgMagenta) f.BoolColor = color.New(color.FgMagenta) f.NullColor = color.New(color.FgMagenta) if err != nil { ErrPrintf("Error calling '%s': %v\n", methodName, err) switch e := err.(type) { case *varlink.Error: println(e.Name) errorRawParameters := e.Parameters.(*json.RawMessage) if errorRawParameters == nil { break } var param map[string]interface{} _ = json.Unmarshal(*errorRawParameters, &param) c, _ := f.Marshal(param) ErrPrintf("%v\n", string(c)) } os.Exit(2) } c, _ := f.Marshal(retval) fmt.Println(string(c)) } func varlink_help(args []string) { var err error helpFlags := flag.NewFlagSet("help", flag.ExitOnError) var help bool helpFlags.BoolVar(&help, "help", false, "Prints help information") var usage = func() { print_usage(helpFlags, "<[ADDRESS/]INTERFACE>") } helpFlags.Usage = usage _ = helpFlags.Parse(args) if help { usage() } var con *varlink.Connection var address string var interfaceName string if len(bridge) != 0 { con, err = varlink.NewBridge(bridge) if err != nil { ErrPrintf("Cannot connect with bridge '%s': %v\n", bridge, err) os.Exit(2) } address = "bridge:" + bridge interfaceName = helpFlags.Arg(0) } else { uri := helpFlags.Arg(0) if uri == "" && bridge == "" { ErrPrintf("No ADDRESS or activation or bridge\n\n") usage() } li := strings.LastIndex(uri, "/") if li == -1 { ErrPrintf("Invalid address '%s'\n", 
uri) os.Exit(2) } address = uri[:li] con, err = varlink.NewConnection(address) if err != nil { ErrPrintf("Cannot connect to '%s': %v\n", address, err) os.Exit(2) } interfaceName = uri[li+1:] } description, err := con.GetInterfaceDescription(interfaceName) if err != nil { ErrPrintf("Cannot get interface description for '%s': %v\n", interfaceName, err) os.Exit(2) } fmt.Println(description) } func varlink_info(args []string) { var err error infoFlags := flag.NewFlagSet("info", flag.ExitOnError) var help bool infoFlags.BoolVar(&help, "help", false, "Prints help information") var usage = func() { print_usage(infoFlags, "[ADDRESS]") } infoFlags.Usage = usage _ = infoFlags.Parse(args) if help { usage() } var con *varlink.Connection var address string if len(bridge) != 0 { con, err = varlink.NewBridge(bridge) if err != nil { ErrPrintf("Cannot connect with bridge '%s': %v\n", bridge, err) os.Exit(2) } address = "bridge:" + bridge } else { address = infoFlags.Arg(0) if address == "" && bridge == "" { ErrPrintf("No ADDRESS or activation or bridge\n\n") usage() } con, err = varlink.NewConnection(address) if err != nil { ErrPrintf("Cannot connect to '%s': %v\n", address, err) os.Exit(2) } } var vendor, product, version, url string var interfaces []string err = con.GetInfo(&vendor, &product, &version, &url, &interfaces) if err != nil { ErrPrintf("Cannot get info for '%s': %v\n", address, err) os.Exit(2) } fmt.Printf("%s %s\n", bold.Sprint("Vendor:"), vendor) fmt.Printf("%s %s\n", bold.Sprint("Product:"), product) fmt.Printf("%s %s\n", bold.Sprint("Version:"), version) fmt.Printf("%s %s\n", bold.Sprint("URL:"), url) fmt.Printf("%s\n %s\n\n", bold.Sprint("Interfaces:"), strings.Join(interfaces[:], "\n ")) } func main() { var debug bool var colorMode string flag.CommandLine.Usage = func() { print_usage(nil, "") } flag.BoolVar(&debug, "debug", false, "Enable debug output") flag.StringVar(&bridge, "bridge", "", "Use bridge for connection") flag.StringVar(&colorMode, "color", "auto", 
"colorize output [default: auto] [possible values: on, off, auto]") flag.Parse() if colorMode != "on" && (os.Getenv("TERM") == "" || colorMode == "off") { color.NoColor = true // disables colorized output } switch flag.Arg(0) { case "info": varlink_info(flag.Args()[1:]) case "help": varlink_help(flag.Args()[1:]) case "call": varlink_call(flag.Args()[1:]) default: print_usage(nil, "") } }
[ "\"TERM\"" ]
[]
[ "TERM" ]
[]
["TERM"]
go
1
0
cmd/flexera2nvd/flexera2nvd.go
// Copyright (c) Facebook, Inc. and its affiliates. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package main import ( "context" "encoding/json" "fmt" "io" "os" "strings" "github.com/facebookincubator/flog" "github.com/facebookincubator/nvdtools/providers/flexera/api" "github.com/facebookincubator/nvdtools/providers/flexera/schema" "github.com/facebookincubator/nvdtools/providers/lib/client" "github.com/facebookincubator/nvdtools/providers/lib/runner" ) func Read(r io.Reader, c chan runner.Convertible) error { var vulns map[string]*schema.Advisory if err := json.NewDecoder(r).Decode(&vulns); err != nil { return fmt.Errorf("can't decode into vulns: %v", err) } for _, vuln := range vulns { c <- vuln } return nil } func FetchSince(ctx context.Context, c client.Client, baseURL string, since int64) (<-chan runner.Convertible, error) { apiKey := os.Getenv("FLEXERA_TOKEN") if apiKey == "" { return nil, fmt.Errorf("please set FLEXERA_TOKEN in environment") } if !strings.HasPrefix(apiKey, "Token ") { apiKey = "Token " + apiKey } client := api.NewClient(c, baseURL, apiKey) return client.FetchAllVulnerabilities(ctx, since) } func main() { r := runner.Runner{ Config: runner.Config{ BaseURL: "https://api.app.secunia.com", ClientConfig: client.Config{ UserAgent: "flexera2nvd", }, }, FetchSince: FetchSince, Read: Read, } if err := r.Run(); err != nil { flog.Fatalln(err) } }
[ "\"FLEXERA_TOKEN\"" ]
[]
[ "FLEXERA_TOKEN" ]
[]
["FLEXERA_TOKEN"]
go
1
0
test/extended/util/framework.go
package util import ( "encoding/json" "fmt" "io/ioutil" "net/http" "os" "os/exec" "path/filepath" "regexp" "strconv" "strings" "time" g "github.com/onsi/ginkgo" o "github.com/onsi/gomega" kapi "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/errors" "k8s.io/kubernetes/pkg/api/resource" "k8s.io/kubernetes/pkg/api/unversioned" "k8s.io/kubernetes/pkg/apimachinery/registered" "k8s.io/kubernetes/pkg/apis/batch" kclientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" kbatchclient "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/batch/internalversion" kcoreclient "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion" "k8s.io/kubernetes/pkg/fields" "k8s.io/kubernetes/pkg/labels" "k8s.io/kubernetes/pkg/quota" "k8s.io/kubernetes/pkg/runtime" "k8s.io/kubernetes/pkg/selection" "k8s.io/kubernetes/pkg/util/uuid" "k8s.io/kubernetes/pkg/util/wait" "k8s.io/kubernetes/test/e2e/framework" buildapi "github.com/openshift/origin/pkg/build/api" "github.com/openshift/origin/pkg/client" deployapi "github.com/openshift/origin/pkg/deploy/api" deployutil "github.com/openshift/origin/pkg/deploy/util" imageapi "github.com/openshift/origin/pkg/image/api" "github.com/openshift/origin/pkg/util/namer" ) const pvPrefix = "pv-" // WaitForOpenShiftNamespaceImageStreams waits for the standard set of imagestreams to be imported func WaitForOpenShiftNamespaceImageStreams(oc *CLI) error { langs := []string{"ruby", "nodejs", "perl", "php", "python", "wildfly", "mysql", "postgresql", "mongodb", "jenkins"} scan := func() bool { for _, lang := range langs { is, err := oc.Client().ImageStreams("openshift").Get(lang) if err != nil { return false } for tag := range is.Spec.Tags { if _, ok := is.Status.Tags[tag]; !ok { return false } } } return true } success := false for i := 0; i < 10; i++ { success = scan() if success { break } time.Sleep(3 * time.Second) } if success { return nil } DumpImageStreams(oc) return 
fmt.Errorf("Failed to import expected imagestreams") } // CheckOpenShiftNamespaceImageStreams is a temporary workaround for the intermittent // issue seen in extended tests where *something* is deleteing the pre-loaded, languange // imagestreams from the OpenShift namespace func CheckOpenShiftNamespaceImageStreams(oc *CLI) { missing := false langs := []string{"ruby", "nodejs", "perl", "php", "python", "wildfly", "mysql", "postgresql", "mongodb", "jenkins"} for _, lang := range langs { _, err := oc.Client().ImageStreams("openshift").Get(lang) if err != nil { missing = true break } } if missing { fmt.Fprint(g.GinkgoWriter, "\n\n openshift namespace image streams corrupted \n\n") DumpImageStreams(oc) out, err := oc.Run("get").Args("is", "-n", "openshift", "--config", KubeConfigPath()).Output() err = fmt.Errorf("something has tampered with the image streams in the openshift namespace; look at audits in master log; \n%s\n", out) o.Expect(err).NotTo(o.HaveOccurred()) } else { fmt.Fprint(g.GinkgoWriter, "\n\n openshift namespace image streams OK \n\n") } } //DumpImageStreams will dump both the openshift namespace and local namespace imagestreams // as part of debugging when the language imagestreams in the openshift namespace seem to disappear func DumpImageStreams(oc *CLI) { out, err := oc.Run("get").Args("is", "-n", "openshift", "--config", KubeConfigPath()).Output() if err == nil { fmt.Fprintf(g.GinkgoWriter, "\n imagestreams in openshift namespace: \n%s\n", out) } else { fmt.Fprintf(g.GinkgoWriter, "\n error on getting imagestreams in openshift namespace: %+v\n", err) } out, err = oc.Run("get").Args("is").Output() if err == nil { fmt.Fprintf(g.GinkgoWriter, "\n imagestreams in dynamic test namespace: \n%s\n", out) } else { fmt.Fprintf(g.GinkgoWriter, "\n error on getting imagestreams in dynamic test namespace: %+v\n", err) } ids, err := ListImages() if err != nil { fmt.Fprintf(g.GinkgoWriter, "\n got error on docker images %+v\n", err) } else { for _, id := range ids 
{ fmt.Fprintf(g.GinkgoWriter, " found local image %s\n", id) } } } func DumpNamedBuildLogs(buildName string, oc *CLI) { buildOuput, err := oc.Run("logs").Args("-f", "build/"+buildName, "--timestamps").Output() if err == nil { fmt.Fprintf(g.GinkgoWriter, "\n\n build logs for %s: %s\n\n", buildName, buildOuput) } else { fmt.Fprintf(g.GinkgoWriter, "\n\n got error on build logs for %s: %+v\n\n", buildName, err) } } // DumpBuildLogs will dump the latest build logs for a BuildConfig for debug purposes func DumpBuildLogs(bc string, oc *CLI) { buildOuput, err := oc.Run("logs").Args("-f", "bc/"+bc, "--timestamps").Output() if err == nil { fmt.Fprintf(g.GinkgoWriter, "\n\n build logs : %s\n\n", buildOuput) } else { fmt.Fprintf(g.GinkgoWriter, "\n\n got error on build logs %+v\n\n", err) } // if we suspect that we are filling up the registry file system, call ExamineDiskUsage / ExaminePodDiskUsage // also see if manipulations of the quota around /mnt/openshift-xfs-vol-dir exist in the extended test set up scripts ExamineDiskUsage() ExaminePodDiskUsage(oc) } func GetDeploymentConfigPods(oc *CLI, dcName string) (*kapi.PodList, error) { return oc.KubeClient().Core().Pods(oc.Namespace()).List(kapi.ListOptions{LabelSelector: ParseLabelsOrDie(fmt.Sprintf("deploymentconfig=%s", dcName))}) } // DumpDeploymentLogs will dump the latest deployment logs for a DeploymentConfig for debug purposes func DumpDeploymentLogs(dc string, oc *CLI) { fmt.Fprintf(g.GinkgoWriter, "\n\nDumping logs for deploymentconfig %q in namespace %q\n\n", dc, oc.Namespace()) pods, err := GetDeploymentConfigPods(oc, dc) if err != nil { fmt.Fprintf(g.GinkgoWriter, "\n\nUnable to retrieve logs for deploymentconfig %q: %+v\n\n", dc, err) return } if pods == nil || pods.Items == nil { fmt.Fprintf(g.GinkgoWriter, "\n\nUnable to retrieve logs for deploymentconfig %q. 
No pods found: %+v\n\n", dc, pods) return } for _, pod := range pods.Items { podName := pod.ObjectMeta.Name fmt.Fprintf(g.GinkgoWriter, "\n\nDescribing deploymentconfig %q pod %q\n", dc, podName) descOutput, err := oc.Run("describe").Args("pod/" + podName).Output() if err == nil { fmt.Fprintf(g.GinkgoWriter, "%s\n\n", descOutput) } else { fmt.Fprintf(g.GinkgoWriter, "Error retrieving pod description: %v\n\n", err) } fmt.Fprintf(g.GinkgoWriter, "\n\nLog for deploymentconfig %q pod %q\n---->\n", dc, podName) depOutput, err := oc.Run("logs").Args("pod/" + podName).Output() if err == nil { fmt.Fprintf(g.GinkgoWriter, "%s\n<----end of log for %q\n", depOutput, podName) } else { fmt.Fprintf(g.GinkgoWriter, "\n<----unable to retrieve logs: %v\n", err) } } } // ExamineDiskUsage will dump df output on the testing system; leveraging this as part of diagnosing // the registry's disk filling up during external tests on jenkins func ExamineDiskUsage() { out, err := exec.Command("/bin/df", "-m").Output() if err == nil { fmt.Fprintf(g.GinkgoWriter, "\n\n df -m output: %s\n\n", string(out)) } else { fmt.Fprintf(g.GinkgoWriter, "\n\n got error on df %v\n\n", err) } out, err = exec.Command("/bin/docker", "info").Output() if err == nil { fmt.Fprintf(g.GinkgoWriter, "\n\n docker info output: \n%s\n\n", string(out)) } else { fmt.Fprintf(g.GinkgoWriter, "\n\n got error on docker inspect %v\n\n", err) } } // ExaminePodDiskUsage will dump df/du output on registry pod; leveraging this as part of diagnosing // the registry's disk filling up during external tests on jenkins func ExaminePodDiskUsage(oc *CLI) { out, err := oc.Run("get").Args("pods", "-o", "json", "-n", "default", "--config", KubeConfigPath()).Output() var podName string if err == nil { b := []byte(out) var list kapi.PodList err = json.Unmarshal(b, &list) if err == nil { for _, pod := range list.Items { fmt.Fprintf(g.GinkgoWriter, "\n\n looking at pod %s \n\n", pod.ObjectMeta.Name) if strings.Contains(pod.ObjectMeta.Name, 
"docker-registry-") && !strings.Contains(pod.ObjectMeta.Name, "deploy") { podName = pod.ObjectMeta.Name break } } } else { fmt.Fprintf(g.GinkgoWriter, "\n\n got json unmarshal err: %v\n\n", err) } } else { fmt.Fprintf(g.GinkgoWriter, "\n\n got error on get pods: %v\n\n", err) } out, err = oc.Run("exec").Args("-n", "default", podName, "df", "--config", KubeConfigPath()).Output() if err == nil { fmt.Fprintf(g.GinkgoWriter, "\n\n df from registry pod: \n%s\n\n", out) } else { fmt.Fprintf(g.GinkgoWriter, "\n\n got error on reg pod df: %v\n", err) } out, err = oc.Run("exec").Args("-n", "default", podName, "du", "/registry", "--config", KubeConfigPath()).Output() if err == nil { fmt.Fprintf(g.GinkgoWriter, "\n\n du from registry pod: \n%s\n\n", out) } else { fmt.Fprintf(g.GinkgoWriter, "\n\n got error on reg pod du: %v\n", err) } } // WriteObjectToFile writes the JSON representation of runtime.Object into a temporary // file. func WriteObjectToFile(obj runtime.Object, filename string) error { content, err := runtime.Encode(kapi.Codecs.LegacyCodec(registered.EnabledVersions()...), obj) if err != nil { return err } return ioutil.WriteFile(filename, []byte(content), 0644) } // VarSubOnFile reads in srcFile, finds instances of ${key} from the map // and replaces them with their associated values. func VarSubOnFile(srcFile string, destFile string, vars map[string]string) error { srcData, err := ioutil.ReadFile(srcFile) if err == nil { srcString := string(srcData) for k, v := range vars { k = "${" + k + "}" srcString = strings.Replace(srcString, k, v, -1) // -1 means unlimited replacements } err = ioutil.WriteFile(destFile, []byte(srcString), 0644) } return err } // StartBuild executes OC start-build with the specified arguments. StdOut and StdErr from the process // are returned as separate strings. 
func StartBuild(oc *CLI, args ...string) (stdout, stderr string, err error) { stdout, stderr, err = oc.Run("start-build").Args(args...).Outputs() fmt.Fprintf(g.GinkgoWriter, "\n\nstart-build output with args %v:\nError>%v\nStdOut>\n%s\nStdErr>\n%s\n\n", args, err, stdout, stderr) return stdout, stderr, err } var buildPathPattern = regexp.MustCompile(`^build/([\w\-\._]+)$`) type BuildResult struct { // BuildPath is a resource qualified name (e.g. "build/test-1"). BuildPath string // StartBuildStdErr is the StdErr output generated by oc start-build. StartBuildStdErr string // StartBuildStdOut is the StdOut output generated by oc start-build. StartBuildStdOut string // StartBuildErr is the error, if any, returned by the direct invocation of the start-build command. StartBuildErr error // The buildconfig which generated this build. BuildConfigName string // Build is the resource created. May be nil if there was a timeout. Build *buildapi.Build // BuildAttempt represents that a Build resource was created. // false indicates a severe error unrelated to Build success or failure. BuildAttempt bool // BuildSuccess is true if the build was finshed successfully. BuildSuccess bool // BuildFailure is true if the build was finished with an error. BuildFailure bool // BuildTimeout is true if there was a timeout waiting for the build to finish. BuildTimeout bool // The openshift client which created this build. oc *CLI } // DumpLogs sends logs associated with this BuildResult to the GinkgoWriter. 
func (t *BuildResult) DumpLogs() { fmt.Fprintf(g.GinkgoWriter, "\n\n*****************************************\n") fmt.Fprintf(g.GinkgoWriter, "Dumping Build Result: %#v\n", *t) if t == nil { fmt.Fprintf(g.GinkgoWriter, "No build result available!\n\n") return } desc, err := t.oc.Run("describe").Args(t.BuildPath).Output() fmt.Fprintf(g.GinkgoWriter, "\n** Build Description:\n") if err != nil { fmt.Fprintf(g.GinkgoWriter, "Error during description retrieval: %+v\n", err) } else { fmt.Fprintf(g.GinkgoWriter, "%s\n", desc) } fmt.Fprintf(g.GinkgoWriter, "\n** Build Logs:\n") buildOuput, err := t.Logs() if err != nil { fmt.Fprintf(g.GinkgoWriter, "Error during log retrieval: %+v\n", err) } else { fmt.Fprintf(g.GinkgoWriter, "%s\n", buildOuput) } fmt.Fprintf(g.GinkgoWriter, "\n\n") // if we suspect that we are filling up the registry file system, call ExamineDiskUsage / ExaminePodDiskUsage // also see if manipulations of the quota around /mnt/openshift-xfs-vol-dir exist in the extended test set up scripts //ExamineDiskUsage() //ExaminePodDiskUsage(t.oc) fmt.Fprintf(g.GinkgoWriter, "\n\n") } // Logs returns the logs associated with this build. func (t *BuildResult) Logs() (string, error) { if t == nil || t.BuildPath == "" { return "", fmt.Errorf("Not enough information to retrieve logs for %#v", *t) } buildOuput, err := t.oc.Run("logs").Args("-f", t.BuildPath, "--timestamps").Output() if err != nil { return "", fmt.Errorf("Error retrieving logs for %#v: %v", *t, err) } return buildOuput, nil } // Dumps logs and triggers a Ginkgo assertion if the build did NOT succeed. 
func (t *BuildResult) AssertSuccess() *BuildResult {
	// Dump diagnostics before failing the expectation so the log contains them.
	if !t.BuildSuccess {
		t.DumpLogs()
	}
	o.ExpectWithOffset(1, t.BuildSuccess).To(o.BeTrue())
	return t
}

// Dumps logs and triggers a Ginkgo assertion if the build did NOT have an error (this will not assert on timeouts)
func (t *BuildResult) AssertFailure() *BuildResult {
	if !t.BuildFailure {
		t.DumpLogs()
	}
	o.ExpectWithOffset(1, t.BuildFailure).To(o.BeTrue())
	return t
}

// StartBuildAndWait executes OC start-build with the specified arguments on an existing buildconfig.
// Note that start-build will be run with "-o=name" as a parameter when using this method.
// If no error is returned from this method, it means that the build attempted successfully, NOT that
// the build completed. For completion information, check the BuildResult object.
func StartBuildAndWait(oc *CLI, args ...string) (result *BuildResult, err error) {
	args = append(args, "-o=name") // ensure that the build name is the only thing send to stdout
	stdout, stderr, err := StartBuild(oc, args...)

	// Usually, with -o=name, we only expect the build path.
	// However, the caller may have added --follow which can add
	// content to stdout. So just grab the first line.
	buildPath := strings.TrimSpace(strings.Split(stdout, "\n")[0])

	result = &BuildResult{
		Build:            nil,
		BuildPath:        buildPath,
		StartBuildStdOut: stdout,
		StartBuildStdErr: stderr,
		StartBuildErr:    nil,
		BuildAttempt:     false,
		BuildSuccess:     false,
		BuildFailure:     false,
		BuildTimeout:     false,
		oc:               oc,
	}

	// An error here does not necessarily mean we could not run start-build. For example
	// when --wait is specified, start-build returns an error if the build fails. Therefore,
	// we continue to collect build information even if we see an error.
	result.StartBuildErr = err

	// Extract the bare build name from the "build/<name>" path.
	matches := buildPathPattern.FindStringSubmatch(buildPath)
	if len(matches) != 2 {
		return result, fmt.Errorf("Build path output did not match expected format 'build/name' : %q", buildPath)
	}
	buildName := matches[1]

	fmt.Fprintf(g.GinkgoWriter, "Waiting for %s to complete\n", buildPath)
	// The two callbacks record the observed Build and its terminal state on
	// the result as a side effect while WaitForABuild polls/filters.
	err = WaitForABuild(oc.Client().Builds(oc.Namespace()), buildName,
		func(b *buildapi.Build) bool {
			result.Build = b
			result.BuildSuccess = CheckBuildSuccessFn(b)
			return result.BuildSuccess
		},
		func(b *buildapi.Build) bool {
			result.Build = b
			result.BuildFailure = CheckBuildFailedFn(b)
			return result.BuildFailure
		},
	)

	if result.Build == nil {
		// We only abort here if the build progress was unobservable. Only known cause would be severe, non-build related error in WaitForABuild.
		return result, fmt.Errorf("Severe error waiting for build: %v", err)
	}

	result.BuildAttempt = true
	// Neither success nor failure was observed => the wait timed out.
	result.BuildTimeout = !(result.BuildFailure || result.BuildSuccess)

	fmt.Fprintf(g.GinkgoWriter, "Done waiting for %s: %#v\n", buildPath, *result)
	return result, nil
}

// WaitForABuild waits for a Build object to match either isOK or isFailed conditions.
func WaitForABuild(c client.BuildInterface, name string, isOK, isFailed func(*buildapi.Build) bool) error {
	// wait 2 minutes for build to exist
	err := wait.Poll(1*time.Second, 2*time.Minute, func() (bool, error) {
		if _, err := c.Get(name); err != nil {
			return false, nil
		}
		return true, nil
	})
	if err == wait.ErrWaitTimeout {
		return fmt.Errorf("Timed out waiting for build %q to be created", name)
	}
	if err != nil {
		return err
	}
	// wait longer for the build to run to completion
	err = wait.Poll(5*time.Second, 60*time.Minute, func() (bool, error) {
		list, err := c.List(kapi.ListOptions{FieldSelector: fields.Set{"name": name}.AsSelector()})
		if err != nil {
			return false, err
		}
		for i := range list.Items {
			if name == list.Items[i].Name && isOK(&list.Items[i]) {
				return true, nil
			}
			// Any other build in the selection, or a failed match, aborts the poll.
			if name != list.Items[i].Name || isFailed(&list.Items[i]) {
				return false, fmt.Errorf("The build %q status is %q", name, list.Items[i].Status.Phase)
			}
		}
		return false, nil
	})
	if err == wait.ErrWaitTimeout {
		return fmt.Errorf("Timed out waiting for build %q to complete", name)
	}
	return err
}

// CheckBuildSuccessFn returns true if the build succeeded
var CheckBuildSuccessFn = func(b *buildapi.Build) bool {
	return b.Status.Phase == buildapi.BuildPhaseComplete
}

// CheckBuildFailedFn return true if the build failed
var CheckBuildFailedFn = func(b *buildapi.Build) bool {
	return b.Status.Phase == buildapi.BuildPhaseFailed || b.Status.Phase == buildapi.BuildPhaseError
}

// WaitForBuilderAccount waits until the builder service account gets fully
// provisioned
func WaitForBuilderAccount(c kcoreclient.ServiceAccountInterface) error {
	waitFn := func() (bool, error) {
		sc, err := c.Get("builder")
		if err != nil {
			// If we can't access the service accounts, let's wait till the controller
			// create it.
			if errors.IsForbidden(err) {
				return false, nil
			}
			return false, err
		}
		// "Fully provisioned" means the dockercfg secret has been attached.
		for _, s := range sc.Secrets {
			if strings.Contains(s.Name, "dockercfg") {
				return true, nil
			}
		}
		return false, nil
	}
	return wait.Poll(time.Duration(100*time.Millisecond), 1*time.Minute, waitFn)
}

// WaitForAnImageStream waits for an ImageStream to fulfill the isOK function
func WaitForAnImageStream(client client.ImageStreamInterface,
	name string,
	isOK, isFailed func(*imageapi.ImageStream) bool) error {
	for {
		list, err := client.List(kapi.ListOptions{FieldSelector: fields.Set{"name": name}.AsSelector()})
		if err != nil {
			return err
		}
		for i := range list.Items {
			if isOK(&list.Items[i]) {
				return nil
			}
			if isFailed(&list.Items[i]) {
				return fmt.Errorf("The image stream %q status is %q",
					name, list.Items[i].Annotations[imageapi.DockerImageRepositoryCheckAnnotation])
			}
		}

		rv := list.ResourceVersion
		w, err := client.Watch(kapi.ListOptions{FieldSelector: fields.Set{"name": name}.AsSelector(), ResourceVersion: rv})
		if err != nil {
			return err
		}
		// NOTE(review): this defer sits inside the outer for loop, so a watch
		// opened on every re-list is only closed when the function returns;
		// long waits accumulate open watches. An explicit w.Stop() before the
		// `break` below would release each one promptly — confirm and fix.
		defer w.Stop()

		for {
			val, ok := <-w.ResultChan()
			if !ok {
				// reget and re-watch
				break
			}
			if e, ok := val.Object.(*imageapi.ImageStream); ok {
				if isOK(e) {
					return nil
				}
				if isFailed(e) {
					return fmt.Errorf("The image stream %q status is %q",
						name, e.Annotations[imageapi.DockerImageRepositoryCheckAnnotation])
				}
			}
		}
	}
}

// WaitForAnImageStreamTag waits until an image stream with given name has non-empty history for given tag.
// Defaults to waiting for 60 seconds
func WaitForAnImageStreamTag(oc *CLI, namespace, name, tag string) error {
	return TimedWaitForAnImageStreamTag(oc, namespace, name, tag, time.Second*60)
}

// TimedWaitForAnImageStreamTag waits until an image stream with given name has non-empty history for given tag.
// Gives up waiting after the specified waitTimeout
func TimedWaitForAnImageStreamTag(oc *CLI, namespace, name, tag string, waitTimeout time.Duration) error {
	g.By(fmt.Sprintf("waiting for an is importer to import a tag %s into a stream %s", tag, name))
	start := time.Now()
	// NOTE(review): c is unbuffered; if the select below returns on timeout,
	// the goroutine blocks forever on `c <- err` and leaks. A buffered
	// channel (make(chan error, 1)) would avoid that — confirm and fix.
	c := make(chan error)
	go func() {
		err := WaitForAnImageStream(
			oc.Client().ImageStreams(namespace),
			name,
			// Success: the tag exists and has at least one history item.
			func(is *imageapi.ImageStream) bool {
				if history, exists := is.Status.Tags[tag]; !exists || len(history.Items) == 0 {
					return false
				}
				return true
			},
			// Failure: give up once the deadline has passed.
			func(is *imageapi.ImageStream) bool {
				return time.Now().After(start.Add(waitTimeout))
			})
		c <- err
	}()

	select {
	case e := <-c:
		return e
	case <-time.After(waitTimeout):
		return fmt.Errorf("timed out while waiting of an image stream tag %s/%s:%s", namespace, name, tag)
	}
}

// CheckImageStreamLatestTagPopulatedFn returns true if the imagestream has a ':latest' tag filed
var CheckImageStreamLatestTagPopulatedFn = func(i *imageapi.ImageStream) bool {
	_, ok := i.Status.Tags["latest"]
	return ok
}

// CheckImageStreamTagNotFoundFn return true if the imagestream update was not successful
var CheckImageStreamTagNotFoundFn = func(i *imageapi.ImageStream) bool {
	return strings.Contains(i.Annotations[imageapi.DockerImageRepositoryCheckAnnotation], "not") ||
		strings.Contains(i.Annotations[imageapi.DockerImageRepositoryCheckAnnotation], "error")
}

// compareResourceControllerNames compares names of two resource controllers.
It returns: // -1 if rc a is older than b // 1 if rc a is newer than b // 0 if their names are the same func compareResourceControllerNames(a, b string) int { var reDeploymentConfigName = regexp.MustCompile(`^(.*)-(\d+)$`) am := reDeploymentConfigName.FindStringSubmatch(a) bm := reDeploymentConfigName.FindStringSubmatch(b) if len(am) == 0 || len(bm) == 0 { switch { case a < b: return -1 case a > b: return 1 default: return 0 } } aname, averstr := am[0], am[1] bname, bverstr := bm[0], bm[1] aver, _ := strconv.Atoi(averstr) bver, _ := strconv.Atoi(bverstr) switch { case aname < bname || (aname == bname && aver < bver): return -1 case bname < aname || (bname == aname && bver < aver): return 1 default: return 0 } } // WaitForADeployment waits for a deployment to fulfill either isOK or isFailed. // When isOK returns true, WaitForADeployment returns nil, when isFailed returns // true, WaitForADeployment returns an error including the deployment status. // WaitForADeployment waits for at most a certain timeout (non-configurable). func WaitForADeployment(client kcoreclient.ReplicationControllerInterface, name string, isOK, isFailed func(*kapi.ReplicationController) bool, oc *CLI) error { timeout := 15 * time.Minute // closing done signals that any pending operation should be aborted. done := make(chan struct{}) defer close(done) // okOrFailed returns whether a replication controller matches either of // the predicates isOK or isFailed, and the associated error in case of // failure. okOrFailed := func(rc *kapi.ReplicationController) (err error, matched bool) { if isOK(rc) { return nil, true } if isFailed(rc) { return fmt.Errorf("The deployment %q status is %q", name, rc.Annotations[deployapi.DeploymentStatusAnnotation]), true } return nil, false } // waitForDeployment waits until okOrFailed returns true or the done // channel is closed. 
waitForDeployment := func() (err error, retry bool) { requirement, err := labels.NewRequirement(deployapi.DeploymentConfigAnnotation, selection.Equals, []string{name}) if err != nil { return fmt.Errorf("unexpected error generating label selector: %v", err), false } list, err := client.List(kapi.ListOptions{LabelSelector: labels.NewSelector().Add(*requirement)}) if err != nil { return err, false } // multiple deployments are conceivable; so we look to see how the latest depoy does var lastRC *kapi.ReplicationController for _, rc := range list.Items { if lastRC == nil { lastRC = &rc continue } if compareResourceControllerNames(lastRC.GetName(), rc.GetName()) <= 0 { lastRC = &rc } } if lastRC != nil { err, matched := okOrFailed(lastRC) if matched { return err, false } } w, err := client.Watch(kapi.ListOptions{LabelSelector: labels.NewSelector().Add(*requirement), ResourceVersion: list.ResourceVersion}) if err != nil { return err, false } defer w.Stop() for { select { case val, ok := <-w.ResultChan(): if !ok { // watcher error, re-get and re-watch return nil, true } rc, ok := val.Object.(*kapi.ReplicationController) if !ok { continue } if lastRC == nil { lastRC = rc } // multiple deployments are conceivable; so we look to see how the latest deployment does if compareResourceControllerNames(lastRC.GetName(), rc.GetName()) <= 0 { lastRC = rc err, matched := okOrFailed(rc) if matched { return err, false } } case <-done: // no more time left, stop what we were doing, // do no retry. return nil, false } } } // errCh is buffered so the goroutine below never blocks on sending, // preventing a goroutine leak if we reach the timeout. 
errCh := make(chan error, 1) go func() { defer close(errCh) err, retry := waitForDeployment() for retry { err, retry = waitForDeployment() } errCh <- err }() select { case err := <-errCh: if err != nil { DumpDeploymentLogs(name, oc) } return err case <-time.After(timeout): DumpDeploymentLogs(name, oc) // end for timing issues where we miss watch updates return fmt.Errorf("timed out waiting for deployment %q after %v", name, timeout) } } // WaitForADeploymentToComplete waits for a deployment to complete. func WaitForADeploymentToComplete(client kcoreclient.ReplicationControllerInterface, name string, oc *CLI) error { return WaitForADeployment(client, name, CheckDeploymentCompletedFn, CheckDeploymentFailedFn, oc) } // WaitForRegistry waits until a newly deployed registry becomes ready. If waitForDCVersion is given, the // function will wait until a corresponding replica controller completes. If not give, the latest version of // registry's deployment config will be fetched from etcd. func WaitForRegistry( dcNamespacer client.DeploymentConfigsNamespacer, kubeClient kclientset.Interface, waitForDCVersion *int64, oc *CLI, ) error { var latestVersion int64 start := time.Now() if waitForDCVersion != nil { latestVersion = *waitForDCVersion } else { dc, err := dcNamespacer.DeploymentConfigs(kapi.NamespaceDefault).Get("docker-registry") if err != nil { return err } latestVersion = dc.Status.LatestVersion } fmt.Fprintf(g.GinkgoWriter, "waiting for deployment of version %d to complete\n", latestVersion) err := WaitForADeployment(kubeClient.Core().ReplicationControllers(kapi.NamespaceDefault), "docker-registry", func(rc *kapi.ReplicationController) bool { if !CheckDeploymentCompletedFn(rc) { return false } v, err := strconv.ParseInt(rc.Annotations[deployapi.DeploymentVersionAnnotation], 10, 64) if err != nil { fmt.Fprintf(g.GinkgoWriter, "failed to parse %q of replication controller %q: %v\n", deployapi.DeploymentVersionAnnotation, rc.Name, err) return false } return v >= 
latestVersion }, func(rc *kapi.ReplicationController) bool { v, err := strconv.ParseInt(rc.Annotations[deployapi.DeploymentVersionAnnotation], 10, 64) if err != nil { fmt.Fprintf(g.GinkgoWriter, "failed to parse %q of replication controller %q: %v\n", deployapi.DeploymentVersionAnnotation, rc.Name, err) return false } if v < latestVersion { return false } return CheckDeploymentFailedFn(rc) }, oc) if err != nil { return err } requirement, err := labels.NewRequirement(deployapi.DeploymentLabel, selection.Equals, []string{fmt.Sprintf("docker-registry-%d", latestVersion)}) pods, err := WaitForPods(kubeClient.Core().Pods(kapi.NamespaceDefault), labels.NewSelector().Add(*requirement), CheckPodIsReadyFn, 1, time.Minute) now := time.Now() fmt.Fprintf(g.GinkgoWriter, "deployed registry pod %s after %s\n", pods[0], now.Sub(start).String()) return err } func isUsageSynced(received, expected kapi.ResourceList, expectedIsUpperLimit bool) bool { resourceNames := quota.ResourceNames(expected) masked := quota.Mask(received, resourceNames) if len(masked) != len(expected) { return false } if expectedIsUpperLimit { if le, _ := quota.LessThanOrEqual(masked, expected); !le { return false } } else { if le, _ := quota.LessThanOrEqual(expected, masked); !le { return false } } return true } // WaitForResourceQuotaSync watches given resource quota until its usage is updated to desired level or a // timeout occurs. If successful, used quota values will be returned for expected resources. Otherwise an // ErrWaitTimeout will be returned. If expectedIsUpperLimit is true, given expected usage must compare greater // or equal to quota's usage, which is useful for expected usage increment. Otherwise expected usage must // compare lower or equal to quota's usage, which is useful for expected usage decrement. 
func WaitForResourceQuotaSync( client kcoreclient.ResourceQuotaInterface, name string, expectedUsage kapi.ResourceList, expectedIsUpperLimit bool, timeout time.Duration, ) (kapi.ResourceList, error) { startTime := time.Now() endTime := startTime.Add(timeout) expectedResourceNames := quota.ResourceNames(expectedUsage) list, err := client.List(kapi.ListOptions{FieldSelector: fields.Set{"metadata.name": name}.AsSelector()}) if err != nil { return nil, err } for i := range list.Items { used := quota.Mask(list.Items[i].Status.Used, expectedResourceNames) if isUsageSynced(used, expectedUsage, expectedIsUpperLimit) { return used, nil } } rv := list.ResourceVersion w, err := client.Watch(kapi.ListOptions{FieldSelector: fields.Set{"metadata.name": name}.AsSelector(), ResourceVersion: rv}) if err != nil { return nil, err } defer w.Stop() for time.Now().Before(endTime) { select { case val, ok := <-w.ResultChan(): if !ok { // reget and re-watch continue } if rq, ok := val.Object.(*kapi.ResourceQuota); ok { used := quota.Mask(rq.Status.Used, expectedResourceNames) if isUsageSynced(used, expectedUsage, expectedIsUpperLimit) { return used, nil } } case <-time.After(endTime.Sub(time.Now())): return nil, wait.ErrWaitTimeout } } return nil, wait.ErrWaitTimeout } // CheckDeploymentCompletedFn returns true if the deployment completed var CheckDeploymentCompletedFn = func(d *kapi.ReplicationController) bool { return deployutil.IsCompleteDeployment(d) } // CheckDeploymentFailedFn returns true if the deployment failed var CheckDeploymentFailedFn = func(d *kapi.ReplicationController) bool { return deployutil.IsFailedDeployment(d) } // GetPodNamesByFilter looks up pods that satisfy the predicate and returns their names. 
func GetPodNamesByFilter(c kcoreclient.PodInterface, label labels.Selector, predicate func(kapi.Pod) bool) (podNames []string, err error) { podList, err := c.List(kapi.ListOptions{LabelSelector: label}) if err != nil { return nil, err } for _, pod := range podList.Items { if predicate(pod) { podNames = append(podNames, pod.Name) } } return podNames, nil } func WaitForAJob(c kbatchclient.JobInterface, name string, timeout time.Duration) error { return wait.Poll(1*time.Second, timeout, func() (bool, error) { j, e := c.Get(name) if e != nil { return true, e } // TODO soltysh: replace this with a function once such exist, currently // it's private in the controller for _, c := range j.Status.Conditions { if (c.Type == batch.JobComplete || c.Type == batch.JobFailed) && c.Status == kapi.ConditionTrue { return true, nil } } return false, nil }) } // WaitForPods waits until given number of pods that match the label selector and // satisfy the predicate are found func WaitForPods(c kcoreclient.PodInterface, label labels.Selector, predicate func(kapi.Pod) bool, count int, timeout time.Duration) ([]string, error) { var podNames []string err := wait.Poll(1*time.Second, timeout, func() (bool, error) { p, e := GetPodNamesByFilter(c, label, predicate) if e != nil { return true, e } if len(p) != count { return false, nil } podNames = p return true, nil }) return podNames, err } // CheckPodIsRunningFn returns true if the pod is running var CheckPodIsRunningFn = func(pod kapi.Pod) bool { return pod.Status.Phase == kapi.PodRunning } // CheckPodIsSucceededFn returns true if the pod status is "Succdeded" var CheckPodIsSucceededFn = func(pod kapi.Pod) bool { return pod.Status.Phase == kapi.PodSucceeded } // CheckPodIsReadyFn returns true if the pod's ready probe determined that the pod is ready. 
var CheckPodIsReadyFn = func(pod kapi.Pod) bool { if pod.Status.Phase != kapi.PodRunning { return false } for _, cond := range pod.Status.Conditions { if cond.Type != kapi.PodReady { continue } return cond.Status == kapi.ConditionTrue } return false } // WaitUntilPodIsGone waits until the named Pod will disappear func WaitUntilPodIsGone(c kcoreclient.PodInterface, podName string, timeout time.Duration) error { return wait.Poll(1*time.Second, timeout, func() (bool, error) { _, err := c.Get(podName) if err != nil { if strings.Contains(err.Error(), "not found") { return true, nil } return true, err } return false, nil }) } // GetDockerImageReference retrieves the full Docker pull spec from the given ImageStream // and tag func GetDockerImageReference(c client.ImageStreamInterface, name, tag string) (string, error) { imageStream, err := c.Get(name) if err != nil { return "", err } isTag, ok := imageStream.Status.Tags[tag] if !ok { return "", fmt.Errorf("ImageStream %q does not have tag %q", name, tag) } if len(isTag.Items) == 0 { return "", fmt.Errorf("ImageStreamTag %q is empty", tag) } return isTag.Items[0].DockerImageReference, nil } // GetPodForContainer creates a new Pod that runs specified container func GetPodForContainer(container kapi.Container) *kapi.Pod { name := namer.GetPodName("test-pod", string(uuid.NewUUID())) return &kapi.Pod{ TypeMeta: unversioned.TypeMeta{ Kind: "Pod", APIVersion: "v1", }, ObjectMeta: kapi.ObjectMeta{ Name: name, Labels: map[string]string{"name": name}, }, Spec: kapi.PodSpec{ Containers: []kapi.Container{container}, RestartPolicy: kapi.RestartPolicyNever, }, } } // CreatePersistentVolume creates a HostPath Persistent Volume. 
func CreatePersistentVolume(name, capacity, hostPath string) *kapi.PersistentVolume { return &kapi.PersistentVolume{ TypeMeta: unversioned.TypeMeta{ Kind: "PersistentVolume", APIVersion: "v1", }, ObjectMeta: kapi.ObjectMeta{ Name: name, Labels: map[string]string{"name": name}, }, Spec: kapi.PersistentVolumeSpec{ PersistentVolumeSource: kapi.PersistentVolumeSource{ HostPath: &kapi.HostPathVolumeSource{ Path: hostPath, }, }, Capacity: kapi.ResourceList{ kapi.ResourceStorage: resource.MustParse(capacity), }, AccessModes: []kapi.PersistentVolumeAccessMode{ kapi.ReadWriteOnce, kapi.ReadOnlyMany, kapi.ReadWriteMany, }, }, } } // SetupHostPathVolumes will create multiple PersistentVolumes with given capacity func SetupHostPathVolumes(c kcoreclient.PersistentVolumeInterface, prefix, capacity string, count int) (volumes []*kapi.PersistentVolume, err error) { rootDir, err := ioutil.TempDir(TestContext.OutputDir, "persistent-volumes") if err != nil { return volumes, err } for i := 0; i < count; i++ { dir, err := ioutil.TempDir(rootDir, fmt.Sprintf("%0.4d", i)) if err != nil { return volumes, err } if _, err = exec.LookPath("chcon"); err != nil { err := exec.Command("chcon", "-t", "svirt_sandbox_file_t", dir).Run() if err != nil { return volumes, err } } if err = os.Chmod(dir, 0777); err != nil { return volumes, err } pv, err := c.Create(CreatePersistentVolume(fmt.Sprintf("%s%s-%0.4d", pvPrefix, prefix, i), capacity, dir)) if err != nil { return volumes, err } volumes = append(volumes, pv) } return volumes, err } // CleanupHostPathVolumes removes all PersistentVolumes created by // SetupHostPathVolumes, with a given prefix func CleanupHostPathVolumes(c kcoreclient.PersistentVolumeInterface, prefix string) error { pvs, err := c.List(kapi.ListOptions{}) if err != nil { return err } prefix = fmt.Sprintf("%s%s-", pvPrefix, prefix) for _, pv := range pvs.Items { if !strings.HasPrefix(pv.Name, prefix) { continue } pvInfo, err := c.Get(pv.Name) if err != nil { 
fmt.Fprintf(g.GinkgoWriter, "WARNING: couldn't get meta info for PV %s: %v\n", pv.Name, err) continue } if err = c.Delete(pv.Name, nil); err != nil { fmt.Fprintf(g.GinkgoWriter, "WARNING: couldn't remove PV %s: %v\n", pv.Name, err) continue } volumeDir := pvInfo.Spec.HostPath.Path if err = os.RemoveAll(volumeDir); err != nil { fmt.Fprintf(g.GinkgoWriter, "WARNING: couldn't remove directory %q: %v\n", volumeDir, err) continue } parentDir := filepath.Dir(volumeDir) if parentDir == "." || parentDir == "/" { continue } if err = os.Remove(parentDir); err != nil { fmt.Fprintf(g.GinkgoWriter, "WARNING: couldn't remove directory %q: %v\n", parentDir, err) continue } } return nil } // KubeConfigPath returns the value of KUBECONFIG environment variable func KubeConfigPath() string { // can't use gomega in this method since it is used outside of It() return os.Getenv("KUBECONFIG") } // ExtendedTestPath returns absolute path to extended tests directory func ExtendedTestPath() string { // can't use gomega in this method since it is used outside of It() return os.Getenv("EXTENDED_TEST_PATH") } //ArtifactDirPath returns the value of ARTIFACT_DIR environment variable func ArtifactDirPath() string { path := os.Getenv("ARTIFACT_DIR") o.Expect(path).NotTo(o.BeNil()) o.Expect(path).NotTo(o.BeEmpty()) return path } //ArtifactPath returns the absolute path to the fix artifact file //The path is relative to ARTIFACT_DIR func ArtifactPath(elem ...string) string { return filepath.Join(append([]string{ArtifactDirPath()}, elem...)...) } // FixturePath returns absolute path to given fixture file // The path is relative to EXTENDED_TEST_PATH (./test/extended/*) func FixturePath(elem ...string) string { return filepath.Join(append([]string{ExtendedTestPath()}, elem...)...) } // FetchURL grabs the output from the specified url and returns it. // It will retry once per second for duration retryTimeout if an error occurs during the request. 
func FetchURL(url string, retryTimeout time.Duration) (response string, err error) { waitFn := func() (bool, error) { r, err := http.Get(url) if err != nil || r.StatusCode != 200 { // lie to the poller that we didn't get an error even though we did // because otherwise it's going to give up. return false, nil } defer r.Body.Close() bytes, err := ioutil.ReadAll(r.Body) response = string(bytes) return true, nil } pollErr := wait.Poll(time.Duration(1*time.Second), retryTimeout, waitFn) if pollErr == wait.ErrWaitTimeout { return "", fmt.Errorf("Timed out while fetching url %q", url) } if pollErr != nil { return "", pollErr } return } // ParseLabelsOrDie turns the given string into a label selector or // panics; for tests or other cases where you know the string is valid. // TODO: Move this to the upstream labels package. func ParseLabelsOrDie(str string) labels.Selector { ret, err := labels.Parse(str) if err != nil { panic(fmt.Sprintf("cannot parse '%v': %v", str, err)) } return ret } // GetEndpointAddress will return an "ip:port" string for the endpoint. func GetEndpointAddress(oc *CLI, name string) (string, error) { err := oc.KubeFramework().WaitForAnEndpoint(name) if err != nil { return "", err } endpoint, err := oc.KubeClient().Core().Endpoints(oc.Namespace()).Get(name) if err != nil { return "", err } return fmt.Sprintf("%s:%d", endpoint.Subsets[0].Addresses[0].IP, endpoint.Subsets[0].Ports[0].Port), nil } // GetPodForImage creates a new Pod that runs the containers from specified // Docker image reference func GetPodForImage(dockerImageReference string) *kapi.Pod { return GetPodForContainer(kapi.Container{ Name: "test", Image: dockerImageReference, }) } // CreateExecPodOrFail creates a simple busybox pod in a sleep loop used as a // vessel for kubectl exec commands. // Returns the name of the created pod. 
// TODO: expose upstream func CreateExecPodOrFail(client kcoreclient.CoreInterface, ns, name string) string { framework.Logf("Creating new exec pod") execPod := framework.NewHostExecPodSpec(ns, name) created, err := client.Pods(ns).Create(execPod) o.Expect(err).NotTo(o.HaveOccurred()) err = wait.PollImmediate(framework.Poll, 5*time.Minute, func() (bool, error) { retrievedPod, err := client.Pods(execPod.Namespace).Get(created.Name) if err != nil { return false, nil } return retrievedPod.Status.Phase == kapi.PodRunning, nil }) o.Expect(err).NotTo(o.HaveOccurred()) return created.Name } // CreateExecPodOnNode launches a exec pod in the given namespace and node // waits until it's Running, created pod name would be returned // TODO: expose upstream func CreateExecPodOnNode(client kcoreclient.CoreInterface, ns, nodeName, name string) string { framework.Logf("Creating exec pod %q in namespace %q", name, ns) execPod := framework.NewHostExecPodSpec(ns, name) execPod.Spec.NodeName = nodeName created, err := client.Pods(ns).Create(execPod) o.Expect(err).NotTo(o.HaveOccurred()) err = wait.PollImmediate(framework.Poll, 5*time.Minute, func() (bool, error) { retrievedPod, err := client.Pods(execPod.Namespace).Get(created.Name) if err != nil { return false, nil } return retrievedPod.Status.Phase == kapi.PodRunning, nil }) o.Expect(err).NotTo(o.HaveOccurred()) return created.Name }
[ "\"KUBECONFIG\"", "\"EXTENDED_TEST_PATH\"", "\"ARTIFACT_DIR\"" ]
[]
[ "EXTENDED_TEST_PATH", "ARTIFACT_DIR", "KUBECONFIG" ]
[]
["EXTENDED_TEST_PATH", "ARTIFACT_DIR", "KUBECONFIG"]
go
3
0
meet_app/app.py
"""Flask application run.""" import logging import os import sys import flask import pytz from apscheduler.schedulers.background import BackgroundScheduler from apscheduler.triggers.interval import IntervalTrigger import settings from bin import load_data from bin import run_migrations from data import db_session from enums.sa import SyncType from migrations import utils as migrations_utils app = flask.Flask(__name__) app.deploying = bool(int(os.getenv('IS_DEPLOY', '0'))) app.is_sql_ver = bool(int(os.getenv('IS_SQL_VERSION', '0'))) scheduler = BackgroundScheduler() def main(): configure() if not app.testing and not app.deploying: app.run(debug=False, host='localhost') def configure(): init_logging() update_cfg() register_blueprints() setup_db() if app.is_sql_ver: all_db_models = generate_all_db_models() run_actions() def register_blueprints(): from views import meet_views app.register_blueprint(meet_views.blueprint) # from utils import py as py_utils # views, _ = py_utils.import_modules( # 'views/__init__.py', 'views', w_classes=False) # for view in views.values(): # app.register_blueprint(view.blueprint) def setup_db(): if app.is_sql_ver: # TODO: Add MySQL version db_session.init_sql(settings.DB_CONNECTION) # enable for flask app not in debug mode to avoid auto apply run_migrations.run() else: # TODO: Add MongoDB version db_session.init_no_sql(**settings.NOSQL_DB_CONNECTION) pass def generate_all_db_models(): db_models = migrations_utils.get_models(os.path.join( os.path.dirname(__file__), 'data', 'generated_all_db_models.py')) return db_models def run_actions(): # Sync data on application run. sync = load_data.run(sync_type=SyncType.app_init.value, forced=False) scheduler.start() #TODO: Fix duplication of db entiries # Sync data by interval. 
@scheduler.scheduled_job( IntervalTrigger(timezone=pytz.utc, **settings.SYNC_INTERVAL)) def load_data_job(): load_data.run(sync_type=SyncType.scheduled.value) def init_logging(): logging.basicConfig(stream=sys.stdout, level=logging.INFO) # logging.getLogger("sqlalchemy.engine").setLevel(logging.INFO) # logging.getLogger("sqlalchemy.pool").setLevel(logging.DEBUG) def update_cfg(): global app # flask_cfg.from_pyfile('settings.py') # no needed 'flask.py' app.config.update({ **settings.FLASK_ENV_CFG, **settings.FLASK_SEC_ENV_CFG, }) if __name__ in ('__main__', 'meet_app.app'): main()
[]
[]
[ "IS_DEPLOY", "IS_SQL_VERSION" ]
[]
["IS_DEPLOY", "IS_SQL_VERSION"]
python
2
0
cities/management/commands/cities.py
""" GeoNames city data import script. Requires the following files: http://download.geonames.org/export/dump/ - Countries: countryInfo.txt - Regions: admin1CodesASCII.txt - Subregions: admin2Codes.txt - Cities: cities5000.zip - Districts: hierarchy.zip - Localization: alternateNames.zip http://download.geonames.org/export/zip/ - Postal Codes: allCountries.zip """ from __future__ import print_function import io import json import logging import math import os import re import sys import zipfile try: from urllib.request import urlopen except ImportError: from urllib import urlopen from itertools import chain from optparse import make_option from swapper import load_model from tqdm import tqdm from django import VERSION as django_version from django.contrib.gis.gdal.envelope import Envelope from django.contrib.gis.geos import Point from django.contrib.gis.measure import D try: from django.contrib.gis.db.models.functions import Distance except ImportError: pass from django.core.management.base import BaseCommand from django.db import transaction from django.db.models import Q from django.db.models import CharField, ForeignKey from ...conf import (city_types, import_opts, import_opts_all, HookException, settings, CURRENCY_SYMBOLS, INCLUDE_AIRPORT_CODES, INCLUDE_NUMERIC_ALTERNATIVE_NAMES, NO_LONGER_EXISTENT_COUNTRY_CODES, SKIP_CITIES_WITH_EMPTY_REGIONS, VALIDATE_POSTAL_CODES) from ...models import (Region, Subregion, District, PostalCode, AlternativeName) from ...util import geo_distance from django.db import connection # Interpret all files as utf-8 if sys.version_info < (3,): reload(sys) # noqa: F821 sys.setdefaultencoding('utf-8') # Load swappable models Continent = load_model('cities', 'Continent') Country = load_model('cities', 'Country') City = load_model('cities', 'City') # Only log errors during Travis tests LOGGER_NAME = os.environ.get('TRAVIS_LOGGER_NAME', 'cities') # TODO: Remove backwards compatibility once django-cities requires Django 1.7 # or 1.8 LTS. 
# _transact = (transaction.commit_on_success if django_version < (1, 6) else # transaction.atomic) class Command(BaseCommand): if hasattr(settings, 'data_dir'): data_dir = settings.data_dir else: app_dir = os.path.normpath(os.path.dirname(os.path.realpath(__file__)) + '/../..') data_dir = os.path.join(app_dir, 'data') logger = logging.getLogger(LOGGER_NAME) if django_version < (1, 8): option_list = getattr(BaseCommand, 'option_list', ()) + ( make_option( '--force', action='store_true', default=False, help='Import even if files are up-to-date.'), make_option( '--import', metavar="DATA_TYPES", default='all', help='Selectively import data. Comma separated list of data ' + 'types: ' + str(import_opts).replace("'", '')), make_option( '--flush', metavar="DATA_TYPES", default='', help="Selectively flush data. Comma separated list of data types."), ) def add_arguments(self, parser): parser.add_argument( '--force', action='store_true', default=False, dest="force", help='Import even if files are up-to-date.' ) parser.add_argument( '--import', metavar="DATA_TYPES", default='all', dest="import", help='Selectively import data. Comma separated list of data ' + 'types: ' + str(import_opts).replace("'", '') ) parser.add_argument( '--flush', metavar="DATA_TYPES", default='', dest="flush", help="Selectively flush data. Comma separated list of data types." ) parser.add_argument( '--quiet', action='store_true', default=False, dest="quiet", help="Do not show the progress bar." 
) @transaction.atomic def handle(self, *args, **options): self.download_cache = {} self.options = options self.force = self.options['force'] self.flushes = [e for e in self.options.get('flush', '').split(',') if e] if 'all' in self.flushes: self.flushes = import_opts_all for flush in self.flushes: func = getattr(self, "flush_" + flush) func() self.imports = [e for e in self.options.get('import', '').split(',') if e] if 'all' in self.imports: self.imports = import_opts_all if self.flushes: self.imports = [] for import_ in self.imports: func = getattr(self, "import_" + import_) func() def call_hook(self, hook, *args, **kwargs): if hasattr(settings, 'plugins'): for plugin in settings.plugins[hook]: try: func = getattr(plugin, hook) func(self, *args, **kwargs) except HookException as e: error = str(e) if error: self.logger.error(error) return False return True def download(self, filekey): if 'filename' in settings.files[filekey]: filenames = [settings.files[filekey]['filename']] else: filenames = settings.files[filekey]['filenames'] for filename in filenames: web_file = None urls = [e.format(filename=filename) for e in settings.files[filekey]['urls']] for url in urls: try: web_file = urlopen(url) if 'html' in web_file.headers['Content-Type']: # TODO: Make this a subclass raise Exception("Content type of downloaded file was {}".format(web_file.headers['Content-Type'])) self.logger.debug("Downloaded: {}".format(url)) break except Exception: web_file = None continue else: self.logger.error("Web file not found: %s. 
Tried URLs:\n%s", filename, '\n'.join(urls)) if web_file is not None: self.logger.debug("Saving: {}/{}".format(self.data_dir, filename)) if not os.path.exists(self.data_dir): os.makedirs(self.data_dir) file = io.open(os.path.join(self.data_dir, filename), 'wb') file.write(web_file.read()) file.close() elif not os.path.exists(os.path.join(self.data_dir, filename)): raise Exception("File not found and download failed: {} [{}]".format(filename, url)) def get_data(self, filekey): if 'filename' in settings.files[filekey]: filenames = [settings.files[filekey]['filename']] else: filenames = settings.files[filekey]['filenames'] for filename in filenames: name, ext = filename.rsplit('.', 1) if (ext == 'zip'): filepath = os.path.join(self.data_dir, filename) zip_member = zipfile.ZipFile(filepath).open(name + '.txt', 'r') file_obj = io.TextIOWrapper(zip_member, encoding='utf-8') else: file_obj = io.open(os.path.join(self.data_dir, filename), 'r', encoding='utf-8') for row in file_obj: if not row.startswith('#'): yield dict(list(zip(settings.files[filekey]['fields'], row.rstrip('\n').split("\t")))) def parse(self, data): for line in data: if len(line) < 1 or line[0] == '#': continue items = [e.strip() for e in line.split('\t')] yield items def import_country(self): self.download('country') data = self.get_data('country') total = sum(1 for _ in data) - len(NO_LONGER_EXISTENT_COUNTRY_CODES) data = self.get_data('country') neighbours = {} countries = {} continents = {c.code: c for c in Continent.objects.all()} # If the continent attribute on Country is a ForeignKey, import # continents as ForeignKeys to the Continent models, otherwise assume # they are still the CharField(max_length=2) and import them the old way import_continents_as_fks = type(Country._meta.get_field('continent')) == ForeignKey for item in tqdm([d for d in data if d['code'] not in NO_LONGER_EXISTENT_COUNTRY_CODES], disable=self.options.get('quiet'), total=total, desc="Importing countries"): if not 
self.call_hook('country_pre', item): continue try: country_id = int(item['geonameid']) except KeyError: self.logger.warning("Country has no geonameid: {} -- skipping".format(item)) continue except ValueError: self.logger.warning("Country has non-numeric geonameid: {} -- skipping".format(item['geonameid'])) continue defaults = { 'name': item['name'], 'code': item['code'], 'code3': item['code3'], 'population': item['population'], 'continent': continents[item['continent']] if import_continents_as_fks else item['continent'], 'tld': item['tld'][1:], # strip the leading . 'phone': item['phone'], 'currency': item['currencyCode'], 'currency_name': item['currencyName'], 'capital': item['capital'], 'area': int(float(item['area'])) if item['area'] else None, } if hasattr(Country, 'language_codes'): defaults['language_codes'] = item['languages'] elif hasattr(Country, 'languages') and type(getattr(Country, 'languages')) == CharField: defaults['languages'] = item['languages'] # These fields shouldn't impact saving older models (that don't # have these attributes) try: defaults['currency_symbol'] = CURRENCY_SYMBOLS.get(item['currencyCode'], None) defaults['postal_code_format'] = item['postalCodeFormat'] defaults['postal_code_regex'] = item['postalCodeRegex'] except AttributeError: pass # Make importing countries idempotent country, created = Country.objects.update_or_create(id=country_id, defaults=defaults) self.logger.debug("%s country '%s'", "Added" if created else "Updated", defaults['name']) neighbours[country] = item['neighbours'].split(",") countries[country.code] = country if not self.call_hook('country_post', country, item): continue for country, neighbour_codes in tqdm(list(neighbours.items()), disable=self.options.get('quiet'), total=len(neighbours), desc="Importing country neighbours"): neighbours = [x for x in [countries.get(x) for x in neighbour_codes if x] if x] country.neighbours.add(*neighbours) def build_country_index(self): if hasattr(self, 'country_index'): 
return self.country_index = {} for obj in tqdm(Country.objects.all(), disable=self.options.get('quiet'), total=Country.objects.all().count(), desc="Building country index"): self.country_index[obj.code] = obj def import_region(self): self.download('region') data = self.get_data('region') self.build_country_index() total = sum(1 for _ in data) data = self.get_data('region') countries_not_found = {} for item in tqdm(data, disable=self.options.get('quiet'), total=total, desc="Importing regions"): if not self.call_hook('region_pre', item): continue try: region_id = int(item['geonameid']) except KeyError: self.logger.warning("Region has no geonameid: {} -- skipping".format(item)) continue except ValueError: self.logger.warning("Region has non-numeric geonameid: {} -- skipping".format(item['geonameid'])) continue country_code, region_code = item['code'].split(".") defaults = { 'name': item['name'], 'name_std': item['asciiName'], 'code': region_code, } try: defaults['country'] = self.country_index[country_code] except KeyError: countries_not_found.setdefault(country_code, []).append(defaults['name']) self.logger.warning("Region: %s: Cannot find country: %s -- skipping", defaults['name'], country_code) continue region, created = Region.objects.update_or_create(id=region_id, defaults=defaults) if not self.call_hook('region_post', region, item): continue self.logger.debug("%s region: %s, %s", "Added" if created else "Updated", item['code'], region) if countries_not_found: countries_not_found_file = os.path.join(self.data_dir, 'countries_not_found.json') try: with open(countries_not_found_file, 'w+') as fp: json.dump(countries_not_found, fp) except Exception as e: self.logger.warning("Unable to write log file '{}': {}".format( countries_not_found_file, e)) def build_region_index(self): if hasattr(self, 'region_index'): return self.region_index = {} for obj in tqdm(chain(Region.objects.all().prefetch_related('country'), 
Subregion.objects.all().prefetch_related('region__country')), disable=self.options.get('quiet'), total=Region.objects.all().count() + Subregion.objects.all().count(), desc="Building region index"): self.region_index[obj.full_code()] = obj def import_subregion(self): self.download('subregion') data = self.get_data('subregion') total = sum(1 for _ in data) data = self.get_data('subregion') self.build_country_index() self.build_region_index() regions_not_found = {} for item in tqdm(data, disable=self.options.get('quiet'), total=total, desc="Importing subregions"): if not self.call_hook('subregion_pre', item): continue try: subregion_id = int(item['geonameid']) except KeyError: self.logger.warning("Subregion has no geonameid: {} -- skipping".format(item)) continue except ValueError: self.logger.warning("Subregion has non-numeric geonameid: {} -- skipping".format(item['geonameid'])) continue country_code, region_code, subregion_code = item['code'].split(".") defaults = { 'name': item['name'], 'name_std': item['asciiName'], 'code': subregion_code, } try: defaults['region'] = self.region_index[country_code + "." 
+ region_code] except KeyError: regions_not_found.setdefault(country_code, {}) regions_not_found[country_code].setdefault(region_code, []).append(defaults['name']) self.logger.debug("Subregion: %s %s: Cannot find region", item['code'], defaults['name']) continue subregion, created = Subregion.objects.update_or_create(id=subregion_id, defaults=defaults) if not self.call_hook('subregion_post', subregion, item): continue self.logger.debug("%s subregion: %s, %s", "Added" if created else "Updated", item['code'], subregion) if regions_not_found: regions_not_found_file = os.path.join(self.data_dir, 'regions_not_found.json') try: with open(regions_not_found_file, 'w+') as fp: json.dump(regions_not_found, fp) except Exception as e: self.logger.warning("Unable to write log file '{}': {}".format( regions_not_found_file, e)) del self.region_index def import_city(self): self.download('city') data = self.get_data('city') total = sum(1 for _ in data) data = self.get_data('city') self.build_country_index() self.build_region_index() for item in tqdm(data, disable=self.options.get('quiet'), total=total, desc="Importing cities"): if not self.call_hook('city_pre', item): continue if item['featureCode'] not in city_types: continue try: city_id = int(item['geonameid']) except KeyError: self.logger.warning("City has no geonameid: {} -- skipping".format(item)) continue except ValueError: self.logger.warning("City has non-numeric geonameid: {} -- skipping".format(item['geonameid'])) continue defaults = { 'name': item['name'], 'kind': item['featureCode'], 'name_std': item['asciiName'], 'location': Point(float(item['longitude']), float(item['latitude'])), 'population': int(item['population']), 'timezone': item['timezone'], } try: defaults['elevation'] = int(item['elevation']) except (KeyError, ValueError): pass country_code = item['countryCode'] try: country = self.country_index[country_code] defaults['country'] = country except KeyError: self.logger.warning("City: %s: Cannot find country: 
'%s' -- skipping", item['name'], country_code) continue region_code = item['admin1Code'] try: region_key = country_code + "." + region_code region = self.region_index[region_key] defaults['region'] = region except KeyError: self.logger.debug('SKIP_CITIES_WITH_EMPTY_REGIONS: %s', str(SKIP_CITIES_WITH_EMPTY_REGIONS)) if SKIP_CITIES_WITH_EMPTY_REGIONS: self.logger.debug("%s: %s: Cannot find region: '%s' -- skipping", country_code, item['name'], region_code) continue else: defaults['region'] = None subregion_code = item['admin2Code'] try: subregion = self.region_index[country_code + "." + region_code + "." + subregion_code] defaults['subregion'] = subregion except KeyError: try: with transaction.atomic(): defaults['subregion'] = Subregion.objects.get( Q(name=subregion_code) | Q(name=subregion_code.replace(' (undefined)', '')), region=defaults['region']) except Subregion.DoesNotExist: try: with transaction.atomic(): defaults['subregion'] = Subregion.objects.get( Q(name_std=subregion_code) | Q(name_std=subregion_code.replace(' (undefined)', '')), region=defaults['region']) except Subregion.DoesNotExist: if subregion_code: self.logger.debug("%s: %s: Cannot find subregion: '%s'", country_code, item['name'], subregion_code) defaults['subregion'] = None city, created = City.objects.update_or_create(id=city_id, defaults=defaults) if not self.call_hook('city_post', city, item): continue self.logger.debug("%s city: %s", "Added" if created else "Updated", city) def build_hierarchy(self): if hasattr(self, 'hierarchy') and self.hierarchy: return self.download('hierarchy') data = self.get_data('hierarchy') total = sum(1 for _ in data) data = self.get_data('hierarchy') self.hierarchy = {} for item in tqdm(data, disable=self.options.get('quiet'), total=total, desc="Building hierarchy index"): parent_id = int(item['parent']) child_id = int(item['child']) self.hierarchy[child_id] = parent_id def import_district(self): self.download('city') data = self.get_data('city') total = sum(1 for 
_ in data) data = self.get_data('city') self.build_country_index() self.build_region_index() self.build_hierarchy() city_index = {} for obj in tqdm(City.objects.all(), disable=self.options.get('quiet'), total=City.objects.all().count(), desc="Building city index"): city_index[obj.id] = obj for item in tqdm(data, disable=self.options.get('quiet'), total=total, desc="Importing districts"): if not self.call_hook('district_pre', item): continue _type = item['featureCode'] if _type not in settings.district_types: continue defaults = { 'name': item['name'], 'name_std': item['asciiName'], 'location': Point(float(item['longitude']), float(item['latitude'])), 'population': int(item['population']), } if hasattr(District, 'code'): defaults['code'] = item['admin3Code'], geonameid = int(item['geonameid']) if item['geonameid']==6947513 or item['geonameid']=='6947513': print(item['name'], item['featureCode'], item['geonameid']) print(self.hierarchy[geonameid]) print(city_index[self.hierarchy[geonameid]]) # Find city city = None try: city = city_index[self.hierarchy[geonameid]] except KeyError: self.logger.debug("District: %d %s: Cannot find city in hierarchy, using nearest", geonameid, defaults['name']) city_pop_min = 100000 # we are going to try to find closet city using native # database .distance(...) query but if that fails then # we fall back to degree search, MYSQL has no support # and Spatialite with SRID 4236. 
try: ''' if django_version < (1, 9): city = City.objects.filter(population__gt=city_pop_min)\ .distance(defaults['location'])\ .order_by('distance')[0] else: print(connection.vendor) if connection.vendor=="mysql": print("distance not supported on mysql databases") else: city = City.objects.filter( location__distance_lte=(defaults['location'], D(km=1000)) ).annotate( distance=Distance('location', defaults['location']) ).order_by('distance').first() ''' except (City.DoesNotExist, ValueError) as e: self.logger.warning( "District: %s: DB backend does not support native '.distance(...)' query " "falling back to two degree search", defaults['name'] ) search_deg = 2 min_dist = float('inf') bounds = Envelope( defaults['location'].x - search_deg, defaults['location'].y - search_deg, defaults['location'].x + search_deg, defaults['location'].y + search_deg) for e in City.objects.filter(population__gt=city_pop_min).filter( location__intersects=bounds.wkt): dist = geo_distance(defaults['location'], e.location) if dist < min_dist: min_dist = dist city = e else: self.logger.debug("Found city in hierarchy: %s [%d]", city.name, geonameid) if not city: self.logger.warning("District: %s: Cannot find city -- skipping", defaults['name']) continue defaults['city'] = city try: with transaction.atomic(): district = District.objects.get(city=defaults['city'], name=defaults['name']) except District.DoesNotExist: # If the district doesn't exist, create it with the geonameid # as its id district, created = District.objects.update_or_create(id=item['geonameid'], defaults=defaults) else: # Since the district already exists, but doesn't have its # geonameid as its id, we need to update all of its attributes # *except* for its id for key, value in defaults.items(): setattr(district, key, value) district.save() created = False if not self.call_hook('district_post', district, item): continue self.logger.debug("%s district: %s", "Added" if created else "Updated", district) def import_alt_name(self): 
self.download('alt_name') data = self.get_data('alt_name') total = sum(1 for _ in data) data = self.get_data('alt_name') geo_index = {} for type_ in (Country, Region, Subregion, City, District): plural_type_name = '{}s'.format(type_.__name__) if type_.__name__[-1] != 'y' else '{}ies'.format(type_.__name__[:-1]) for obj in tqdm(type_.objects.all(), disable=self.options.get('quiet'), total=type_.objects.all().count(), desc="Building geo index for {}".format(plural_type_name.lower())): geo_index[obj.id] = { 'type': type_, 'object': obj, } for item in tqdm(data, disable=self.options.get('quiet'), total=total, desc="Importing data for alternative names"): if not self.call_hook('alt_name_pre', item): continue # Only get names for languages in use locale = item['language'] if not locale: locale = 'und' if locale not in settings.locales and 'all' not in settings.locales: self.logger.debug( "Alternative name with language [{}]: {} " "({}) -- skipping".format( item['language'], item['name'], item['nameid'])) continue # Check if known geo id geo_id = int(item['geonameid']) try: geo_info = geo_index[geo_id] except KeyError: continue try: alt_id = int(item['nameid']) except KeyError: self.logger.warning("Alternative name has no nameid: {} -- skipping".format(item)) continue try: alt = AlternativeName.objects.get(id=alt_id) except AlternativeName.DoesNotExist: alt = AlternativeName(id=alt_id) alt.name = item['name'] alt.is_preferred = bool(item['isPreferred']) alt.is_short = bool(item['isShort']) try: alt.language_code = locale except AttributeError: alt.language = locale try: int(item['name']) except ValueError: pass else: if not INCLUDE_NUMERIC_ALTERNATIVE_NAMES: self.logger.debug( "Trying to add a numeric alternative name to {} ({}): {} -- skipping".format( geo_info['object'].name, geo_info['type'].__name__, item['name'])) continue alt.is_historic = True if ((item['isHistoric']and item['isHistoric'] != '\n') or locale == 'fr_1793') else False if locale == 'post': try: if 
geo_index[item['geonameid']]['type'] == Region: region = geo_index[item['geonameid']]['object'] PostalCode.objects.get_or_create( code=item['name'], country=region.country, region=region, region_name=region.name) elif geo_index[item['geonameid']]['type'] == Subregion: subregion = geo_index[item['geonameid']]['object'] PostalCode.objects.get_or_create( code=item['name'], country=subregion.region.country, region=subregion.region, subregion=subregion, region_name=subregion.region.name, subregion_name=subregion.name) elif geo_index[item['geonameid']]['type'] == City: city = geo_index[item['geonameid']]['object'] PostalCode.objects.get_or_create( code=item['name'], country=city.country, region=city.region, subregion=city.subregion, region_name=city.region.name, subregion_name=city.subregion.name) except KeyError: pass continue if hasattr(alt, 'kind'): if (locale in ('abbr', 'link', 'name') or INCLUDE_AIRPORT_CODES and locale in ('iana', 'icao', 'faac')): alt.kind = locale elif locale not in settings.locales and 'all' not in settings.locales: self.logger.debug("Unknown alternative name type: {} -- skipping".format(locale)) continue alt.save() geo_info['object'].alt_names.add(alt) if not self.call_hook('alt_name_post', alt, item): continue self.logger.debug("Added alt name: %s, %s", locale, alt) def build_postal_code_regex_index(self): if hasattr(self, 'postal_code_regex_index') and self.postal_code_regex_index: return self.build_country_index() self.postal_code_regex_index = {} for code, country in tqdm(self.country_index.items(), disable=self.options.get('quiet'), total=len(self.country_index), desc="Building postal code regex index"): try: self.postal_code_regex_index[code] = re.compile(country.postal_code_regex) except Exception as e: self.logger.error("Couldn't compile postal code regex for {}: {}".format(country.code, e.args)) self.postal_code_regex_index[code] = '' def import_postal_code(self): self.download('postal_code') data = self.get_data('postal_code') total 
= sum(1 for _ in data) data = self.get_data('postal_code') self.build_country_index() self.build_region_index() if VALIDATE_POSTAL_CODES: self.build_postal_code_regex_index() districts_to_delete = [] query_statistics = [0 for i in range(8)] num_existing_postal_codes = PostalCode.objects.count() if num_existing_postal_codes == 0: self.logger.debug("Zero postal codes found - using only-create " "postal code optimization") for item in tqdm(data, disable=self.options.get('quiet'), total=total, desc="Importing postal codes"): if not self.call_hook('postal_code_pre', item): continue country_code = item['countryCode'] if country_code not in settings.postal_codes and 'ALL' not in settings.postal_codes: continue try: code = item['postalCode'] except KeyError: self.logger.warning("Postal code has no code: {} -- skipping".format(item)) continue # Find country try: country = self.country_index[country_code] except KeyError: self.logger.warning("Postal code '%s': Cannot find country: %s -- skipping", code, country_code) continue # Validate postal code against the country code = item['postalCode'] if VALIDATE_POSTAL_CODES and self.postal_code_regex_index[country_code].match(code) is None: self.logger.warning("Postal code didn't validate: {} ({})".format(code, country_code)) continue reg_name_q = Q(region_name__iexact=item['admin1Name']) subreg_name_q = Q(subregion_name__iexact=item['admin2Name']) dst_name_q = Q(district_name__iexact=item['admin3Name']) if hasattr(PostalCode, 'region'): reg_name_q |= Q(region__code=item['admin1Code']) if hasattr(PostalCode, 'subregion'): subreg_name_q |= Q(subregion__code=item['admin2Code']) if hasattr(PostalCode, 'district') and hasattr(District, 'code'): dst_name_q |= Q(district__code=item['admin3Code']) try: location = Point(float(item['longitude']), float(item['latitude'])) except ValueError: location = None if len(item['placeName']) >= 200: self.logger.warning("Postal code name has more than 200 characters: {}".format(item)) if 
num_existing_postal_codes > 0: postal_code_args = ( { 'args': (reg_name_q, subreg_name_q, dst_name_q), 'country': country, 'code': code, 'location': location, }, { 'args': (reg_name_q, subreg_name_q, dst_name_q), 'country': country, 'code': code, }, { 'args': (reg_name_q, subreg_name_q, dst_name_q), 'country': country, 'code': code, 'name__iexact': re.sub("'", '', item['placeName']), }, { 'args': tuple(), 'country': country, 'region__code': item['admin1Code'], }, { 'args': tuple(), 'country': country, 'code': code, 'name': item['placeName'], 'region__code': item['admin1Code'], 'subregion__code': item['admin2Code'], }, { 'args': tuple(), 'country': country, 'code': code, 'name': item['placeName'], 'region__code': item['admin1Code'], 'subregion__code': item['admin2Code'], 'district__code': item['admin3Code'], }, { 'args': tuple(), 'country': country, 'code': code, 'name': item['placeName'], 'region_name': item['admin1Name'], 'subregion_name': item['admin2Name'], }, { 'args': tuple(), 'country': country, 'code': code, 'name': item['placeName'], 'region_name': item['admin1Name'], 'subregion_name': item['admin2Name'], 'district_name': item['admin3Name'], } ) # We do this so we don't have to deal with exceptions being thrown # in the middle of transactions for args_dict in postal_code_args: num_pcs = PostalCode.objects.filter( *args_dict['args'], **{k: v for k, v in args_dict.items() if k != 'args'})\ .count() if num_pcs == 1: pc = PostalCode.objects.get( *args_dict['args'], **{k: v for k, v in args_dict.items() if k != 'args'}) break elif num_pcs > 1: pcs = PostalCode.objects.filter( *args_dict['args'], **{k: v for k, v in args_dict.items() if k != 'args'}) self.logger.debug("item: {}\nresults: {}".format(item, pcs)) # Raise a MultipleObjectsReturned exception PostalCode.objects.get( *args_dict['args'], **{k: v for k, v in args_dict.items() if k != 'args'}) else: self.logger.debug("Creating postal code: {}".format(item)) pc = PostalCode( country=country, code=code, 
name=item['placeName'], region_name=item['admin1Name'], subregion_name=item['admin2Name'], district_name=item['admin3Name']) else: self.logger.debug("Creating postal code: {}".format(item)) pc = PostalCode( country=country, code=code, name=item['placeName'], region_name=item['admin1Name'], subregion_name=item['admin2Name'], district_name=item['admin3Name']) if pc.region_name != '': try: with transaction.atomic(): pc.region = Region.objects.get( Q(name_std__iexact=pc.region_name) | Q(name__iexact=pc.region_name), country=pc.country) except Region.DoesNotExist: pc.region = None else: pc.region = None if pc.subregion_name != '': try: with transaction.atomic(): pc.subregion = Subregion.objects.get( Q(region__name_std__iexact=pc.region_name) | Q(region__name__iexact=pc.region_name), Q(name_std__iexact=pc.subregion_name) | Q(name__iexact=pc.subregion_name), region__country=pc.country) except Subregion.DoesNotExist: pc.subregion = None except Subregion.MultipleObjectsReturned: self.logger.warn("Found multiple subregions for '{}' in '{}' - ignoring".format( pc.region_name, pc.subregion_name)) self.logger.debug("item: {}\nsubregions: {}".format( item, Subregion.objects.filter( Q(region__name_std__iexact=pc.region_name) | Q(region__name__iexact=pc.region_name), Q(name_std__iexact=pc.subregion_name) | Q(name__iexact=pc.subregion_name), region__country=pc.country).values_list('id', flat=True))) pc.subregion = None else: pc.subregion = None if pc.district_name != '': try: with transaction.atomic(): pc.district = District.objects.get( Q(city__region__name_std__iexact=pc.region_name) | Q(city__region__name__iexact=pc.region_name), Q(name_std__iexact=pc.district_name) | Q(name__iexact=pc.district_name), city__country=pc.country) except District.MultipleObjectsReturned as e: self.logger.debug("item: {}\ndistricts: {}".format( item, District.objects.filter( Q(city__region__name_std__iexact=pc.region_name) | Q(city__region__name__iexact=pc.region_name), 
Q(name_std__iexact=pc.district_name) | Q(name__iexact=pc.district_name), city__country=pc.country).values_list('id', flat=True))) # If they're both part of the same city if District.objects.filter(Q(city__region__name_std__iexact=pc.region_name) | Q(city__region__name__iexact=pc.region_name), Q(name_std__iexact=pc.district_name) | Q(name__iexact=pc.district_name), city__country=pc.country)\ .values_list('city').distinct().count() == 1: # Use the one with the lower ID pc.district = District.objects.filter( Q(city__region__name_std__iexact=pc.region_name) | Q(city__region__name__iexact=pc.region_name), Q(name_std__iexact=pc.district_name) | Q(name__iexact=pc.district_name), city__country=pc.country).order_by('city__id').first() districts_to_delete.append(District.objects.filter( Q(city__region__name_std__iexact=pc.region_name) | Q(city__region__name__iexact=pc.region_name), Q(name_std__iexact=pc.district_name) | Q(name__iexact=pc.district_name), city__country=pc.country).order_by('city__id').last().id) else: raise e except District.DoesNotExist: pc.district = None else: pc.district = None if pc.district is not None: pc.city = pc.district.city else: pc.city = None try: pc.location = Point(float(item['longitude']), float(item['latitude'])) except Exception as e: self.logger.warning("Postal code %s (%s) - invalid location ('%s', '%s'): %s", pc.code, pc.country, item['longitude'], item['latitude'], str(e)) pc.location = None pc.save() if not self.call_hook('postal_code_post', pc, item): continue self.logger.debug("Added postal code: %s, %s", pc.country, pc) if num_existing_postal_codes > 0 and max(query_statistics) > 0: width = int(math.log10(max(query_statistics))) stats_str = "" for i, count in enumerate(query_statistics): stats_str = "{{}}\n{{:>2}} [{{:>{}}}]: {{}}".format(width)\ .format(stats_str, i, count, ''.join(['=' for i in range(count)])) self.logger.info("Postal code query statistics:\n{}".format(stats_str)) if districts_to_delete: 
self.logger.debug('districts to delete:\n{}'.format(districts_to_delete)) def flush_country(self): self.logger.info("Flushing country data") Country.objects.all().delete() def flush_region(self): self.logger.info("Flushing region data") Region.objects.all().delete() def flush_subregion(self): self.logger.info("Flushing subregion data") Subregion.objects.all().delete() def flush_city(self): self.logger.info("Flushing city data") City.objects.all().delete() def flush_district(self): self.logger.info("Flushing district data") District.objects.all().delete() def flush_postal_code(self): self.logger.info("Flushing postal code data") PostalCode.objects.all().delete() def flush_alt_name(self): self.logger.info("Flushing alternate name data") for type_ in (Country, Region, Subregion, City, District, PostalCode): plural_type_name = type_.__name__ if type_.__name__[-1] != 'y' else '{}ies'.format(type_.__name__[:-1]) for obj in tqdm(type_.objects.all(), disable=self.options.get('quiet'), total=type_.objects.count(), desc="Flushing alternative names for {}".format( plural_type_name)): obj.alt_names.all().delete()
[]
[]
[ "TRAVIS_LOGGER_NAME" ]
[]
["TRAVIS_LOGGER_NAME"]
python
1
0
providers/steam/steam_test.go
package steam_test import ( "github.com/timehop/markbates-goth" "github.com/timehop/markbates-goth/providers/steam" "github.com/stretchr/testify/assert" "os" "testing" ) func Test_New(t *testing.T) { t.Parallel() a := assert.New(t) p := provider() a.Equal(p.APIKey, os.Getenv("STEAM_KEY")) a.Equal(p.CallbackURL, "/foo") } func Test_Implements_Provider(t *testing.T) { t.Parallel() a := assert.New(t) a.Implements((*goth.Provider)(nil), provider()) } func Test_BeginAuth(t *testing.T) { t.Parallel() a := assert.New(t) p := provider() session, err := p.BeginAuth("test_state") s := session.(*steam.Session) a.NoError(err) a.Contains(s.AuthURL, "steamcommunity.com/openid/login") a.Contains(s.AuthURL, "foo") } func Test_SessionFromJSON(t *testing.T) { t.Parallel() a := assert.New(t) p := provider() session, err := p.UnmarshalSession(`{"AuthURL":"https://steamcommunity.com/openid/login?openid.claimed_id=http%3A%2F%2Fspecs.openid.net%2Fauth%2F2.0%2Fidentifier_select&openid.identity=http%3A%2F%2Fspecs.openid.net%2Fauth%2F2.0%2Fidentifier_select&openid.mode=checkid_setup&openid.ns=http%3A%2F%2Fspecs.openid.net%2Fauth%2F2.0&openid.realm=%3A%2F%2F&openid.return_to=%2Ffoo","SteamID":"1234567890","CallbackURL":"http://localhost:3030/","ResponseNonce":"2016-03-13T16:56:30ZJ8tlKVquwHi9ZSPV4ElU5PY2dmI="}`) a.NoError(err) s := session.(*steam.Session) a.Equal(s.AuthURL, "https://steamcommunity.com/openid/login?openid.claimed_id=http%3A%2F%2Fspecs.openid.net%2Fauth%2F2.0%2Fidentifier_select&openid.identity=http%3A%2F%2Fspecs.openid.net%2Fauth%2F2.0%2Fidentifier_select&openid.mode=checkid_setup&openid.ns=http%3A%2F%2Fspecs.openid.net%2Fauth%2F2.0&openid.realm=%3A%2F%2F&openid.return_to=%2Ffoo") a.Equal(s.CallbackURL, "http://localhost:3030/") a.Equal(s.SteamID, "1234567890") a.Equal(s.ResponseNonce, "2016-03-13T16:56:30ZJ8tlKVquwHi9ZSPV4ElU5PY2dmI=") } func provider() *steam.Provider { return steam.New(os.Getenv("STEAM_KEY"), "/foo") }
[ "\"STEAM_KEY\"", "\"STEAM_KEY\"" ]
[]
[ "STEAM_KEY" ]
[]
["STEAM_KEY"]
go
1
0
mctest.go
// Package mctest provides standalone instances of memcache suitable for use in // tests. package mctest import ( "fmt" "io" "io/ioutil" "net" "os" "os/exec" "time" "github.com/bradfitz/gomemcache/memcache" "github.com/facebookgo/freeport" "github.com/facebookgo/testname" "github.com/facebookgo/waitout" ) var serverListening = []byte("server listening") // Fatalf is satisfied by testing.T or testing.B. type Fatalf interface { Fatalf(format string, args ...interface{}) } // Server is a unique instance of a memcached. type Server struct { Port int StopTimeout time.Duration T Fatalf cmd *exec.Cmd pidFile string } // Start the server, this will return once the server has been started. func (s *Server) Start() { port, err := freeport.Get() if err != nil { s.T.Fatalf(err.Error()) } s.Port = port waiter := waitout.New(serverListening) s.pidFile = getPidFilePath(s.T) s.cmd = exec.Command( "memcached", "-vv", "-l", s.Addr(), "-m", "8", "-I", "256k", "-P", s.pidFile, ) if os.Getenv("MCTEST_VERBOSE") == "1" { s.cmd.Stdout = os.Stdout s.cmd.Stderr = io.MultiWriter(os.Stderr, waiter) } else { s.cmd.Stderr = waiter } if err := s.cmd.Start(); err != nil { s.T.Fatalf(err.Error()) } waiter.Wait() // Wait until TCP socket is active to ensure we don't progress until the // server is ready to accept. for { if c, err := net.Dial("tcp", s.Addr()); err == nil { c.Close() break } } } // Addr for the server. func (s *Server) Addr() string { return fmt.Sprintf("127.0.0.1:%d", s.Port) } // Stop the server. func (s *Server) Stop() { fin := make(chan struct{}) go func() { defer close(fin) s.cmd.Process.Kill() s.cmd.Wait() os.Remove(s.pidFile) }() select { case <-fin: case <-time.After(s.StopTimeout): } } // Client returns a memcache.Client connected to the underlying server. func (s *Server) Client() *memcache.Client { c := memcache.New(s.Addr()) c.Timeout = time.Second return c } // NewStartedServer creates a new server starts it. 
func NewStartedServer(t Fatalf) *Server { for { s := &Server{ T: t, StopTimeout: 15 * time.Second, } start := make(chan struct{}) go func() { defer close(start) s.Start() }() select { case <-start: return s case <-time.After(10 * time.Second): } } } func getPidFilePath(f Fatalf) string { file, err := ioutil.TempFile("", testname.Get("MC")) if err != nil { f.Fatalf(err.Error()) } name := file.Name() if err := file.Close(); err != nil { f.Fatalf(err.Error()) } if err := os.Remove(name); err != nil { f.Fatalf(err.Error()) } return name }
[ "\"MCTEST_VERBOSE\"" ]
[]
[ "MCTEST_VERBOSE" ]
[]
["MCTEST_VERBOSE"]
go
1
0
pkg/nfd-worker/nfd-worker.go
/* Copyright 2019-2021 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package nfdworker import ( "crypto/tls" "crypto/x509" "encoding/json" "fmt" "io/ioutil" "os" "path/filepath" "regexp" "strings" "time" "github.com/fsnotify/fsnotify" "golang.org/x/net/context" "google.golang.org/grpc" "google.golang.org/grpc/credentials" "k8s.io/apimachinery/pkg/util/validation" "k8s.io/klog/v2" "sigs.k8s.io/yaml" pb "sigs.k8s.io/node-feature-discovery/pkg/labeler" "sigs.k8s.io/node-feature-discovery/pkg/utils" "sigs.k8s.io/node-feature-discovery/pkg/version" "sigs.k8s.io/node-feature-discovery/source" "sigs.k8s.io/node-feature-discovery/source/cpu" "sigs.k8s.io/node-feature-discovery/source/custom" "sigs.k8s.io/node-feature-discovery/source/fake" "sigs.k8s.io/node-feature-discovery/source/iommu" "sigs.k8s.io/node-feature-discovery/source/kernel" "sigs.k8s.io/node-feature-discovery/source/local" "sigs.k8s.io/node-feature-discovery/source/memory" "sigs.k8s.io/node-feature-discovery/source/network" "sigs.k8s.io/node-feature-discovery/source/panic_fake" "sigs.k8s.io/node-feature-discovery/source/pci" "sigs.k8s.io/node-feature-discovery/source/storage" "sigs.k8s.io/node-feature-discovery/source/system" "sigs.k8s.io/node-feature-discovery/source/usb" ) var ( nodeName = os.Getenv("NODE_NAME") ) // Global config type NFDConfig struct { Core coreConfig Sources sourcesConfig } type coreConfig struct { Klog map[string]string LabelWhiteList utils.RegexpVal NoPublish bool Sources []string SleepInterval 
duration } type sourcesConfig map[string]source.Config // Labels are a Kubernetes representation of discovered features. type Labels map[string]string // Command line arguments type Args struct { CaFile string CertFile string KeyFile string ConfigFile string Options string Oneshot bool Server string ServerNameOverride string Klog map[string]*utils.KlogFlagVal Overrides ConfigOverrideArgs } // ConfigOverrideArgs are args that override config file options type ConfigOverrideArgs struct { NoPublish *bool // Deprecated LabelWhiteList *utils.RegexpVal SleepInterval *time.Duration Sources *utils.StringSliceVal } type NfdWorker interface { Run() error Stop() } type nfdWorker struct { args Args clientConn *grpc.ClientConn client pb.LabelerClient configFilePath string config *NFDConfig realSources []source.FeatureSource stop chan struct{} // channel for signaling stop testSources []source.FeatureSource enabledSources []source.FeatureSource } type duration struct { time.Duration } // Create new NfdWorker instance. 
func NewNfdWorker(args *Args) (NfdWorker, error) { nfd := &nfdWorker{ args: *args, config: &NFDConfig{}, realSources: []source.FeatureSource{ &cpu.Source{}, &iommu.Source{}, &kernel.Source{}, &memory.Source{}, &network.Source{}, &pci.Source{}, &storage.Source{}, &system.Source{}, &usb.Source{}, &custom.Source{}, // local needs to be the last source so that it is able to override // labels from other sources &local.Source{}, }, testSources: []source.FeatureSource{ &fake.Source{}, &panicfake.Source{}, }, stop: make(chan struct{}, 1), } if args.ConfigFile != "" { nfd.configFilePath = filepath.Clean(args.ConfigFile) } // Check TLS related args if args.CertFile != "" || args.KeyFile != "" || args.CaFile != "" { if args.CertFile == "" { return nfd, fmt.Errorf("--cert-file needs to be specified alongside --key-file and --ca-file") } if args.KeyFile == "" { return nfd, fmt.Errorf("--key-file needs to be specified alongside --cert-file and --ca-file") } if args.CaFile == "" { return nfd, fmt.Errorf("--ca-file needs to be specified alongside --cert-file and --key-file") } } return nfd, nil } func addConfigWatch(path string) (*fsnotify.Watcher, map[string]struct{}, error) { paths := make(map[string]struct{}) // Create watcher w, err := fsnotify.NewWatcher() if err != nil { return w, paths, fmt.Errorf("failed to create fsnotify watcher: %v", err) } // Add watches for all directory components so that we catch e.g. 
renames // upper in the tree added := false for p := path; ; p = filepath.Dir(p) { if err := w.Add(p); err != nil { klog.V(1).Infof("failed to add fsnotify watch for %q: %v", p, err) } else { klog.V(1).Infof("added fsnotify watch %q", p) added = true } paths[p] = struct{}{} if filepath.Dir(p) == p { break } } if !added { // Want to be sure that we watch something return w, paths, fmt.Errorf("failed to add any watch") } return w, paths, nil } func newDefaultConfig() *NFDConfig { return &NFDConfig{ Core: coreConfig{ LabelWhiteList: utils.RegexpVal{Regexp: *regexp.MustCompile("")}, SleepInterval: duration{60 * time.Second}, Sources: []string{"all"}, Klog: make(map[string]string), }, } } // Run NfdWorker client. Returns if a fatal error is encountered, or, after // one request if OneShot is set to 'true' in the worker args. func (w *nfdWorker) Run() error { klog.Infof("Node Feature Discovery Worker %s", version.Get()) klog.Infof("NodeName: '%s'", nodeName) // Create watcher for config file and read initial configuration configWatch, paths, err := addConfigWatch(w.configFilePath) if err != nil { return err } if err := w.configure(w.configFilePath, w.args.Options); err != nil { return err } // Connect to NFD master err = w.connect() if err != nil { return fmt.Errorf("failed to connect: %v", err) } defer w.disconnect() labelTrigger := time.After(0) var configTrigger <-chan time.Time for { select { case <-labelTrigger: // Get the set of feature labels. labels := createFeatureLabels(w.enabledSources, w.config.Core.LabelWhiteList.Regexp) // Update the node with the feature labels. 
if w.client != nil { err := advertiseFeatureLabels(w.client, labels) if err != nil { return fmt.Errorf("failed to advertise labels: %s", err.Error()) } } if w.args.Oneshot { return nil } if w.config.Core.SleepInterval.Duration > 0 { labelTrigger = time.After(w.config.Core.SleepInterval.Duration) } case e := <-configWatch.Events: name := filepath.Clean(e.Name) // If any of our paths (directories or the file itself) change if _, ok := paths[name]; ok { klog.Infof("fsnotify event in %q detected, reconfiguring fsnotify and reloading configuration", name) // Blindly remove existing watch and add a new one if err := configWatch.Close(); err != nil { klog.Warningf("failed to close fsnotify watcher: %v", err) } configWatch, paths, err = addConfigWatch(w.configFilePath) if err != nil { return err } // Rate limiter. In certain filesystem operations we get // numerous events in quick succession and we only want one // config re-load configTrigger = time.After(time.Second) } case e := <-configWatch.Errors: klog.Errorf("config file watcher error: %v", e) case <-configTrigger: if err := w.configure(w.configFilePath, w.args.Options); err != nil { return err } // Manage connection to master if w.config.Core.NoPublish { w.disconnect() } else if w.clientConn == nil { if err := w.connect(); err != nil { return err } } // Always re-label after a re-config event. 
This way the new config // comes into effect even if the sleep interval is long (or infinite) labelTrigger = time.After(0) case <-w.stop: klog.Infof("shutting down nfd-worker") configWatch.Close() return nil } } } // Stop NfdWorker func (w *nfdWorker) Stop() { select { case w.stop <- struct{}{}: default: } } // connect creates a client connection to the NFD master func (w *nfdWorker) connect() error { // Return a dummy connection in case of dry-run if w.config.Core.NoPublish { return nil } // Check that if a connection already exists if w.clientConn != nil { return fmt.Errorf("client connection already exists") } // Dial and create a client dialCtx, cancel := context.WithTimeout(context.Background(), 60*time.Second) defer cancel() dialOpts := []grpc.DialOption{grpc.WithBlock()} if w.args.CaFile != "" || w.args.CertFile != "" || w.args.KeyFile != "" { // Load client cert for client authentication cert, err := tls.LoadX509KeyPair(w.args.CertFile, w.args.KeyFile) if err != nil { return fmt.Errorf("failed to load client certificate: %v", err) } // Load CA cert for server cert verification caCert, err := ioutil.ReadFile(w.args.CaFile) if err != nil { return fmt.Errorf("failed to read root certificate file: %v", err) } caPool := x509.NewCertPool() if ok := caPool.AppendCertsFromPEM(caCert); !ok { return fmt.Errorf("failed to add certificate from '%s'", w.args.CaFile) } // Create TLS config tlsConfig := &tls.Config{ Certificates: []tls.Certificate{cert}, RootCAs: caPool, ServerName: w.args.ServerNameOverride, } dialOpts = append(dialOpts, grpc.WithTransportCredentials(credentials.NewTLS(tlsConfig))) } else { dialOpts = append(dialOpts, grpc.WithInsecure()) } conn, err := grpc.DialContext(dialCtx, w.args.Server, dialOpts...) 
if err != nil { return err } w.clientConn = conn w.client = pb.NewLabelerClient(conn) return nil } // disconnect closes the connection to NFD master func (w *nfdWorker) disconnect() { if w.clientConn != nil { w.clientConn.Close() } w.clientConn = nil w.client = nil } func (c *coreConfig) sanitize() { if c.SleepInterval.Duration > 0 && c.SleepInterval.Duration < time.Second { klog.Warningf("too short sleep-intervall specified (%s), forcing to 1s", c.SleepInterval.Duration.String()) c.SleepInterval = duration{time.Second} } } func (w *nfdWorker) configureCore(c coreConfig) error { // Handle klog for k, a := range w.args.Klog { if !a.IsSetFromCmdline() { v, ok := c.Klog[k] if !ok { v = a.DefValue() } if err := a.SetFromConfig(v); err != nil { return err } } } for k := range c.Klog { if _, ok := w.args.Klog[k]; !ok { klog.Warningf("unknown logger option in config: %q", k) } } // Determine enabled feature sources sourceList := map[string]struct{}{} all := false for _, s := range c.Sources { if s == "all" { all = true continue } sourceList[strings.TrimSpace(s)] = struct{}{} } w.enabledSources = []source.FeatureSource{} for _, s := range w.realSources { if _, enabled := sourceList[s.Name()]; all || enabled { w.enabledSources = append(w.enabledSources, s) delete(sourceList, s.Name()) } } for _, s := range w.testSources { if _, enabled := sourceList[s.Name()]; enabled { w.enabledSources = append(w.enabledSources, s) delete(sourceList, s.Name()) } } if len(sourceList) > 0 { names := make([]string, 0, len(sourceList)) for n := range sourceList { names = append(names, n) } klog.Warningf("skipping unknown source(s) %q specified in core.sources (or --sources)", strings.Join(names, ", ")) } return nil } // Parse configuration options func (w *nfdWorker) configure(filepath string, overrides string) error { // Create a new default config c := newDefaultConfig() allSources := append(w.realSources, w.testSources...) 
c.Sources = make(map[string]source.Config, len(allSources)) for _, s := range allSources { c.Sources[s.Name()] = s.NewConfig() } // Try to read and parse config file if filepath != "" { data, err := ioutil.ReadFile(filepath) if err != nil { if os.IsNotExist(err) { klog.Infof("config file %q not found, using defaults", filepath) } else { return fmt.Errorf("error reading config file: %s", err) } } else { err = yaml.Unmarshal(data, c) if err != nil { return fmt.Errorf("Failed to parse config file: %s", err) } klog.Infof("Configuration successfully loaded from %q", filepath) } } // Parse config overrides if err := yaml.Unmarshal([]byte(overrides), c); err != nil { return fmt.Errorf("Failed to parse --options: %s", err) } if w.args.Overrides.LabelWhiteList != nil { c.Core.LabelWhiteList = *w.args.Overrides.LabelWhiteList } if w.args.Overrides.NoPublish != nil { c.Core.NoPublish = *w.args.Overrides.NoPublish } if w.args.Overrides.SleepInterval != nil { c.Core.SleepInterval = duration{*w.args.Overrides.SleepInterval} } if w.args.Overrides.Sources != nil { c.Core.Sources = *w.args.Overrides.Sources } c.Core.sanitize() w.config = c if err := w.configureCore(c.Core); err != nil { return err } // (Re-)configure all "real" sources, test sources are not configurable for _, s := range allSources { s.SetConfig(c.Sources[s.Name()]) } return nil } // createFeatureLabels returns the set of feature labels from the enabled // sources and the whitelist argument. func createFeatureLabels(sources []source.FeatureSource, labelWhiteList regexp.Regexp) (labels Labels) { labels = Labels{} // Do feature discovery from all configured sources. for _, source := range sources { labelsFromSource, err := getFeatureLabels(source, labelWhiteList) if err != nil { klog.Errorf("discovery failed for source %q: %v", source.Name(), err) continue } for name, value := range labelsFromSource { // Log discovered feature. 
klog.Infof("%s = %s", name, value) labels[name] = value } } return labels } // getFeatureLabels returns node labels for features discovered by the // supplied source. func getFeatureLabels(source source.FeatureSource, labelWhiteList regexp.Regexp) (labels Labels, err error) { defer func() { if r := recover(); r != nil { klog.Errorf("panic occurred during discovery of source [%s]: %v", source.Name(), r) err = fmt.Errorf("%v", r) } }() labels = Labels{} features, err := source.Discover() if err != nil { return nil, err } // Prefix for labels in the default namespace prefix := source.Name() + "-" switch source.(type) { case *local.Source: // Do not prefix labels from the hooks prefix = "" } for k, v := range features { // Split label name into namespace and name compoents. Use dummy 'ns' // default namespace because there is no function to validate just // the name part split := strings.SplitN(k, "/", 2) label := prefix + split[0] nameForValidation := "ns/" + label nameForWhiteListing := label if len(split) == 2 { label = k nameForValidation = label nameForWhiteListing = split[1] } // Validate label name. errs := validation.IsQualifiedName(nameForValidation) if len(errs) > 0 { klog.Warningf("Ignoring invalid feature name '%s': %s", label, errs) continue } value := fmt.Sprintf("%v", v) // Validate label value errs = validation.IsValidLabelValue(value) if len(errs) > 0 { klog.Warningf("Ignoring invalid feature value %s=%s: %s", label, value, errs) continue } // Skip if label doesn't match labelWhiteList if !labelWhiteList.MatchString(nameForWhiteListing) { klog.Infof("%q does not match the whitelist (%s) and will not be published.", nameForWhiteListing, labelWhiteList.String()) continue } labels[label] = value } return labels, nil } // advertiseFeatureLabels advertises the feature labels to a Kubernetes node // via the NFD server. 
func advertiseFeatureLabels(client pb.LabelerClient, labels Labels) error { ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) defer cancel() klog.Infof("Sending labeling request to nfd-master") labelReq := pb.SetLabelsRequest{Labels: labels, NfdVersion: version.Get(), NodeName: nodeName} _, err := client.SetLabels(ctx, &labelReq) if err != nil { klog.Errorf("failed to set node labels: %v", err) return err } return nil } // UnmarshalJSON implements the Unmarshaler interface from "encoding/json" func (d *duration) UnmarshalJSON(data []byte) error { var v interface{} if err := json.Unmarshal(data, &v); err != nil { return err } switch val := v.(type) { case float64: d.Duration = time.Duration(val) case string: var err error d.Duration, err = time.ParseDuration(val) if err != nil { return err } default: return fmt.Errorf("invalid duration %s", data) } return nil } // UnmarshalJSON implements the Unmarshaler interface from "encoding/json" func (c *sourcesConfig) UnmarshalJSON(data []byte) error { // First do a raw parse to get the per-source data raw := map[string]json.RawMessage{} err := yaml.Unmarshal(data, &raw) if err != nil { return err } // Then parse each source-specific data structure // NOTE: we expect 'c' to be pre-populated with correct per-source data // types. Non-pre-populated keys are ignored. for k, rawv := range raw { if v, ok := (*c)[k]; ok { err := yaml.Unmarshal(rawv, &v) if err != nil { return fmt.Errorf("failed to parse %q source config: %v", k, err) } } } return nil }
[ "\"NODE_NAME\"" ]
[]
[ "NODE_NAME" ]
[]
["NODE_NAME"]
go
1
0
pkg/config/config.go
// Unless explicitly stated otherwise all files in this repository are licensed // under the Apache License Version 2.0. // This product includes software developed at Datadog (https://www.datadoghq.com/). // Copyright 2018 Datadog, Inc. package config import ( "fmt" "net/url" "os" "path/filepath" "strings" "time" log "github.com/cihub/seelog" "github.com/spf13/viper" "github.com/DataDog/datadog-agent/pkg/version" ) // DefaultForwarderRecoveryInterval is the default recovery interval, also used if // the user-provided value is invalid. const DefaultForwarderRecoveryInterval = 2 // Datadog is the global configuration object var Datadog = viper.New() // MetadataProviders helps unmarshalling `metadata_providers` config param type MetadataProviders struct { Name string `mapstructure:"name"` Interval time.Duration `mapstructure:"interval"` } // ConfigurationProviders helps unmarshalling `config_providers` config param type ConfigurationProviders struct { Name string `mapstructure:"name"` Polling bool `mapstructure:"polling"` TemplateURL string `mapstructure:"template_url"` TemplateDir string `mapstructure:"template_dir"` Username string `mapstructure:"username"` Password string `mapstructure:"password"` CAFile string `mapstructure:"ca_file"` CAPath string `mapstructure:"ca_path"` CertFile string `mapstructure:"cert_file"` KeyFile string `mapstructure:"key_file"` Token string `mapstructure:"token"` } // Listeners helps unmarshalling `listeners` config param type Listeners struct { Name string `mapstructure:"name"` } // Proxy represents the configuration for proxies in the agent type Proxy struct { HTTP string `mapstructure:"http"` HTTPS string `mapstructure:"https"` NoProxy []string `mapstructure:"no_proxy"` } func init() { // config identifiers Datadog.SetConfigName("datadog") Datadog.SetEnvPrefix("DD") Datadog.SetTypeByDefaultValue(true) // Replace '.' from config keys with '_' in env variables bindings. // e.g. 
: BindEnv("foo.bar") will bind config key // "foo.bar" to env variable "FOO_BAR" Datadog.SetEnvKeyReplacer(strings.NewReplacer(".", "_")) // Configuration defaults // Agent Datadog.SetDefault("dd_url", "https://app.datadoghq.com") Datadog.SetDefault("app_key", "") Datadog.SetDefault("proxy", nil) Datadog.SetDefault("skip_ssl_validation", false) Datadog.SetDefault("hostname", "") Datadog.SetDefault("tags", []string{}) Datadog.SetDefault("conf_path", ".") Datadog.SetDefault("confd_path", defaultConfdPath) Datadog.SetDefault("confd_dca_path", defaultDCAConfdPath) Datadog.SetDefault("use_metadata_mapper", true) Datadog.SetDefault("additional_checksd", defaultAdditionalChecksPath) Datadog.SetDefault("log_payloads", false) Datadog.SetDefault("log_level", "info") Datadog.SetDefault("log_to_syslog", false) Datadog.SetDefault("log_to_console", true) Datadog.SetDefault("logging_frequency", int64(20)) Datadog.SetDefault("disable_file_logging", false) Datadog.SetDefault("syslog_uri", "") Datadog.SetDefault("syslog_rfc", false) Datadog.SetDefault("syslog_tls", false) Datadog.SetDefault("syslog_pem", "") Datadog.SetDefault("cmd_host", "localhost") Datadog.SetDefault("cmd_port", 5001) Datadog.SetDefault("cluster_agent_cmd_port", 5005) Datadog.SetDefault("default_integration_http_timeout", 9) Datadog.SetDefault("enable_metadata_collection", true) Datadog.SetDefault("enable_gohai", true) Datadog.SetDefault("check_runners", int64(1)) Datadog.SetDefault("expvar_port", "5000") Datadog.SetDefault("auth_token_file_path", "") Datadog.SetDefault("bind_host", "localhost") // Retry settings Datadog.SetDefault("forwarder_backoff_factor", 2) Datadog.SetDefault("forwarder_backoff_base", 2) Datadog.SetDefault("forwarder_backoff_max", 64) Datadog.SetDefault("forwarder_recovery_interval", DefaultForwarderRecoveryInterval) Datadog.SetDefault("forwarder_recovery_reset", false) // Use to output logs in JSON format BindEnvAndSetDefault("log_format_json", false) // IPC API server timeout 
BindEnvAndSetDefault("server_timeout", 15) // Use to force client side TLS version to 1.2 BindEnvAndSetDefault("force_tls_12", false) // Agent GUI access port Datadog.SetDefault("GUI_port", defaultGuiPort) if IsContainerized() { Datadog.SetDefault("container_proc_root", "/host/proc") Datadog.SetDefault("procfs_path", "/host/proc") Datadog.SetDefault("container_cgroup_root", "/host/sys/fs/cgroup/") Datadog.BindEnv("procfs_path") } else { Datadog.SetDefault("container_proc_root", "/proc") // for amazon linux the cgroup directory on host is /cgroup/ // we pick memory.stat to make sure it exists and not empty if _, err := os.Stat("/cgroup/memory/memory.stat"); !os.IsNotExist(err) { Datadog.SetDefault("container_cgroup_root", "/cgroup/") } else { Datadog.SetDefault("container_cgroup_root", "/sys/fs/cgroup/") } } Datadog.SetDefault("proc_root", "/proc") Datadog.SetDefault("histogram_aggregates", []string{"max", "median", "avg", "count"}) Datadog.SetDefault("histogram_percentiles", []string{"0.95"}) // Serializer Datadog.SetDefault("use_v2_api.series", false) Datadog.SetDefault("use_v2_api.events", false) Datadog.SetDefault("use_v2_api.service_checks", false) // Forwarder Datadog.SetDefault("forwarder_timeout", 20) Datadog.SetDefault("forwarder_retry_queue_max_size", 30) BindEnvAndSetDefault("forwarder_num_workers", 1) // Dogstatsd Datadog.SetDefault("use_dogstatsd", true) Datadog.SetDefault("dogstatsd_port", 8125) // Notice: 0 means UDP port closed Datadog.SetDefault("dogstatsd_buffer_size", 1024*8) // 8KB buffer Datadog.SetDefault("dogstatsd_non_local_traffic", false) Datadog.SetDefault("dogstatsd_socket", "") // Notice: empty means feature disabled Datadog.SetDefault("dogstatsd_stats_port", 5000) Datadog.SetDefault("dogstatsd_stats_enable", false) Datadog.SetDefault("dogstatsd_stats_buffer", 10) Datadog.SetDefault("dogstatsd_expiry_seconds", 300) Datadog.SetDefault("dogstatsd_origin_detection", false) // Only supported for socket traffic 
Datadog.SetDefault("statsd_forward_host", "") Datadog.SetDefault("statsd_forward_port", 0) BindEnvAndSetDefault("statsd_metric_namespace", "") // Autoconfig Datadog.SetDefault("autoconf_template_dir", "/datadog/check_configs") Datadog.SetDefault("exclude_pause_container", true) Datadog.SetDefault("ac_include", []string{}) Datadog.SetDefault("ac_exclude", []string{}) // Docker BindEnvAndSetDefault("docker_query_timeout", int64(5)) Datadog.SetDefault("docker_labels_as_tags", map[string]string{}) Datadog.SetDefault("docker_env_as_tags", map[string]string{}) Datadog.SetDefault("kubernetes_pod_labels_as_tags", map[string]string{}) Datadog.SetDefault("kubernetes_pod_annotations_as_tags", map[string]string{}) Datadog.SetDefault("kubernetes_node_labels_as_tags", map[string]string{}) // Kubernetes Datadog.SetDefault("kubernetes_http_kubelet_port", 10255) Datadog.SetDefault("kubernetes_https_kubelet_port", 10250) Datadog.SetDefault("kubelet_tls_verify", true) Datadog.SetDefault("kubelet_client_ca", "/var/run/secrets/kubernetes.io/serviceaccount/ca.crt") Datadog.SetDefault("kubelet_auth_token_path", "") Datadog.SetDefault("kubelet_client_crt", "") Datadog.SetDefault("kubelet_client_key", "") Datadog.SetDefault("kubernetes_collect_metadata_tags", true) Datadog.SetDefault("kubernetes_metadata_tag_update_freq", 60*5) // 5 min // Kube ApiServer Datadog.SetDefault("kubernetes_kubeconfig_path", "") Datadog.SetDefault("leader_lease_duration", "60") Datadog.SetDefault("leader_election", false) Datadog.SetDefault("kube_resources_namespace", "") // Datadog cluster agent Datadog.SetDefault("cluster_agent", false) Datadog.SetDefault("cluster_agent.auth_token", "") Datadog.SetDefault("cluster_agent.url", "") Datadog.SetDefault("cluster_agent.kubernetes_service_name", "dca") // ECS Datadog.SetDefault("ecs_agent_url", "") // Will be autodetected Datadog.SetDefault("collect_ec2_tags", false) // Cloud Foundry Datadog.SetDefault("cloud_foundry", false) Datadog.SetDefault("bosh_id", "") // 
JMXFetch BindEnvAndSetDefault("jmx_custom_jars", []string{}) BindEnvAndSetDefault("jmx_use_cgroup_memory_limit", false) // Go_expvar server port Datadog.SetDefault("expvar_port", "5000") // Trace agent Datadog.SetDefault("apm_config.enabled", true) // Logs Agent BindEnvAndSetDefault("logs_enabled", false) BindEnvAndSetDefault("log_enabled", false) // deprecated, use logs_enabled instead BindEnvAndSetDefault("logset", "") BindEnvAndSetDefault("logs_config.dd_url", "agent-intake.logs.datadoghq.com") BindEnvAndSetDefault("logs_config.dd_port", 10516) BindEnvAndSetDefault("logs_config.dev_mode_use_proto", true) BindEnvAndSetDefault("logs_config.run_path", defaultRunPath) BindEnvAndSetDefault("logs_config.open_files_limit", 100) BindEnvAndSetDefault("logs_config.container_collect_all", false) // Tagger full cardinality mode // Undocumented opt-in feature for now BindEnvAndSetDefault("full_cardinality_tagging", false) // ENV vars bindings Datadog.BindEnv("api_key") Datadog.BindEnv("dd_url") Datadog.BindEnv("app_key") Datadog.BindEnv("hostname") Datadog.BindEnv("tags") Datadog.BindEnv("cmd_port") Datadog.BindEnv("conf_path") Datadog.BindEnv("enable_metadata_collection") Datadog.BindEnv("enable_gohai") Datadog.BindEnv("dogstatsd_port") Datadog.BindEnv("bind_host") Datadog.BindEnv("proc_root") Datadog.BindEnv("container_proc_root") Datadog.BindEnv("container_cgroup_root") Datadog.BindEnv("dogstatsd_socket") Datadog.BindEnv("dogstatsd_stats_port") Datadog.BindEnv("dogstatsd_non_local_traffic") Datadog.BindEnv("dogstatsd_origin_detection") Datadog.BindEnv("check_runners") Datadog.BindEnv("log_file") Datadog.BindEnv("log_level") Datadog.BindEnv("log_to_console") Datadog.BindEnv("kubernetes_kubelet_host") Datadog.BindEnv("kubernetes_http_kubelet_port") Datadog.BindEnv("kubernetes_https_kubelet_port") Datadog.BindEnv("kubelet_client_crt") Datadog.BindEnv("kubelet_client_key") Datadog.BindEnv("kubelet_tls_verify") Datadog.BindEnv("collect_kubernetes_events") 
Datadog.BindEnv("kubernetes_collect_metadata_tags") Datadog.BindEnv("kubernetes_metadata_tag_update_freq") Datadog.BindEnv("docker_labels_as_tags") Datadog.BindEnv("docker_env_as_tags") Datadog.BindEnv("kubernetes_pod_labels_as_tags") Datadog.BindEnv("kubernetes_pod_annotations_as_tags") Datadog.BindEnv("kubernetes_node_labels_as_tags") Datadog.BindEnv("ac_include") Datadog.BindEnv("ac_exclude") Datadog.BindEnv("cluster_agent") Datadog.BindEnv("cluster_agent.url") Datadog.BindEnv("cluster_agent.auth_token") Datadog.BindEnv("cluster_agent_cmd_port") Datadog.BindEnv("forwarder_timeout") Datadog.BindEnv("forwarder_retry_queue_max_size") Datadog.BindEnv("cloud_foundry") Datadog.BindEnv("bosh_id") Datadog.BindEnv("histogram_aggregates") Datadog.BindEnv("histogram_percentiles") Datadog.BindEnv("kubernetes_kubeconfig_path") Datadog.BindEnv("leader_election") Datadog.BindEnv("leader_lease_duration") Datadog.BindEnv("kube_resources_namespace") Datadog.BindEnv("collect_ec2_tags") } // BindEnvAndSetDefault sets the default value for a config parameter, and adds an env binding func BindEnvAndSetDefault(key string, val interface{}) { Datadog.SetDefault(key, val) Datadog.BindEnv(key) } var ( ddURLs = map[string]interface{}{ "app.datadoghq.com": nil, "app.datad0g.com": nil, } ) // GetMultipleEndpoints returns the api keys per domain specified in the main agent config func GetMultipleEndpoints() (map[string][]string, error) { return getMultipleEndpoints(Datadog) } // getDomainPrefix provides the right prefix for agent X.Y.Z func getDomainPrefix(app string) string { v, _ := version.New(version.AgentVersion, version.Commit) return fmt.Sprintf("%d-%d-%d-%s.agent", v.Major, v.Minor, v.Patch, app) } // addAgentVersionToDomain prefix the domain with the agent version: X-Y-Z.domain func addAgentVersionToDomain(domain string, app string) (string, error) { u, err := url.Parse(domain) if err != nil { return "", err } // we don't udpdate unknown URL (ie: proxy or custom StatsD server) if _, 
found := ddURLs[u.Host]; !found { return domain, nil } subdomain := strings.Split(u.Host, ".")[0] newSubdomain := getDomainPrefix(app) u.Host = strings.Replace(u.Host, subdomain, newSubdomain, 1) return u.String(), nil } // getMultipleEndpoints implements the logic to extract the api keys per domain from an agent config func getMultipleEndpoints(config *viper.Viper) (map[string][]string, error) { ddURL := config.GetString("dd_url") updatedDDUrl, err := addAgentVersionToDomain(ddURL, "app") if err != nil { return nil, fmt.Errorf("Could not parse 'dd_url': %s", err) } keysPerDomain := map[string][]string{ updatedDDUrl: { config.GetString("api_key"), }, } var additionalEndpoints map[string][]string err = config.UnmarshalKey("additional_endpoints", &additionalEndpoints) if err != nil { return keysPerDomain, err } // merge additional endpoints into keysPerDomain for domain, apiKeys := range additionalEndpoints { updatedDomain, err := addAgentVersionToDomain(domain, "app") if err != nil { return nil, fmt.Errorf("Could not parse url from 'additional_endpoints' %s: %s", domain, err) } if _, ok := keysPerDomain[updatedDomain]; ok { for _, apiKey := range apiKeys { keysPerDomain[updatedDomain] = append(keysPerDomain[updatedDomain], apiKey) } } else { keysPerDomain[updatedDomain] = apiKeys } } // dedupe api keys and remove domains with no api keys (or empty ones) for domain, apiKeys := range keysPerDomain { dedupedAPIKeys := make([]string, 0, len(apiKeys)) seen := make(map[string]bool) for _, apiKey := range apiKeys { trimmedAPIKey := strings.TrimSpace(apiKey) if _, ok := seen[trimmedAPIKey]; !ok && trimmedAPIKey != "" { seen[trimmedAPIKey] = true dedupedAPIKeys = append(dedupedAPIKeys, trimmedAPIKey) } } if len(dedupedAPIKeys) > 0 { keysPerDomain[domain] = dedupedAPIKeys } else { log.Infof("No API key provided for domain \"%s\", removing domain from endpoints", domain) delete(keysPerDomain, domain) } } return keysPerDomain, nil } // IsContainerized returns whether the Agent 
is running on a Docker container func IsContainerized() bool { return os.Getenv("DOCKER_DD_AGENT") == "yes" } // FileUsedDir returns the absolute path to the folder containing the config // file used to populate the registry func FileUsedDir() string { return filepath.Dir(Datadog.ConfigFileUsed()) } // IsKubernetes returns whether the Agent is running on a kubernetes cluster func IsKubernetes() bool { // Injected by Kubernetes itself if os.Getenv("KUBERNETES_SERVICE_PORT") != "" { return true } // support of Datadog environment variable for Kubernetes if os.Getenv("KUBERNETES") != "" { return true } return false }
[ "\"DOCKER_DD_AGENT\"", "\"KUBERNETES_SERVICE_PORT\"", "\"KUBERNETES\"" ]
[]
[ "DOCKER_DD_AGENT", "KUBERNETES", "KUBERNETES_SERVICE_PORT" ]
[]
["DOCKER_DD_AGENT", "KUBERNETES", "KUBERNETES_SERVICE_PORT"]
go
3
0
appdir_xdg.go
// +build !darwin,!windows package appdir import ( "os" "path/filepath" ) type dirs struct { name string } func (d *dirs) UserConfig() string { baseDir := filepath.Join(os.Getenv("HOME"), ".config") if d := os.Getenv("XDG_CONFIG_HOME"); d != "" { baseDir = d } return filepath.Join(baseDir, d.name) } func (d *dirs) UserCache() string { baseDir := filepath.Join(os.Getenv("HOME"), ".cache") if d := os.Getenv("XDG_CACHE_HOME"); d != "" { baseDir = d } return filepath.Join(baseDir, d.name) } func (d *dirs) UserLogs() string { baseDir := filepath.Join(os.Getenv("HOME"), ".local", "state") if d := os.Getenv("XDG_STATE_HOME"); d != "" { baseDir = d } return filepath.Join(baseDir, d.name) } func (d *dirs) UserData() string { baseDir := filepath.Join(os.Getenv("HOME"), ".local", "share") if d := os.Getenv("XDG_DATA_HOME"); d != "" { baseDir = d } return filepath.Join(baseDir, d.name) }
[ "\"HOME\"", "\"XDG_CONFIG_HOME\"", "\"HOME\"", "\"XDG_CACHE_HOME\"", "\"HOME\"", "\"XDG_STATE_HOME\"", "\"HOME\"", "\"XDG_DATA_HOME\"" ]
[]
[ "XDG_DATA_HOME", "XDG_CACHE_HOME", "XDG_STATE_HOME", "HOME", "XDG_CONFIG_HOME" ]
[]
["XDG_DATA_HOME", "XDG_CACHE_HOME", "XDG_STATE_HOME", "HOME", "XDG_CONFIG_HOME"]
go
5
0
web/main.go
package main import ( "encoding/gob" "fmt" "log" "net/http" "os" "time" "github.com/alexedwards/scs/v2" "github.com/captv89/bnb-booking/pkg/config" "github.com/captv89/bnb-booking/pkg/driver" "github.com/captv89/bnb-booking/pkg/handler" "github.com/captv89/bnb-booking/pkg/models" "github.com/captv89/bnb-booking/pkg/render" "github.com/joho/godotenv" ) // Mention port number without localhost while running in production var portNumber = "localhost:8080" // Assign configuration struct var app config.AppConfig var session *scs.SessionManager func init() { err := godotenv.Load() if err != nil { log.Fatal("Error loading .env file", err) } } func main() { // Run the application db, err := run() if err != nil { log.Fatal(err) } defer db.SQL.Close() // Server routes log.Println("Server starting on port", portNumber) srv := &http.Server{ Addr: portNumber, Handler: routes(&app), } // Start the server log.Fatal(srv.ListenAndServe()) } func run() (*driver.DB, error) { log.Println("Starting Application..") // what to put in the sessions to store and retrive data gob.Register(models.Reservation{}) gob.Register(models.User{}) gob.Register(models.Room{}) gob.Register(models.Restriction{}) gob.Register(models.RoomRestriction{}) app.IsProduction = false // Session manager session = scs.New() session.Lifetime = 24 * time.Hour session.Cookie.Persist = true session.Cookie.SameSite = http.SameSiteLaxMode session.Cookie.Secure = app.IsProduction app.Session = session // Connect to database dbURI := fmt.Sprintf("postgres://%s:%s@%s:%s/%s?sslmode=disable",os.Getenv("DB_USER"),os.Getenv("DB_PASSWORD"),os.Getenv("DB_HOST"),os.Getenv("DB_PORT"),os.Getenv("DB_NAME")) db, err := driver.ConnectSQL(dbURI) if err != nil { log.Fatal("Unable to connect to database", err) } // Load template cache tc, err := render.CreateTemplateCache() if err != nil { return nil, err } // Assign settings and values to config app.TemplateCache = tc app.UseCache = false // Pass config to handlers repo repo := 
handler.NewRepository(&app, db) handler.AssignRepo(repo) // Pass the cache to the render package render.GetTemplateCache(&app) return db, nil }
[ "\"DB_USER\"", "\"DB_PASSWORD\"", "\"DB_HOST\"", "\"DB_PORT\"", "\"DB_NAME\"" ]
[]
[ "DB_PASSWORD", "DB_HOST", "DB_PORT", "DB_NAME", "DB_USER" ]
[]
["DB_PASSWORD", "DB_HOST", "DB_PORT", "DB_NAME", "DB_USER"]
go
5
0
samples/java/hadoop/sources/LinearRegressionNormEq.java
/* file: LinearRegressionNormEq.java */ /******************************************************************************* * Copyright 2017-2020 Intel Corporation * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. *******************************************************************************/ /* // Content: // Java sample of multiple linear regression in the distributed processing // mode. // // The program trains the multiple linear regression model on a training // data set with the normal equations method and computes regression for // the test data. 
//////////////////////////////////////////////////////////////////////////////// */ package DAAL; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; import org.apache.hadoop.io.*; import org.apache.hadoop.mapreduce.Job; import org.apache.hadoop.mapreduce.lib.input.FileInputFormat; import org.apache.hadoop.mapreduce.lib.input.TextInputFormat; import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat; import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat; import org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat; import org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat; import org.apache.hadoop.util.Tool; import org.apache.hadoop.util.ToolRunner; import org.apache.hadoop.conf.Configured; import org.apache.hadoop.filecache.DistributedCache; import java.net.URI; import com.intel.daal.data_management.data.*; import com.intel.daal.data_management.data_source.*; import com.intel.daal.services.*; /* Implement Tool to be able to pass -libjars on start */ public class LinearRegressionNormEq extends Configured implements Tool { public static void main(String[] args) { int res = -1; try { res = ToolRunner.run(new Configuration(), new LinearRegressionNormEq(), args); } catch (Exception e) { ErrorHandling.printThrowable(e); } System.exit(res); } @Override public int run(String[] args) throws Exception { Configuration conf = this.getConf(); /* Put shared libraries into the distributed cache */ DistributedCache.createSymlink(conf); DistributedCache.addCacheFile(new URI("/Hadoop/Libraries/" + System.getenv("LIBJAVAAPI")), conf); DistributedCache.addCacheFile(new URI("/Hadoop/Libraries/" + System.getenv("LIBTBB")), conf); DistributedCache.addCacheFile(new URI("/Hadoop/Libraries/" + System.getenv("LIBTBBMALLOC")), conf); Job job = new Job(conf, "Linear regression with normal equations method (normEq) Job"); FileInputFormat.setInputPaths(job, new Path(args[0])); FileOutputFormat.setOutputPath(job, new Path(args[1])); 
job.setMapperClass(LinearRegressionNormEqStep1TrainingMapper.class); job.setReducerClass(LinearRegressionNormEqStep2TrainingReducerAndPrediction.class); job.setInputFormatClass(TextInputFormat.class); job.setOutputFormatClass(SequenceFileOutputFormat.class); job.setOutputKeyClass(IntWritable.class); job.setOutputValueClass(WriteableData.class); job.setJarByClass(LinearRegressionNormEq.class); return job.waitForCompletion(true) ? 0 : 1; } }
[ "\"LIBJAVAAPI\"", "\"LIBTBB\"", "\"LIBTBBMALLOC\"" ]
[]
[ "LIBTBB", "LIBTBBMALLOC", "LIBJAVAAPI" ]
[]
["LIBTBB", "LIBTBBMALLOC", "LIBJAVAAPI"]
java
3
0
commands/timeout_flag.go
package commands import ( "os" "strconv" "code.cloudfoundry.org/cfdot/commands/helpers" "github.com/spf13/cobra" ) var ( timeoutConfig helpers.TLSConfig timeoutPreHooks = []func(cmd *cobra.Command, args []string) error{} ) func AddBBSAndTimeoutFlags(cmd *cobra.Command) { AddBBSFlags(cmd) cmd.Flags().IntVar(&timeoutConfig.Timeout, "timeout", 0, "timeout for BBS requests in seconds [environment variable equivalent: CFDOT_TIMEOUT]") timeoutPreHooks = append(timeoutPreHooks, cmd.PreRunE) cmd.PreRunE = TimeoutPrehook } func TimeoutPrehook(cmd *cobra.Command, args []string) error { var err error for _, f := range timeoutPreHooks { if f == nil { continue } err = f(cmd, args) if err != nil { return err } } timeoutConfig.Merge(Config) err = setTimeoutFlag(cmd, args) if err != nil { return err } Config = timeoutConfig return nil } func setTimeoutFlag(cmd *cobra.Command, args []string) error { if timeoutConfig.Timeout == 0 && os.Getenv("CFDOT_TIMEOUT") != "" { timeout, err := strconv.ParseInt(os.Getenv("CFDOT_TIMEOUT"), 10, 16) if err != nil { return err } timeoutConfig.Timeout = int(timeout) } return nil }
[ "\"CFDOT_TIMEOUT\"", "\"CFDOT_TIMEOUT\"" ]
[]
[ "CFDOT_TIMEOUT" ]
[]
["CFDOT_TIMEOUT"]
go
1
0
setup.py
#!/usr/bin/env python #_____________________________________________________________________________ # # This file is part of BridgeDB, a Tor bridge distribution system. # # :authors: Isis Lovecruft 0xA3ADB67A2CDB8B35 <[email protected]> # Aaron Gibson 0x2C4B239DD876C9F6 <[email protected]> # Nick Mathewson 0x21194EBB165733EA <[email protected]> # please also see AUTHORS file # :copyright: (c) 2007-2013, The Tor Project, Inc. # (c) 2007-2013, all entities within the AUTHORS file # :license: see LICENSE for licensing information #_____________________________________________________________________________ from __future__ import print_function import os import setuptools import sys from glob import glob # Fix circular dependency with setup.py install try: from babel.messages.frontend import compile_catalog, extract_messages from babel.messages.frontend import init_catalog, update_catalog except ImportError: compile_catalog = extract_messages = init_catalog = update_catalog = None # setup automatic versioning (see top-level versioneer.py file): import versioneer versioneer.versionfile_source = 'lib/bridgedb/_version.py' versioneer.versionfile_build = 'bridgedb/_version.py' # when creating a release, tags should be prefixed with 'bridgedb-', like so: # # git checkout -b release-6.6.6 develop # [do some stuff, merge whatever, test things] # git tag -S bridgedb-6.6.6 # git push tpo-common --tags # git checkout master # git merge -S --no-ff release-6.6.6 # git checkout develop # git merge -S --no-ff master # git branch -d release-6.6.6 # versioneer.tag_prefix = 'bridgedb-' # source tarballs should unpack to a directory like 'bridgedb-6.6.6' versioneer.parentdir_prefix = 'bridgedb-' pkgpath = os.path.join('lib', 'bridgedb') # Repo directory that contains translations; this directory should contain # both uncompiled translations (.po files) as well as compiled ones (.mo # files). We only want to install the .mo files. 
repo_i18n = os.path.join(pkgpath, 'i18n') # The list of country codes for supported languages will be stored as a list # variable, ``_supported``, in this file, so that the bridgedb packages # __init__.py can access it: repo_langs = os.path.join(pkgpath, '_langs.py') # The directory containing template files and other resources to serve on the # web server: repo_templates = os.path.join(pkgpath, 'templates') # The directories to install non-sourcecode resources into should always be # given as relative paths, in order to force distutils to install relative to # the rest of the codebase. # # Directory to installed compiled translations (.mo files) into: install_i18n = os.path.join('bridgedb', 'i18n') # Directory to install docs, license, and other text resources into: install_docs = os.path.join('share', 'doc', 'bridgedb') def get_cmdclass(): """Get our cmdclass dictionary for use in setuptool.setup(). This must be done outside the call to setuptools.setup() because we need to add our own classes to the cmdclass dictionary, and then update that dictionary with the one returned from versioneer.get_cmdclass(). """ cmdclass = {'test': Trial, 'compile_catalog': compile_catalog, 'extract_messages': extract_messages, 'init_catalog': init_catalog, 'update_catalog': update_catalog} cmdclass.update(versioneer.get_cmdclass()) return cmdclass def get_requirements(): """Extract the list of requirements from our requirements.txt. :rtype: 2-tuple :returns: Two lists, the first is a list of requirements in the form of pkgname==version. The second is a list of URIs or VCS checkout strings which specify the dependency links for obtaining a copy of the requirement. 
""" requirements_file = os.path.join(os.getcwd(), 'requirements.txt') requirements = [] links=[] try: with open(requirements_file) as reqfile: for line in reqfile.readlines(): line = line.strip() if line.startswith('#'): continue elif line.startswith( ('https://', 'git://', 'hg://', 'svn://')): links.append(line) else: requirements.append(line) except (IOError, OSError) as error: print(error) return requirements, links def get_supported_langs(): """Get the paths for all compiled translation files. The two-letter country code of each language which is going to be installed will be added to a list, and this list will be written to :attr:`repo_langs`, so that lib/bridgedb/__init__.py can store a package-level attribute ``bridgedb.__langs__``, which will be a list of any languages which were installed. Then, the paths of the compiled translations files are added to :ivar:`data_files`. These should be included in the ``data_files`` parameter in :func:`~setuptools.setup` in order for setuptools to be able to tell the underlying distutils ``install_data`` command to include these files. See http://docs.python.org/2/distutils/setupscript.html#installing-additional-files for more information. :ivar list supported: A list of two-letter country codes, one for each language we currently provide translations support for. :ivar list lang_dirs: The directories (relative or absolute) to install the compiled translation file to. :ivar list lang_files: The paths to compiled translations files, relative to this setup.py script. :rtype: list :returns: Two lists, ``lang_dirs`` and ``lang_files``. 
""" supported = [] lang_dirs = [] lang_files = [] for lang in os.listdir(repo_i18n): if lang.endswith('templates'): continue supported.append(lang) lang_dirs.append(os.path.join(install_i18n, lang)) lang_files.append(os.path.join(repo_i18n, lang, 'LC_MESSAGES', 'bridgedb.mo')) supported.sort() # Write our list of supported languages to 'lib/bridgedb/_langs.py': new_langs_lines = [] with open(repo_langs, 'r') as langsfile: for line in langsfile.readlines(): if line.startswith('supported'): # Change the 'supported' list() into a set(): line = "supported = set(%s)\n" % supported new_langs_lines.append(line) with open(repo_langs, 'w') as newlangsfile: for line in new_langs_lines: newlangsfile.write(line) return lang_dirs, lang_files def get_template_files(): """Return the paths to any web resource files to include in the package. :rtype: list :returns: Any files in :attr:`repo_templates` which match one of the glob patterns in :ivar:`include_patterns`. """ include_patterns = ['*.html', '*.txt', '*.asc', 'assets/*.png', 'assets/*.svg', 'assets/css/*.css', 'assets/font/*.woff', 'assets/font/*.ttf', 'assets/font/*.svg', 'assets/font/*.eot'] template_files = [] for include_pattern in include_patterns: pattern = os.path.join(repo_templates, include_pattern) matches = glob(pattern) template_files.extend(matches) return template_files def get_data_files(filesonly=False): """Return any hard-coded data_files which should be distributed. This is necessary so that both the distutils-derived :class:`installData` class and the setuptools ``data_files`` parameter include the same files. Call this function with ``filesonly=True`` to get a list of files suitable for giving to the ``package_data`` parameter in ``setuptools.setup()``. Or, call it with ``filesonly=False`` (the default) to get a list which is suitable for using as ``distutils.command.install_data.data_files``. 
:param bool filesonly: If true, only return the locations of the files to install, not the directories to install them into. :rtype: list :returns: If ``filesonly``, returns a list of file paths. Otherwise, returns a list of 2-tuples containing: one, the directory to install to, and two, the files to install to that directory. """ data_files = [] doc_files = ['README', 'TODO', 'LICENSE', 'requirements.txt'] lang_dirs, lang_files = get_supported_langs() template_files = get_template_files() if filesonly: data_files.extend(doc_files) for lst in lang_files, template_files: for filename in lst: if filename.startswith(pkgpath): # The +1 gets rid of the '/' at the beginning: filename = filename[len(pkgpath) + 1:] data_files.append(filename) else: data_files.append((install_docs, doc_files)) for ldir, lfile in zip(lang_dirs, lang_files): data_files.append((ldir, [lfile,])) #[sys.stdout.write("Added data_file '%s'\n" % x) for x in data_files] return data_files class Trial(setuptools.Command): """Twisted Trial setuptools command. Based on the setuptools Trial command in Zooko's Tahoe-LAFS, as well as https://github.com/simplegeo/setuptools-trial/ (which is also based on the Tahoe-LAFS code). Pieces of the original implementation of this 'test' command (that is, for the original pyunit-based BridgeDB tests which, a long time ago, in a galaxy far far away, lived in bridgedb.Tests) were based on setup.py from Nick Mathewson's mixminion, which was based on the setup.py from Zooko's pyutil package, which was in turn based on http://mail.python.org/pipermail/distutils-sig/2002-January/002714.html. Crusty, old-ass Python, like hella wut. """ description = "Run Twisted Trial-based tests." user_options = [ ('debug', 'b', ("Run tests in a debugger. 
If that debugger is pdb, will " "load '.pdbrc' from current directory if it exists.")), ('debug-stacktraces', 'B', "Report Deferred creation and callback stack traces"), ('debugger=', None, ("The fully qualified name of a debugger to use if " "--debug is passed (default: pdb)")), ('disablegc', None, "Disable the garbage collector"), ('force-gc', None, "Have Trial run gc.collect() before and after each test case"), ('jobs=', 'j', "Number of local workers to run, a strictly positive integer"), ('profile', None, "Run tests under the Python profiler"), ('random=', 'Z', "Run tests in random order using the specified seed"), ('reactor=', 'r', "Which reactor to use"), ('reporter=', None, "Customize Trial's output with a reporter plugin"), ('rterrors', 'e', "Realtime errors: print out tracebacks as soon as they occur"), ('spew', None, "Print an insanely verbose log of everything that happens"), ('testmodule=', None, "Filename to grep for test cases (-*- test-case-name)"), ('tbformat=', None, ("Specify the format to display tracebacks with. 
Valid " "formats are 'plain', 'emacs', and 'cgitb' which uses " "the nicely verbose stdlib cgitb.text function")), ('unclean-warnings', None, "Turn dirty reactor errors into warnings"), ('until-failure', 'u', "Repeat a test (specified by -s) until it fails."), ('without-module=', None, ("Fake the lack of the specified modules, separated " "with commas")), ] boolean_options = ['debug', 'debug-stacktraces', 'disablegc', 'force-gc', 'profile', 'rterrors', 'spew', 'unclean-warnings', 'until-failure'] def initialize_options(self): self.debug = None self.debug_stacktraces = None self.debugger = None self.disablegc = None self.force_gc = None self.jobs = None self.profile = None self.random = None self.reactor = None self.reporter = None self.rterrors = None self.spew = None self.testmodule = None self.tbformat = None self.unclean_warnings = None self.until_failure = None self.without_module = None def finalize_options(self): build = self.get_finalized_command('build') self.build_purelib = build.build_purelib self.build_platlib = build.build_platlib def run(self): self.run_command('build') old_path = sys.path[:] sys.path[0:0] = [self.build_purelib, self.build_platlib] result = 1 try: result = self.run_tests() finally: sys.path = old_path raise SystemExit(result) def run_tests(self): # We do the import from Twisted inside the function instead of the top # of the file because since Twisted is a setup_requires, we can't # assume that Twisted will be installed on the user's system prior, so # if we don't do the import here, then importing from this plugin will # fail. from twisted.scripts import trial if not self.testmodule: self.testmodule = "bridgedb.test" # Handle parsing the trial options passed through the setuptools # trial command. 
cmd_options = [] for opt in self.boolean_options: if getattr(self, opt.replace('-', '_'), None): cmd_options.append('--%s' % opt) for opt in ('debugger', 'jobs', 'random', 'reactor', 'reporter', 'testmodule', 'tbformat', 'without-module'): value = getattr(self, opt.replace('-', '_'), None) if value is not None: cmd_options.extend(['--%s' % opt, value]) config = trial.Options() config.parseOptions(cmd_options) config['tests'] = [self.testmodule,] trial._initialDebugSetup(config) trialRunner = trial._makeRunner(config) suite = trial._getSuite(config) # run the tests if self.until_failure: test_result = trialRunner.runUntilFailure(suite) else: test_result = trialRunner.run(suite) if test_result.wasSuccessful(): return 0 # success return 1 # failure # If there is an environment variable BRIDGEDB_INSTALL_DEPENDENCIES=0, it will # disable checking for, fetching, and installing BridgeDB's dependencies with # easy_install. # # Setting BRIDGEDB_INSTALL_DEPENDENCIES=0 is *highly* recommended, because # easy_install is a security nightmare. Automatically installing dependencies # is enabled by default, however, because this is how all Python packages are # supposed to work. 
if bool(int(os.environ.get("BRIDGEDB_INSTALL_DEPENDENCIES", 1))): requires, deplinks = get_requirements() else: requires, deplinks = [], [] setuptools.setup( name='bridgedb', version=versioneer.get_version(), description='Backend systems for distribution of Tor bridge relays', author='Nick Mathewson', author_email='nickm at torproject dot org', maintainer='Isis Agora Lovecruft', maintainer_email='[email protected] 0xA3ADB67A2CDB8B35', url='https://www.torproject.org', download_url='https://gitweb.torproject.org/bridgedb.git', package_dir={'': 'lib'}, packages=['bridgedb', 'bridgedb.email', 'bridgedb.parse', 'bridgedb.test'], scripts=['scripts/bridgedb', 'scripts/get-tor-exits'], extras_require={'test': ["sure==1.2.2", "coverage==3.7.1", "leekspin==1.1.4"]}, zip_safe=False, cmdclass=get_cmdclass(), include_package_data=True, install_requires=requires, dependency_links=deplinks, package_data={'bridgedb': get_data_files(filesonly=True)}, exclude_package_data={'bridgedb': ['*.po', '*.pot']}, message_extractors={pkgpath: [ ('**.py', 'python', None), ('templates/**.html', 'mako', None), ('public/**', 'ignore', None)]}, ) # XXX I think we don't need the 'public/**' babel.messages.frontend.method_map # anymore... 2013-10-15 --isis
[]
[]
[ "BRIDGEDB_INSTALL_DEPENDENCIES" ]
[]
["BRIDGEDB_INSTALL_DEPENDENCIES"]
python
1
0
var/spack/repos/builtin/packages/go/package.py
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other # Spack Project Developers. See the top-level COPYRIGHT file for details. # # SPDX-License-Identifier: (Apache-2.0 OR MIT) import os import platform import re import llnl.util.tty as tty from spack import * # - vanilla CentOS 7, and possibly other systems, fail a test: # TestCloneNEWUSERAndRemapRootDisableSetgroups # # The Fix, discussed here: https://github.com/golang/go/issues/16283 # is to enable "user_namespace". # # On a Digital Ocean image, this can be achieved by updating # `/etc/default/grub` so that the `GRUB_CMDLINE_LINUX` variable # includes `user_namespace.enable=1`, re-cooking the grub # configuration with `sudo grub2-mkconfig -o /boot/grub2/grub.cfg`, # and then rebooting. # # - on CentOS 7 systems (and possibly others) you need to have the # glibc package installed or various static cgo tests fail. # # - When building on a *large* machine (144 cores, 1.5TB RAM) I need # to run `ulimit -u 8192` to bump up the max number of user processes. # Failure to do so results in an explosion in one of the tests and an # epic stack trace.... 
class Go(Package): """The golang compiler and build environment""" homepage = "https://golang.org" url = "https://dl.google.com/go/go1.16.6.src.tar.gz" git = "https://go.googlesource.com/go.git" extendable = True executables = ['^go$'] maintainers = ['alecbcs'] version('1.18', sha256='38f423db4cc834883f2b52344282fa7a39fbb93650dc62a11fdf0be6409bdad6') version('1.17.8', sha256='2effcd898140da79a061f3784ca4f8d8b13d811fb2abe9dad2404442dabbdf7a') version('1.17.7', sha256='c108cd33b73b1911a02b697741df3dea43e01a5c4e08e409e8b3a0e3745d2b4d') version('1.17.3', sha256='705c64251e5b25d5d55ede1039c6aa22bea40a7a931d14c370339853643c3df0', deprecated=True) version('1.17.2', sha256='2255eb3e4e824dd7d5fcdc2e7f84534371c186312e546fb1086a34c17752f431', deprecated=True) version('1.17.1', sha256='49dc08339770acd5613312db8c141eaf61779995577b89d93b541ef83067e5b1', deprecated=True) version('1.17', sha256='3a70e5055509f347c0fb831ca07a2bf3b531068f349b14a3c652e9b5b67beb5d', deprecated=True) version('1.16.10', sha256='a905472011585e403d00d2a41de7ced29b8884309d73482a307f689fd0f320b5') version('1.16.9', sha256='0a1cc7fd7bd20448f71ebed64d846138850d5099b18cf5cc10a4fc45160d8c3d') version('1.16.6', sha256='a3a5d4bc401b51db065e4f93b523347a4d343ae0c0b08a65c3423b05a138037d') version('1.16.5', sha256='7bfa7e5908c7cc9e75da5ddf3066d7cbcf3fd9fa51945851325eebc17f50ba80') version('1.16.4', sha256='ae4f6b6e2a1677d31817984655a762074b5356da50fb58722b99104870d43503') version('1.16.3', sha256='b298d29de9236ca47a023e382313bcc2d2eed31dfa706b60a04103ce83a71a25') version('1.16.2', sha256='37ca14287a23cb8ba2ac3f5c3dd8adbc1f7a54b9701a57824bf19a0b271f83ea') version('1.16', sha256='7688063d55656105898f323d90a79a39c378d86fe89ae192eb3b7fc46347c95a') version('1.15.13', sha256='99069e7223479cce4553f84f874b9345f6f4045f27cf5089489b546da619a244') version('1.15.12', sha256='1c6911937df4a277fa74e7b7efc3d08594498c4c4adc0b6c4ae3566137528091') version('1.15.11', 
sha256='f25b2441d4c76cf63cde94d59bab237cc33e8a2a139040d904c8630f46d061e5') version('1.15.8', sha256='540c0ab7781084d124991321ed1458e479982de94454a98afab6acadf38497c2') version('1.15.7', sha256='8631b3aafd8ecb9244ec2ffb8a2a8b4983cf4ad15572b9801f7c5b167c1a2abc') version('1.15.6', sha256='890bba73c5e2b19ffb1180e385ea225059eb008eb91b694875dd86ea48675817') version('1.15.5', sha256='c1076b90cf94b73ebed62a81d802cd84d43d02dea8c07abdc922c57a071c84f1') version('1.15.2', sha256='28bf9d0bcde251011caae230a4a05d917b172ea203f2a62f2c2f9533589d4b4d') version('1.15.1', sha256='d3743752a421881b5cc007c76b4b68becc3ad053e61275567edab1c99e154d30') version('1.15', sha256='69438f7ed4f532154ffaf878f3dfd83747e7a00b70b3556eddabf7aaee28ac3a') version('1.14.14', sha256='6204bf32f58fae0853f47f1bd0c51d9e0ac11f1ffb406bed07a0a8b016c8a76f') version('1.14.13', sha256='ba1d244c6b5c0ed04aa0d7856d06aceb89ed31b895de6ff783efb1cc8ab6b177') version('1.14.12', sha256='b34f4b7ad799eab4c1a52bdef253602ce957125a512f5a1b28dce43c6841b971') version('1.14.9', sha256='c687c848cc09bcabf2b5e534c3fc4259abebbfc9014dd05a1a2dc6106f404554') version('1.14.8', sha256='d9a613fb55f508cf84e753456a7c6a113c8265839d5b7fe060da335c93d6e36a') version('1.14.6', sha256='73fc9d781815d411928eccb92bf20d5b4264797be69410eac854babe44c94c09') version('1.14.5', sha256='ca4c080c90735e56152ac52cd77ae57fe573d1debb1a58e03da9cc362440315c') version('1.14.4', sha256='7011af3bbc2ac108d1b82ea8abb87b2e63f78844f0259be20cde4d42c5c40584') version('1.14.3', sha256='93023778d4d1797b7bc6a53e86c3a9b150c923953225f8a48a2d5fabc971af56') version('1.14.2', sha256='98de84e69726a66da7b4e58eac41b99cbe274d7e8906eeb8a5b7eb0aadee7f7c') version('1.14.1', sha256='2ad2572115b0d1b4cb4c138e6b3a31cee6294cb48af75ee86bec3dca04507676') version('1.14', sha256='6d643e46ad565058c7a39dac01144172ef9bd476521f42148be59249e4b74389') version('1.13.14', sha256='197333e97290e9ea8796f738d61019dcba1c377c2f3961fd6a114918ecc7ab06') version('1.13.13', 
sha256='ab7e44461e734ce1fd5f4f82c74c6d236e947194d868514d48a2b1ea73d25137') version('1.13.12', sha256='17ba2c4de4d78793a21cc659d9907f4356cd9c8de8b7d0899cdedcef712eba34') version('1.13.11', sha256='89ed1abce25ad003521c125d6583c93c1280de200ad221f961085200a6c00679') version('1.13.10', sha256='eb9ccc8bf59ed068e7eff73e154e4f5ee7eec0a47a610fb864e3332a2fdc8b8c') version('1.13.9', sha256='34bb19d806e0bc4ad8f508ae24bade5e9fedfa53d09be63b488a9314d2d4f31d') version('1.13.8', sha256='b13bf04633d4d8cf53226ebeaace8d4d2fd07ae6fa676d0844a688339debec34') version('1.13.7', sha256='e4ad42cc5f5c19521fbbbde3680995f2546110b5c6aa2b48c3754ff7af9b41f4') version('1.13.6', sha256='aae5be954bdc40bcf8006eb77e8d8a5dde412722bc8effcdaf9772620d06420c') version('1.13.5', sha256='27d356e2a0b30d9983b60a788cf225da5f914066b37a6b4f69d457ba55a626ff') version('1.13.4', sha256='95dbeab442ee2746b9acf0934c8e2fc26414a0565c008631b04addb8c02e7624') version('1.13.3', sha256='4f7123044375d5c404280737fbd2d0b17064b66182a65919ffe20ffe8620e3df') version('1.13.2', sha256='1ea68e01472e4276526902b8817abd65cf84ed921977266f0c11968d5e915f44') version('1.13.1', sha256='81f154e69544b9fa92b1475ff5f11e64270260d46e7e36c34aafc8bc96209358') version('1.13', sha256='3fc0b8b6101d42efd7da1da3029c0a13f22079c0c37ef9730209d8ec665bf122') version('1.12.17', sha256='de878218c43aa3c3bad54c1c52d95e3b0e5d336e1285c647383e775541a28b25') version('1.12.15', sha256='8aba74417e527524ad5724e6e6c21016795d1017692db76d1b0851c6bdec84c3') version('1.12.14', sha256='39dbf05f7e2ffcb19b08f07d53dcc96feadeb1987fef9e279e7ff0c598213064') version('1.12.13', sha256='5383d3b8db4baa48284ffcb14606d9cad6f03e9db843fa6d835b94d63cccf5a7') version('1.12.12', sha256='fcb33b5290fa9bcc52be3211501540df7483d7276b031fc77528672a3c705b99') version('1.12.11', sha256='fcf58935236802929f5726e96cd1d900853b377bec2c51b2e37219c658a4950f') version('1.12.10', sha256='f56e48fce80646d3c94dcf36d3e3f490f6d541a92070ad409b87b6bbb9da3954') version('1.12.9', 
sha256='ab0e56ed9c4732a653ed22e232652709afbf573e710f56a07f7fdeca578d62fc') version('1.12.8', sha256='11ad2e2e31ff63fcf8a2bdffbe9bfa2e1845653358daed593c8c2d03453c9898') version('1.12.6', sha256='c96c5ccc7455638ae1a8b7498a030fe653731c8391c5f8e79590bce72f92b4ca') version('1.12.5', sha256='2aa5f088cbb332e73fc3def546800616b38d3bfe6b8713b8a6404060f22503e8') version('1.11.13', sha256='5032095fd3f641cafcce164f551e5ae873785ce7b07ca7c143aecd18f7ba4076') version('1.11.11', sha256='1fff7c33ef2522e6dfaf6ab96ec4c2a8b76d018aae6fc88ce2bd40f2202d0f8c') version('1.11.10', sha256='df27e96a9d1d362c46ecd975f1faa56b8c300f5c529074e9ea79bdd885493c1b') version('1.11.5', sha256='bc1ef02bb1668835db1390a2e478dcbccb5dd16911691af9d75184bbe5aa943e') version('1.11.4', sha256='4cfd42720a6b1e79a8024895fa6607b69972e8e32446df76d6ce79801bbadb15') version('1.11.2', sha256='042fba357210816160341f1002440550e952eb12678f7c9e7e9d389437942550') version('1.11.1', sha256='558f8c169ae215e25b81421596e8de7572bd3ba824b79add22fba6e284db1117') version('1.11', sha256='afc1e12f5fe49a471e3aae7d906c73e9d5b1fdd36d52d72652dde8f6250152fb') version('1.10.3', sha256='567b1cc66c9704d1c019c50bef946272e911ec6baf244310f87f4e678be155f2') version('1.10.2', sha256='6264609c6b9cd8ed8e02ca84605d727ce1898d74efa79841660b2e3e985a98bd') version('1.10.1', sha256='589449ff6c3ccbff1d391d4e7ab5bb5d5643a5a41a04c99315e55c16bbf73ddc') version('1.9.5', sha256='f1c2bb7f32bbd8fa7a19cc1608e0d06582df32ff5f0340967d83fb0017c49fbc') version('1.9.2', sha256='665f184bf8ac89986cfd5a4460736976f60b57df6b320ad71ad4cef53bb143dc') version('1.9.1', sha256='a84afc9dc7d64fe0fa84d4d735e2ece23831a22117b50dafc75c1484f1cb550e') version('1.9', sha256='a4ab229028ed167ba1986825751463605264e44868362ca8e7accc8be057e993') version('1.8.3', sha256='5f5dea2447e7dcfdc50fa6b94c512e58bfba5673c039259fd843f68829d99fa6') version('1.8.1', sha256='33daf4c03f86120fdfdc66bddf6bfff4661c7ca11c5da473e537f4d69b470e57') version('1.8', 
sha256='406865f587b44be7092f206d73fc1de252600b79b3cacc587b74b5ef5c623596') version('1.7.5', sha256='4e834513a2079f8cbbd357502cccaac9507fd00a1efe672375798858ff291815') version('1.7.4', sha256='4c189111e9ba651a2bb3ee868aa881fab36b2f2da3409e80885ca758a6b614cc') version('1.6.4', sha256='8796cc48217b59595832aa9de6db45f58706dae68c9c7fbbd78c9fdbe3cd9032') provides('golang') depends_on('git', type=('build', 'link', 'run')) # aarch64 machines (including Macs with Apple silicon) can't use # go-bootstrap because it pre-dates aarch64 support in Go. These machines # have to rely on Go support in gcc (which may require compiling a version # of gcc with Go support just to satisfy this requirement). However, # there's also a bug in some versions of GCC's Go front-end that prevents # these versions from properly bootstrapping Go. (See issue #47771 # https://github.com/golang/go/issues/47771 ) On the 10.x branch, we need # at least 10.4. On the 11.x branch, we need at least 11.3. if platform.machine() == 'aarch64': depends_on('[email protected]:10,11.3.0: languages=go', type='build') else: depends_on('go-bootstrap', type='build') # https://github.com/golang/go/issues/17545 patch('time_test.patch', when='@1.6.4:1.7.4') # https://github.com/golang/go/issues/17986 # The fix for this issue has been merged into the 1.8 tree. patch('misc-cgo-testcshared.patch', level=0, when='@1.6.4:1.7.5') # Unrecognized option '-fno-lto' conflicts('%gcc@:4', when='@1.17:') @classmethod def determine_version(cls, exe): output = Executable(exe)('version', output=str, error=str) match = re.search(r'go version go(\S+)', output) return match.group(1) if match else None # NOTE: Older versions of Go attempt to download external files that have # since been moved while running the test suite. This patch modifies the # test files so that these tests don't cause false failures. 
# See: https://github.com/golang/go/issues/15694 @when('@:1.4.3') def patch(self): test_suite_file = FileFilter(join_path('src', 'run.bash')) test_suite_file.filter( r'^(.*)(\$GOROOT/src/cmd/api/run.go)(.*)$', r'# \1\2\3', ) def install(self, spec, prefix): bash = which('bash') wd = '.' # 1.11.5 directory structure is slightly different if self.version == Version('1.11.5'): wd = 'go' with working_dir(join_path(wd, 'src')): bash('{0}.bash'.format('all' if self.run_tests else 'make')) install_tree(wd, prefix) def setup_build_environment(self, env): env.set('GOROOT_FINAL', self.spec.prefix) # We need to set CC/CXX_FOR_TARGET, otherwise cgo will use the # internal Spack wrappers and fail. env.set('CC_FOR_TARGET', self.compiler.cc) env.set('CXX_FOR_TARGET', self.compiler.cxx) def setup_dependent_package(self, module, dependent_spec): """Called before go modules' install() methods. In most cases, extensions will only need to set GOPATH and use go:: env['GOPATH'] = self.source_path + ':' + env['GOPATH'] go('get', '<package>', env=env) install_tree('bin', prefix.bin) """ # Add a go command/compiler for extensions module.go = self.spec['go'].command def generate_path_components(self, dependent_spec): if os.environ.get('GOROOT', False): tty.warn('GOROOT is set, this is not recommended') # Set to include paths of dependencies path_components = [dependent_spec.prefix] for d in dependent_spec.traverse(): if d.package.extends(self.spec): path_components.append(d.prefix) return ':'.join(path_components) def setup_dependent_build_environment(self, env, dependent_spec): # This *MUST* be first, this is where new code is installed env.prepend_path('GOPATH', self.generate_path_components( dependent_spec)) def setup_dependent_run_environment(self, env, dependent_spec): # Allow packages to find this when using module files env.prepend_path('GOPATH', self.generate_path_components( dependent_spec))
[]
[]
[ "GOROOT" ]
[]
["GOROOT"]
python
1
0
test/distributed/test_c10d_common.py
# Owner(s): ["oncall: distributed"] import copy import os import sys import tempfile import threading import time import unittest from datetime import timedelta from itertools import product from sys import platform import torch import torch.distributed as dist if not dist.is_available(): print("distributed package not available, skipping tests", file=sys.stderr) sys.exit(0) import torch.distributed.distributed_c10d as c10d import torch.distributed.algorithms.ddp_comm_hooks.powerSGD_hook as powerSGD import torch.nn.functional as F import torch.testing._internal.common_utils as common from torch import nn from torch.nn.parallel import DistributedDataParallel from torch.testing._internal.common_distributed import ( MultiProcessTestCase, ) from torch.testing._internal.common_utils import ( TestCase, load_tests, run_tests, TEST_WITH_DEV_DBG_ASAN, ) if TEST_WITH_DEV_DBG_ASAN: print("Multiprocessing spawn is not compatible with dev/dbg asan", file=sys.stderr) sys.exit(0) # load_tests from common_utils is used to automatically filter tests for # sharding on sandcastle. This line silences flake warnings load_tests = load_tests if platform == "darwin": LOOPBACK = "lo0" else: LOOPBACK = "lo" torch.backends.cuda.matmul.allow_tf32 = False def gpus_for_rank(world_size): """Multigpu tests are designed to simulate the multi nodes with multi GPUs on each node. Nccl backend requires equal #GPUs in each process. On a single node, all visible GPUs are evenly divided to subsets, each process only uses a subset. 
""" visible_devices = list(range(torch.cuda.device_count())) gpus_per_process = torch.cuda.device_count() // world_size gpus_for_rank = [] for rank in range(world_size): gpus_for_rank.append( visible_devices[rank * gpus_per_process : (rank + 1) * gpus_per_process] ) return gpus_for_rank class AbstractTimeoutTest(object): def _test_store_timeout(self, backend, init_method, c2p): try: dist.init_process_group( backend=backend, init_method=init_method, world_size=1, rank=0, timeout=timedelta(seconds=1), ) default_store = c10d._get_default_store() tik = time.time() with self.assertRaisesRegex(RuntimeError, "Timeout"): default_store.get("nonexistent key") tok = time.time() dist.destroy_process_group() c2p.append(float(tok - tik)) except RuntimeError as e: # catch "Address already in use" error and report it to the main # thread c2p.append(e) def _init_methods(self): f = tempfile.NamedTemporaryFile(delete=False) if sys.platform == "win32": yield "file:///%s" % f.name.replace("\\", "/") f.close() else: yield "file://%s" % f.name f.close() yield "tcp://127.0.0.1:%d" % common.find_free_port() def _test_default_store_timeout(self, backend): for init_method in self._init_methods(): c2p = [] t = threading.Thread( target=self._test_store_timeout, args=(backend, init_method, c2p) ) t.daemon = True t.start() t.join(5) self.assertEqual(1, len(c2p)) if isinstance(c2p[0], float): # waiting time should be 1s, use 3s to rule out false alarm self.assertGreater(3, c2p[0]) elif isinstance(c2p[0], RuntimeError): # let @retry_on_connect_failures handle the error raise c2p[0] else: raise RuntimeError("Unexpected type {}".format(type(c2p[0]))) class Net(nn.Module): def __init__(self): super(Net, self).__init__() self.fc1 = nn.Linear(2, 10, bias=False) self.fc2 = nn.Linear(10, 50, bias=False) self.fc3 = nn.Linear(50, 4, bias=False) self.relu = nn.ReLU() def forward(self, x): x = self.relu(self.fc1(x)) x = self.relu(self.fc2(x)) x = self.fc3(x) return F.softmax(x, dim=1) class 
DoubleGpuNet(nn.Module): def __init__(self, gpus): super(DoubleGpuNet, self).__init__() self.fc1 = nn.Linear(2, 10, bias=False).to(gpus[0]) self.fc2 = nn.Linear(10, 50, bias=False).to(gpus[1]) self.fc3 = nn.Linear(50, 4, bias=False).to(gpus[1]) self.relu = nn.ReLU() self.no_grad_param = nn.Parameter( torch.tensor([2, 2]).long(), requires_grad=False ).to(gpus[0]) def forward(self, x): dev0 = self.fc1.weight.device dev1 = self.fc2.weight.device x = self.relu(self.fc1(x.to(dev0))) x = self.relu(self.fc2(x.to(dev1))) x = self.fc3(x) return F.softmax(x, dim=1).to(dev0) class QuadraGpuNet(nn.Module): def __init__(self, gpus): super(QuadraGpuNet, self).__init__() self.fc1 = nn.Linear(2, 10, bias=False).to(gpus[0]) self.fc2 = nn.Linear(10, 50, bias=False).to(gpus[1]) self.fc3 = nn.Linear(50, 4, bias=False).to(gpus[2]) self.fc4 = nn.Linear(4, 4, bias=False).to(gpus[3]) self.relu = nn.ReLU() self.no_grad_param = nn.Parameter( torch.tensor([2, 2]).long(), requires_grad=False ).to(gpus[0]) def forward(self, x): dev0 = self.fc1.weight.device dev1 = self.fc2.weight.device dev2 = self.fc3.weight.device dev3 = self.fc4.weight.device x = self.relu(self.fc1(x.to(dev0))) x = self.relu(self.fc2(x.to(dev1))) x = self.relu(self.fc3(x.to(dev2))) x = self.fc4(x.to(dev3)) return F.softmax(x, dim=1).to(dev0) class ConvNet(nn.Module): def __init__(self, gpus, layouts, dtypes): super(ConvNet, self).__init__() self.dtypes = dtypes if isinstance(gpus, list): self.layer_gpus = gpus else: gpus = [gpus] * 4 self.conv0 = torch.nn.Conv2d(8, 16, (2, 2)).to( device=gpus[0], memory_format=layouts[0], dtype=dtypes[0] ) self.conv1 = torch.nn.Conv2d(16, 32, (2, 2)).to( device=gpus[1], memory_format=layouts[1], dtype=dtypes[1] ) self.conv2 = torch.nn.Conv2d(32, 16, (2, 2)).to( device=gpus[2], memory_format=layouts[2], dtype=dtypes[2] ) self.conv3 = torch.nn.Conv2d(16, 8, (2, 2)).to( device=gpus[3], memory_format=layouts[3], dtype=dtypes[3] ) def forward(self, x): x = x.to(self.dtypes[0]) # Could say # x = 
self.conv0(x).to(device=self.conv1.weight.device, dtype=self.dtypes[1]) # etc. But I don't want to appeal to the weights' devices directly, because part of this test's purpose # is to verify weights are where expected if the model gets replicated. gpus = self.layer_gpus if hasattr(self, "layer_gpus") else [x.device] * 4 x = self.conv0(x).to(device=gpus[1], dtype=self.dtypes[1]) x = self.conv1(x).to(device=gpus[2], dtype=self.dtypes[2]) x = self.conv2(x).to(device=gpus[3], dtype=self.dtypes[3]) return self.conv3(x) class Task(nn.Module): def __init__(self): super().__init__() self.p = nn.Parameter(torch.ones(2, 2)) def forward(self, x): return self.p + x class ModuleForDdpCommHook(nn.Module): def __init__(self): super().__init__() self.t0 = Task() def forward(self, x, rank): return self.t0(x + rank) class SparseGradientModule(nn.Module): def __init__(self): super(SparseGradientModule, self).__init__() self.embedding = nn.EmbeddingBag(10, 10, sparse=True) def forward(self, x): return F.softmax(self.embedding(x), dim=1) class AbstractDistributedDataParallelTest(object): def tearDown(self): # DistributedDataParallel test doesn't seem to call FileStore destructor # TODO: investigate this test and the test is known to have issues # Use this hack to remove files for that test try: os.remove(self.file_name) except OSError: pass @property def world_size(self): return 2 def _prepare_single_device_module( self, process_group, devices, device_ids, global_batch_size, gradient_as_bucket_view=False, ): model = Net() device = devices[0] if devices else torch.device("cuda:%d" % self.rank) ddp_model = DistributedDataParallel( copy.deepcopy(model).to(device), device_ids=device_ids, process_group=process_group, bucket_cap_mb=0.001, gradient_as_bucket_view=gradient_as_bucket_view, ) model.to(device) input = torch.randn(global_batch_size, 2).to(device) target = torch.randn(global_batch_size, 4).to(device) return model, ddp_model, input, target def _prepare_multi_device_module( self, 
process_group, devices, device_ids, global_batch_size, gradient_as_bucket_view=False, ): self.assertTrue( len(devices) == 2 or len(devices) == 4, "unexpected devices for ddp tests {}".format(devices), ) if len(devices) == 2: model = DoubleGpuNet(devices) elif len(devices) == 4: model = QuadraGpuNet(devices) ddp_model = DistributedDataParallel( copy.deepcopy(model), device_ids=device_ids, process_group=process_group, bucket_cap_mb=0.001, gradient_as_bucket_view=gradient_as_bucket_view, ) input = torch.randn(global_batch_size, 2).cuda(devices[0]) target = torch.randn(global_batch_size, 4) return model, ddp_model, input, target def _test_ddp_with_process_group( self, process_group, devices, device_ids, multi_device=False, gradient_as_bucket_view=False, ): """ Note: we pass down `device_ids` all the way to DistributedDataParallel as part of the test. Below you find tests that either use a list of integers, a list of `torch.Device` instances, or an empty list. The `devices` argument is used to control placement of the model and must always be specified as list of `torch.Device` instances. 
""" local_batch_size = 1 if devices is None else len(devices) global_batch_size = self.world_size * local_batch_size if multi_device: model, ddp_model, input, target = self._prepare_multi_device_module( process_group, devices, device_ids, global_batch_size, gradient_as_bucket_view, ) ddp_logging_data = ddp_model._get_ddp_logging_data() self.assertTrue(ddp_logging_data.get("is_multi_device_module")) else: model, ddp_model, input, target = self._prepare_single_device_module( process_group, devices, device_ids, global_batch_size, gradient_as_bucket_view, ) ddp_logging_data = ddp_model._get_ddp_logging_data() self.assertFalse(ddp_logging_data.get("is_multi_device_module")) def step_model(model, input, target): model.train() output = model(input) loss = F.mse_loss(output, target.to(output.device)) loss.backward() def update_parameters(model): for param in model.parameters(): with torch.no_grad(): param -= param.grad param.grad = None # check two model parameters over 2 iterations for iteration in range(2): # single cpu/gpu training step_model(model, input, target) # DDP training, DDP scatters subsets of input_cpu to nodes/GPUs step_model( ddp_model, input[ self.rank * local_batch_size : (self.rank + 1) * local_batch_size ], target[ self.rank * local_batch_size : (self.rank + 1) * local_batch_size ], ) # Update weights and run a second iteration to shake out errors update_parameters(model) update_parameters(ddp_model) self.assertEqual( len(list(model.parameters())), len(list(ddp_model.parameters())) ) for i, j in zip(model.parameters(), ddp_model.parameters()): self.assertEqual(i, j, rtol=1.3e-06, atol=5e-5) # Shuffle the input so that DDP input is different torch.manual_seed(1337 + iteration) input = input[torch.randperm(global_batch_size)] def _gpu_model_with_ddp_comm_hook( self, process_group, hook=None, gradient_as_bucket_view=False, state=None ): device_id = gpus_for_rank(self.world_size)[self.rank][0] gpu_model = DistributedDataParallel( 
ModuleForDdpCommHook().to(device_id), device_ids=[device_id], process_group=process_group, gradient_as_bucket_view=gradient_as_bucket_view, ) # Register a DDP communication hook if any. if hook is not None: gpu_model.register_comm_hook(state, hook) return gpu_model def _gpu_model_with_builtin_ddp_comm_hook( self, process_group, hook=None, gradient_as_bucket_view=False ): device_id = gpus_for_rank(self.world_size)[self.rank][0] gpu_model = DistributedDataParallel( ModuleForDdpCommHook().to(device_id), device_ids=[device_id], process_group=process_group, gradient_as_bucket_view=gradient_as_bucket_view, ) # Register a built-in DDP communication hook if defined if hook is not None: gpu_model._register_builtin_comm_hook(hook) return gpu_model def _run_and_verify_hook(self, model, input, expected_grad): # Run forward output = model(input, self.rank) # Run backward output.mean().backward() [self.assertEqual(p.grad, expected_grad) for p in model.parameters()] def _simple_hook( self, state: object, bucket: dist.GradBucket ) -> torch.futures.Future[torch.Tensor]: fut = torch.futures.Future() fut.set_result(torch.ones_like(bucket.buffer())) def fut_then(fut): # Add ones to fut's result. 
t = fut.value() return t + torch.ones_like(t) return fut.then(fut_then) class DistributedDataParallelTest( AbstractDistributedDataParallelTest, MultiProcessTestCase ): def setUp(self): super(DistributedDataParallelTest, self).setUp() self._spawn_processes() def test_invalid_powerSGD_state(self): for start_powerSGD_iter, use_error_feedback, warm_start in product( [0, 1], [True, False], [True, False] ): if not use_error_feedback and not warm_start: continue with self.assertRaisesRegex( ValueError, "Expect `start_powerSGD_iter` > 1 if `use_error_feedback` or `warm_start` is enabled, " "because PowerSGD can only be applied after the first two iterations in DDP.", ): state = powerSGD.PowerSGDState( process_group=None, matrix_approximation_rank=1, start_powerSGD_iter=start_powerSGD_iter, use_error_feedback=use_error_feedback, warm_start=warm_start, ) class ComputeBucketAssignmentTest(TestCase): def test_single_limit_single_dtype(self): tensors = [ torch.empty([100], dtype=torch.float), torch.empty([200], dtype=torch.float), torch.empty([100], dtype=torch.float), torch.empty([50], dtype=torch.float), ] result, per_bucket_size_limits = dist._compute_bucket_assignment_by_size( tensors, [400] ) self.assertTrue(all(size_lim == 400 for size_lim in per_bucket_size_limits)) self.assertEqual([[0], [1], [2], [3]], result) def test_single_limit_multi_dtype(self): tensors = [ torch.empty([50], dtype=torch.float), torch.empty([25], dtype=torch.double), torch.empty([50], dtype=torch.float), torch.empty([25], dtype=torch.double), torch.empty([50], dtype=torch.float), torch.empty([25], dtype=torch.double), ] result, per_bucket_size_limits = dist._compute_bucket_assignment_by_size( tensors, [400] ) self.assertTrue(all(size_lim == 400 for size_lim in per_bucket_size_limits)) self.assertEqual([[0, 2], [1, 3], [4], [5]], result) def test_multi_limit_single_dtype(self): tensors = [ torch.empty([10], dtype=torch.float), torch.empty([10], dtype=torch.float), torch.empty([10], 
dtype=torch.float), torch.empty([10], dtype=torch.float), ] result, per_bucket_size_limits = dist._compute_bucket_assignment_by_size( tensors, [40, 80] ) self.assertEqual(per_bucket_size_limits, [40, 80, 80]) self.assertEqual([[0], [1, 2], [3]], result) def test_multi_limit_multi_dtype(self): tensors = [ torch.empty([50], dtype=torch.float), torch.empty([25], dtype=torch.double), torch.empty([50], dtype=torch.float), torch.empty([25], dtype=torch.double), torch.empty([50], dtype=torch.float), torch.empty([25], dtype=torch.double), ] result, per_bucket_size_limits = dist._compute_bucket_assignment_by_size( tensors, [200, 400] ) self.assertEqual([[0], [1], [2, 4], [3, 5]], result) self.assertEqual(per_bucket_size_limits, [200, 200, 400, 400]) class AbstractCommTest(object): @property def op_timeout_sec(self): return 1 @property def world_size(self): return 2 def _verify_sequence_number_across_pg(self, pg, verify_pg): seq_num = pg._get_sequence_number_for_group() obj_list = [None for _ in range(dist.get_world_size(verify_pg))] # We use a separate pg to verify the sequence numbers, otherwise these # collectives will themselves increment the sequence number. dist.all_gather_object(obj_list, seq_num, group=verify_pg) self.assertEqual(len(set(obj_list)), 1) return obj_list[0] def _test_sequence_num_incremented(self, process_group, ranks): # verify initial sequence numbers. Use a distinct process group for # verification to keep counts as expected with respect to process_group. 
verify_pg = dist.new_group( ranks=ranks, backend="gloo", ) assert dist.get_world_size(process_group) == dist.get_world_size(verify_pg) initial_num = ( self._verify_sequence_number_across_pg( pg=process_group, verify_pg=verify_pg ) if not c10d._rank_not_in_group(process_group) else -1 ) # Verify sequence numbers are appropriately incremented for i in range(10): t = torch.ones(1, device=torch.cuda.current_device()) dist.all_reduce(t, group=process_group) if not c10d._rank_not_in_group(process_group): seq_num = self._verify_sequence_number_across_pg( pg=process_group, verify_pg=verify_pg, ) self.assertEqual(initial_num + i + 1, seq_num) if dist.get_world_size(process_group) > 2: # Test when certain ranks don't call collectives if dist.get_rank(process_group) not in [0, 2]: dist.all_reduce(t, group=process_group, async_op=True) # Now ranks 0 and 2 should be lagging by 1. if not c10d._rank_not_in_group(process_group): seq_num = process_group._get_sequence_number_for_group() rank = dist.get_rank(process_group) obj_list = [None for _ in range(dist.get_world_size(verify_pg))] dist.all_gather_object(obj_list, (rank, seq_num), group=verify_pg) rank_to_seq_num = {rank: num for (rank, num) in obj_list} self.assertEqual(len(set(rank_to_seq_num.values())), 2) self.assertEqual(rank_to_seq_num[0], rank_to_seq_num[2]) expected_same = { rank_to_seq_num[i] for i in rank_to_seq_num.keys() if i not in [0, 2] } self.assertEqual(len(expected_same), 1) self.assertEqual(rank_to_seq_num[0] + 1, rank_to_seq_num[1]) def _test_sequence_num_incremented_default_group(self, backend_name): torch.cuda.set_device(self.rank) store = dist.FileStore(self.file_name, self.world_size) dist.init_process_group( backend_name, world_size=self.world_size, rank=self.rank, store=store, ) self._test_sequence_num_incremented( c10d._get_default_group(), ranks=list(i for i in range(dist.get_world_size())), ) def _test_sequence_num_incremented_subgroup(self, backend_name): torch.cuda.set_device(self.rank) store = 
dist.FileStore(self.file_name, self.world_size) dist.init_process_group( backend_name, world_size=self.world_size, rank=self.rank, store=store, ) subgroup_ranks = [0, 1, 2] subgroup = dist.new_group(subgroup_ranks) self._test_sequence_num_incremented(subgroup, subgroup_ranks) def _test_sequence_num_set_default_pg(self, backend): store = dist.FileStore(self.file_name, self.world_size) dist.init_process_group( backend, world_size=self.world_size, rank=self.rank, store=store, ) default_pg = c10d._get_default_group() seq_num = default_pg._get_sequence_number_for_group() obj_list = [None for _ in range(dist.get_world_size())] dist.all_gather_object(obj_list, seq_num) self.assertEqual(len(set(obj_list)), 1) def _test_sequence_num_set_new_group(self, backend): store = dist.FileStore(self.file_name, self.world_size) dist.init_process_group( backend, world_size=self.world_size, rank=self.rank, store=store, ) subgroup = dist.new_group([0, 1]) if not c10d._rank_not_in_group(subgroup): subgroup_seq = subgroup._get_sequence_number_for_group() obj_list = [None for _ in range(dist.get_world_size(subgroup))] dist.all_gather_object(obj_list, subgroup_seq, group=subgroup) self.assertEqual(len(set(obj_list)), 1) class CommTest(AbstractCommTest, MultiProcessTestCase): def setUp(self): super(CommTest, self).setUp() self._spawn_processes() def tearDown(self): super(CommTest, self).tearDown() try: os.remove(self.file_name) except OSError: pass def test_distributed_debug_mode(self): # Default should be off default_debug_mode = dist._get_debug_mode() self.assertEqual(default_debug_mode, dist._DistributedDebugLevel.OFF) mapping = { "OFF": dist._DistributedDebugLevel.OFF, "INFO": dist._DistributedDebugLevel.INFO, "DETAIL": dist._DistributedDebugLevel.DETAIL, } invalid_debug_modes = ["foo", 0, 1, -1] for mode in mapping.keys(): os.environ["TORCH_DISTRIBUTED_DEBUG"] = str(mode) set_debug_mode = dist._get_debug_mode() self.assertEqual( set_debug_mode, mapping[mode], f"Expected {mode} to map to 
{mapping[mode]} but got {set_debug_mode}", ) for mode in invalid_debug_modes: os.environ["TORCH_DISTRIBUTED_DEBUG"] = str(mode) with self.assertRaisesRegex(RuntimeError, "to be one of"): dist._get_debug_mode() class DummyWork(dist._Work): def wait(self, timeout=5.0): if torch.cuda.is_available(): torch.cuda.current_stream().synchronize() return True class DummyProcessGroup(dist.ProcessGroup): def getBackendName(self): return "Dummy" def allgather(self, output_tensor_lists, input_tensor_list, opts=None): for output_tensor_list, input_tensor in zip(output_tensor_lists, input_tensor_list): for output_tensor in output_tensor_list: output_tensor.copy_(input_tensor) return DummyWork() def allreduce(self, tensor_list, opts=None): for tensor in tensor_list: tensor.add_(2) return DummyWork() def broadcast(self, tensor_list, opts=None): for tensor in tensor_list: tensor.add_(1) return DummyWork() def reduce_scatter(self, output_tensor_list, input_tensor_lists, opts=None): for output_tensor, input_tensor_list in zip(output_tensor_list, input_tensor_lists): output_tensor.copy_(input_tensor_list[self.rank()]) return DummyWork() def send(self, tensor_list, dst, tag=0): for tensor in tensor_list: tensor.add_(1) return DummyWork() def recv(self, tensor_list, src, tag=0): for tensor in tensor_list: tensor.add_(2) return DummyWork() class PythonProcessGroupTest(MultiProcessTestCase): def setUp(self): super(PythonProcessGroupTest, self).setUp() self._spawn_processes() def tearDown(self): super(PythonProcessGroupTest, self).tearDown() try: os.remove(self.file_name) except OSError: pass def test_get_backend_name(self): dpg = DummyProcessGroup(0, 1) self.assertEqual("Dummy", dpg.name()) def test_backend_class_attr(self): dist.Backend.register_backend( "dummy", PythonProcessGroupTest.create_dummy ) self.assertEqual(dist.Backend.DUMMY, "DUMMY") self.assertEqual( dist.Backend._plugins["DUMMY"], PythonProcessGroupTest.create_dummy ) @staticmethod def create_dummy(store, rank, size, 
timeout): return DummyProcessGroup(rank, size) @unittest.skipIf( common.IS_MACOS, "Python c10d extension is not yet supported on MacOS" ) def test_collectives(self): dist.Backend.register_backend("dummy", PythonProcessGroupTest.create_dummy) os.environ['MASTER_ADDR'] = 'localhost' os.environ['MASTER_PORT'] = '6789' dist.init_process_group("dummy", rank=self.rank, world_size=self.world_size) # test all_gather input_tensor = torch.ones(2, 2) * 7 output_tensor_list = [torch.zeros(2, 2) for _ in range(self.world_size)] dist.all_gather(output_tensor_list, input_tensor) for tensor in output_tensor_list: self.assertEqual(tensor, input_tensor) # test all_reduce input_tensor = torch.ones(2, 2) * 7 dist.all_reduce(input_tensor) self.assertEqual(input_tensor, torch.ones(2, 2) * 7 + 2) # test broadcast input_tensor = torch.zeros(2, 2) dist.broadcast(input_tensor, 0, async_op=True).wait() self.assertEqual(torch.ones(2, 2), input_tensor) # test reduce_scatter output_tensor = torch.zeros(2, 2) input_tensor_list = [torch.ones(2, 2) for _ in range(self.world_size)] dist.reduce_scatter(output_tensor, input_tensor_list) self.assertEqual(output_tensor, torch.zeros(2, 2) + 1) dist.destroy_process_group() @unittest.skipIf( common.IS_MACOS, "Python c10d extension is not yet supported on MacOS" ) def test_send_recv(self): dist.Backend.register_backend("dummy", PythonProcessGroupTest.create_dummy) os.environ['MASTER_ADDR'] = 'localhost' os.environ['MASTER_PORT'] = '6789' dist.init_process_group("dummy", rank=self.rank, world_size=self.world_size) # test send input_tensor = torch.zeros(2, 2) dist.send(input_tensor, (self.rank + 1) % self.world_size) self.assertEqual(input_tensor, torch.zeros(2, 2) + 1) # test recv input_tensor = torch.zeros(2, 2) dist.recv(input_tensor, (self.rank + 1) % self.world_size) self.assertEqual(input_tensor, torch.zeros(2, 2) + 2) # intentionally not calling into `destroy_process_group` as not all # user applications would explicitly that. 
if __name__ == "__main__": assert ( not torch.cuda._initialized ), "test_distributed must not have initialized CUDA context on main process" run_tests()
[]
[]
[ "TORCH_DISTRIBUTED_DEBUG", "MASTER_ADDR", "MASTER_PORT" ]
[]
["TORCH_DISTRIBUTED_DEBUG", "MASTER_ADDR", "MASTER_PORT"]
python
3
0
berlapan/settings.py
""" Django settings for berlapan project. Generated by 'django-admin startproject' using Django 3.2.7. For more information on this file, see https://docs.djangoproject.com/en/3.2/topics/settings/ For the full list of settings and their values, see https://docs.djangoproject.com/en/3.2/ref/settings/ """ import os from pathlib import Path import dj_database_url # Build paths inside the project like this: BASE_DIR / 'subdir'. BASE_DIR = Path(__file__).resolve().parent.parent # Quick-start development settings # See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/ # for best-practices. # SECURITY WARNING: keep the secret key used in production secret! # Please set SECRET_KEY environment variable in your production environment # (e.g. Heroku). SECRET_KEY = os.getenv('SECRET_KEY', 'django-insecure-nk@v31jj#vq_xd)s9uns%nkmj^o0efdm$-bj7dm8jz=t76_q-c') # Automatically determine environment by detecting if DATABASE_URL variable. # DATABASE_URL is provided by Heroku if a database add-on is added # (e.g. Heroku Postgres). PRODUCTION = os.getenv('DATABASE_URL') is not None # SECURITY WARNING: don't run with debug turned on in production! # If you want to enable debugging on Heroku for learning purposes, # set this to True. 
DEBUG = not PRODUCTION HEROKU_APP_NAME = os.getenv('HEROKU_APP_NAME', '') ALLOWED_HOSTS = [f'{HEROKU_APP_NAME}.herokuapp.com'] if not PRODUCTION: ALLOWED_HOSTS += ['.localhost', '127.0.0.1', '[::1]'] # Application definition INSTALLED_APPS = [ 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', 'main', 'salingbantu', 'users', 'daftar_vaksinasi', 'donordarah', 'relawanvaksin', 'corsheaders', 'rest_framework', ] MIDDLEWARE = [ 'django.middleware.security.SecurityMiddleware', 'whitenoise.middleware.WhiteNoiseMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', 'corsheaders.middleware.CorsMiddleware', ] ROOT_URLCONF = 'berlapan.urls' CORS_ORIGIN_ALLOW_ALL = True CORS_ALLOW_METHODS =[ 'GET', 'POST', ] TEMPLATES = [ { 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': [ BASE_DIR / 'templates', ], 'APP_DIRS': True, 'OPTIONS': { 'context_processors': [ 'django.template.context_processors.debug', 'django.template.context_processors.request', 'django.contrib.auth.context_processors.auth', 'django.contrib.messages.context_processors.messages', ], }, }, ] WSGI_APPLICATION = 'berlapan.wsgi.application' # Database # https://docs.djangoproject.com/en/3.2/ref/settings/#databases DATABASES = { 'default': { 'ENGINE': 'django.db.backends.sqlite3', 'NAME': BASE_DIR / 'db.sqlite3', } } # Set database settings automatically using DATABASE_URL. 
if PRODUCTION: DATABASES['default'] = dj_database_url.config( conn_max_age=600, ssl_require=True ) # Password validation # https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators AUTH_PASSWORD_VALIDATORS = [ { 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator', }, { 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', }, { 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator', }, { 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator', }, ] # Internationalization # https://docs.djangoproject.com/en/3.2/topics/i18n/ # Feel free to change these according to your needs. LANGUAGE_CODE = 'en-us' TIME_ZONE = 'UTC' USE_I18N = True USE_L10N = True USE_TZ = True # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/3.2/howto/static-files/ STATIC_URL = '/static/' # This is the directory for storing `collectstatic` results. # This shouldn't be included in your Git repository. STATIC_ROOT = BASE_DIR / 'staticfiles' # You can use this directory to store project-wide static files. STATICFILES_DIRS = [ BASE_DIR / 'static', ] # Make sure the directories exist to prevent errors when doing `collectstatic`. for directory in [*STATICFILES_DIRS, STATIC_ROOT]: directory.mkdir(exist_ok=True) # Enable compression and caching features of whitenoise. # You can remove this if it causes problems on your setup.
[]
[]
[ "SECRET_KEY", "DATABASE_URL", "HEROKU_APP_NAME" ]
[]
["SECRET_KEY", "DATABASE_URL", "HEROKU_APP_NAME"]
python
3
0
examples_h2o_test.go
package adapter import ( "fmt" "io" "net/http" "net/http/cookiejar" "os" "os/exec" "path/filepath" "syscall" "testing" "time" ) func testStartH2O() (*exec.Cmd, error) { wd, _ := os.Getwd() os.Chdir(filepath.Join(wd, "examples", "h2o")) defer os.Chdir(wd) cmd := exec.Command("h2o", "-c", "h2o.conf") cmd.Stdout = os.Stderr cmd.Stderr = os.Stderr return cmd, cmd.Start() } func testStopH2O(cmd *exec.Cmd) { cmd.Process.Signal(syscall.SIGTERM) cmd.Wait() } func TestH2O(t *testing.T) { if os.Getenv("CI") == "" { t.Log("SKIP in not CI environment. if you want this test, execute `CI=1 go test .`.") return } h2o, err := testStartH2O() if err != nil { t.Error(err) return } defer testStopH2O(h2o) fmt.Fprintln(os.Stderr, "start h2o") // wait for h2o is ready time.Sleep(time.Second) fmt.Fprintln(os.Stderr, "awake") c := NewConfig() c.Providers = map[string]map[string]interface{}{ "development": {}, } c.Cookie = &CookieConfig{ Path: "/", MaxAge: 60 * 60 * 24 * 3, Secure: false, HTTPOnly: true, SameSite: "lax", } s, err := NewServer(*c) if err != nil { t.Error(err) return } go http.ListenAndServe(":18081", s) go http.ListenAndServe(":18082", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { // XXX: it seems that h2o does not support to modify requests :( // if got, expected := r.Header.Get("x-ngx-omniauth-provider"), "development"; got != expected { // t.Errorf("want %s, got %s", expected, git) // } // if got, expected := r.Header.Get("x-ngx-omniauth-user"), "developer"; got != expected { // t.Errorf("want %s, got %s", expected, git) // } // if r.Header.Get("x-ngx-omniauth-info") == "" { // t.Errorf("want x-ngx-omniauth-info is set, but empty") // } fmt.Fprintln(w, "Hello, client") })) jar, _ := cookiejar.New(nil) client := http.Client{ Jar: jar, // Note: // it takes a long time to shutdown gracefully when keep-alives is enabled. 
Transport: &http.Transport{DisableKeepAlives: true}, } resp, err := client.Get("http://ngx-auth-test.loopback.shogo82148.com:18080/") if err != nil { t.Error(err) } defer resp.Body.Close() b, err := io.ReadAll(resp.Body) if err != nil { t.Fatal(err) } fmt.Fprintln(os.Stderr, string(b)) if string(b) != "Hello, client\n" { t.Errorf("want Hello, client, got %s", string(b)) } }
[ "\"CI\"" ]
[]
[ "CI" ]
[]
["CI"]
go
1
0
deep_rl/utils/torch_utils.py
####################################################################### # Copyright (C) 2017 Shangtong Zhang([email protected]) # # Permission given to modify the code as long as you keep this # # declaration at the top # ####################################################################### from .config import * import torch import os import random from torch import nn import torch.nn.functional as F def select_device(gpu_id): # if torch.cuda.is_available() and gpu_id >= 0: if gpu_id >= 0: Config.DEVICE = torch.device('cuda:%d' % (gpu_id)) else: Config.DEVICE = torch.device('cpu') def tensor(x, dtype=torch.float32): if torch.is_tensor(x): return x.type(dtype) x = torch.tensor(x, device=Config.DEVICE, dtype=dtype) return x def is_cuda(x): if isinstance(x, nn.Module): return all([p.is_cuda for p in x.parameters()]) return x.is_cuda def tensor_dict(d, dtype=torch.float32): return {k: tensor(v, dtype=dtype) for k, v in d.items()} def range_tensor(end): return torch.arange(end).long().to(Config.DEVICE) def to_np(t): if torch.is_tensor(t): return t.cpu().detach().numpy() return t def random_seed(seed=None): np.random.seed(seed) random.seed(seed) torch.manual_seed(np.random.randint(int(1e6))) def set_one_thread(): os.environ['OMP_NUM_THREADS'] = '1' os.environ['MKL_NUM_THREADS'] = '1' torch.set_num_threads(1) def huber(x, k=1.0): return torch.where(x.abs() < k, 0.5 * x.pow(2), k * (x.abs() - 0.5 * k)) def epsilon_greedy(epsilon, x): if len(x.shape) == 1: return np.random.randint(len(x)) if np.random.rand() < epsilon else np.argmax(x) elif len(x.shape) == 2: random_actions = np.random.randint(x.shape[1], size=x.shape[0]) greedy_actions = np.argmax(x, axis=-1) dice = np.random.rand(x.shape[0]) return np.where(dice < epsilon, random_actions, greedy_actions) def sync_grad(target_network, src_network): for param, src_param in zip(target_network.parameters(), src_network.parameters()): param._grad = src_param.grad.clone() def diag_gt(score_matrix): assert score_matrix.dim() 
== 2, 'score matrix needs dim = 2.' return torch.LongTensor(range(score_matrix.size(0))).to(score_matrix.device) def batch_linear(input, weight, bias=None): """ input: (N, D), weight: (N, D, H), bias: (N, H) """ if bias is not None: return torch.bmm(input.unsqueeze(1), weight).squeeze(1) + bias else: return torch.bmm(input.unsqueeze(1), weight).squeeze(1) class one_hot: # input: LongTensor of any shape # output one dim more, with one-hot on new dim @staticmethod def encode(indices, dim): encodings = torch.zeros(*indices.shape, dim).to(indices.device) encodings.scatter_(-1, indices.view(*indices.shape, 1), 1) return encodings # input: one_hot of any shape, last dim is one hot # output: indices of that shape @staticmethod def decode(encodings): _, indices = encodings.max(dim=-1) return indices ### optimizer ### class VanillaOptimizer: def __init__(self, params, opt, grad_clip=None): self.params = params self.opt = opt # params already passed in self.grad_clip = grad_clip def step(self, loss, retain_graph=False): self.opt.zero_grad() loss.backward(retain_graph=retain_graph) if self.grad_clip: nn.utils.clip_grad_norm_(self.params, self.grad_clip) self.opt.step() # update the first / second params using the first / second opt with freq_list[0/1] times before switching class AlternateOptimizer: def __init__(self, params_list, opt_list, freq_list, grad_clip): self.params_list = params_list self.opt_list = opt_list self.freq_list = freq_list self.grad_clip = grad_clip self.cur = 0 # current parameter to update self.t = 0 # count how many times the current parameter has been update def step(self, loss, retain_graph=False): opt = self.opt_list[self.cur] opt.zero_grad() loss.backward(retain_graph=retain_graph) nn.utils.clip_grad_norm_(self.params_list[self.cur], self.grad_clip) opt.step() self.t += 1 if self.t >= self.freq_list[self.cur]: self.t = 0 self.cur = 1 - self.cur # https://gist.github.com/yzh119/fd2146d2aeb329d067568a493b20172f class gumbel_softmax: @staticmethod 
def sample_gumbel(shape, eps=1e-20): U = torch.rand(shape) return -torch.log(-torch.log(U + eps) + eps) @staticmethod def soft_sample(logits, temperature): y = logits + gumbel_softmax.sample_gumbel(logits.size()).to(logits.device) return F.softmax(y / temperature, dim=-1) @staticmethod def hard_sample(logits, temperature): y = gumbel_softmax.soft_sample(logits, temperature) ind = y.argmax(dim=-1) y_hard = one_hot.encode(ind, logits.size(-1)) return (y_hard - y).detach() + y class relaxed_Bernolli: @staticmethod def sample_logit(shape, eps=1e-20): U = torch.rand(shape) return torch.log(U + eps) - torch.log(1 - U + eps) @staticmethod def sample(logits): return logits + relaxed_Bernolli.sample_logit(logits.size()).to(logits.device) @staticmethod def soft_sample(logits, temperature): return F.sigmoid(relaxed_Bernolli.sample(logits) / temperature) @staticmethod def hard_sample(logits, temperature): y = relaxed_Bernolli.soft_sample(logits, temperature) y_hard = (y > 0.5).type(y.dtype) return (y_hard - y).detach() + y class ListModule(nn.Module): def __init__(self, *args): super(ListModule, self).__init__() idx = 0 for module in args: self.add_module(str(idx), module) idx += 1 def __getitem__(self, idx): if idx < 0 or idx >= len(self._modules): raise IndexError('index {} is out of range'.format(idx)) it = iter(self._modules.values()) for i in range(idx): next(it) return next(it) def __iter__(self): return iter(self._modules.values()) def __len__(self): return len(self._modules)
[]
[]
[ "MKL_NUM_THREADS", "OMP_NUM_THREADS" ]
[]
["MKL_NUM_THREADS", "OMP_NUM_THREADS"]
python
2
0
providers/lastfm/lastfm_test.go
package lastfm import ( "fmt" "net/url" "os" "testing" "github.com/ImVexed/goth" "github.com/stretchr/testify/assert" ) func Test_New(t *testing.T) { t.Parallel() a := assert.New(t) provider := lastfmProvider() a.Equal(provider.ClientKey, os.Getenv("LASTFM_KEY")) a.Equal(provider.Secret, os.Getenv("LASTFM_SECRET")) a.Equal(provider.CallbackURL, "/foo") } func Test_Implements_Provider(t *testing.T) { t.Parallel() a := assert.New(t) a.Implements((*goth.Provider)(nil), lastfmProvider()) } func Test_BeginAuth(t *testing.T) { t.Parallel() a := assert.New(t) provider := lastfmProvider() session, err := provider.BeginAuth("") s := session.(*Session) a.NoError(err) a.Contains(s.AuthURL, "www.lastfm.com.br/api/auth") a.Contains(s.AuthURL, fmt.Sprintf("api_key=%s", os.Getenv("LASTFM_KEY"))) a.Contains(s.AuthURL, fmt.Sprintf("callback=%s", url.QueryEscape("/foo"))) } func Test_SessionFromJSON(t *testing.T) { t.Parallel() a := assert.New(t) provider := lastfmProvider() s, err := provider.UnmarshalSession(`{"AuthURL":"http://com/auth_url","AccessToken":"123456", "Login":"Quin"}`) a.NoError(err) session := s.(*Session) a.Equal(session.AuthURL, "http://com/auth_url") a.Equal(session.AccessToken, "123456") a.Equal(session.Login, "Quin") } func lastfmProvider() *Provider { return New(os.Getenv("LASTFM_KEY"), os.Getenv("LASTFM_SECRET"), "/foo") }
[ "\"LASTFM_KEY\"", "\"LASTFM_SECRET\"", "\"LASTFM_KEY\"", "\"LASTFM_KEY\"", "\"LASTFM_SECRET\"" ]
[]
[ "LASTFM_KEY", "LASTFM_SECRET" ]
[]
["LASTFM_KEY", "LASTFM_SECRET"]
go
2
0
components/teams-service/server/team_test.go
// Package server_test holds the gRPC-level test suite for the teams-service
// server.
package server_test

import (
	"context"
	"errors"
	"os"
	"testing"

	version_api "github.com/chef/automate/api/external/common/version"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	"google.golang.org/grpc"
	"google.golang.org/grpc/codes"

	"github.com/chef/automate/api/interservice/authz"
	"github.com/chef/automate/api/interservice/teams"
	"github.com/chef/automate/lib/grpc/auth_context"
	"github.com/chef/automate/lib/grpc/grpctest"
	"github.com/chef/automate/lib/grpc/secureconn"
	"github.com/chef/automate/lib/logger"
	"github.com/chef/automate/lib/pcmp/prequire"
	"github.com/chef/automate/lib/tls/test/helpers"
	"github.com/chef/automate/lib/tracing"
	"github.com/chef/automate/lib/version"

	"github.com/chef/automate/components/teams-service/constants"
	team_serv "github.com/chef/automate/components/teams-service/server"
	"github.com/chef/automate/components/teams-service/service"
	"github.com/chef/automate/components/teams-service/storage"
	"github.com/chef/automate/components/teams-service/storage/postgres/migration"
	"github.com/chef/automate/components/teams-service/test"
)

// TestTeamsGRPC is the entry point for the whole server suite. It picks a
// storage backend — in-memory when no Postgres migration config is
// available, Postgres otherwise — spins up the service, and delegates the
// actual assertions to runAllServerTests.
func TestTeamsGRPC(t *testing.T) {
	ctx := context.Background()
	l, err := logger.NewLogger("text", "debug")
	require.NoError(t, err, "could not init logger", err)

	// Nil migration config means "PG tests are not to be run" for this
	// environment; any other error is fatal.
	migrationConfig, err := test.MigrationConfigIfPGTestsToBeRun(l, "../storage/postgres/migration/sql")
	if err != nil {
		t.Fatalf("couldn't initialize pg config for tests: %s", err.Error())
	}

	if migrationConfig == nil {
		// In-memory backend only.
		serv, serviceRef, conn, close, authzMock := setupTeamsService(ctx, t, l, nil)
		runAllServerTests(t, serv, serviceRef, authzMock, teams.NewTeamsServiceClient(conn), close)
	} else {
		// Postgres backend.
		serv, serviceRef, conn, close, authzMock := setupTeamsService(ctx, t, l, migrationConfig)
		runAllServerTests(t, serv, serviceRef, authzMock, teams.NewTeamsServiceClient(conn), close)

		// If ciMode, run in-memory AND PG
		// else just run PG.
if os.Getenv("CI") == "true" { serv, serviceRef, conn, close, authzMock := setupTeamsService(ctx, t, l, nil) runAllServerTests(t, serv, serviceRef, authzMock, teams.NewTeamsServiceClient(conn), close) } } } func runAllServerTests( t *testing.T, serv *team_serv.TeamServer, serviceRef *service.Service, authzMock *authz.PoliciesServiceServerMock, cl teams.TeamsServiceClient, close func()) { t.Helper() defer close() t.Run("GetVersion", func(t *testing.T) { version.Version = "20200417212701" version.BuildTime = "20200417212701" version.GitSHA = "eaf1f3553eb64fb9f393366e8ba4ee61e515727e" resp, err := cl.GetVersion(context.Background(), &version_api.VersionInfoRequest{}) require.NoError(t, err) expectedVersion := &version_api.VersionInfo{ Name: "teams-service", Version: "20200417212701", Built: "20200417212701", Sha: "eaf1f3553eb64fb9f393366e8ba4ee61e515727e", } prequire.Equal(t, expectedVersion, resp) }) t.Run("GetTeam", func(t *testing.T) { resetState(context.Background(), t, serviceRef) t.Run("when the team does not exist", func(t *testing.T) { ctx := context.Background() resp, err := cl.GetTeam(ctx, &teams.GetTeamReq{ Id: "test team", }) require.Nil(t, resp) grpctest.AssertCode(t, codes.NotFound, err) }) t.Run("when querying for the admins team", func(t *testing.T) { ctx := context.Background() resp, err := cl.GetTeam(ctx, &teams.GetTeamReq{ Id: storage.AdminsTeamID, }) require.NoError(t, err) require.NotNil(t, resp) assert.Equal(t, storage.AdminsTeamID, resp.Team.Id) assert.Equal(t, "admins", resp.Team.Name) }) t.Run("when the team exists", func(t *testing.T) { ctx := context.Background() initResp, err := cl.CreateTeam(ctx, &teams.CreateTeamReq{ Id: "other-team", Name: "i can be the very best...", Projects: []string{"project1", "project2"}, }) require.NoError(t, err) require.NotNil(t, initResp) resp, err := cl.GetTeam(ctx, &teams.GetTeamReq{ Id: initResp.Team.Id, }) require.NoError(t, err) require.NotNil(t, resp) assert.Equal(t, initResp.Team.Id, resp.Team.Id) 
assert.Equal(t, initResp.Team.Name, resp.Team.Name) cleanupTeam(t, cl, initResp.Team.Id) }) }) t.Run("ListTeams", func(t *testing.T) { resetState(context.Background(), t, serviceRef) t.Run("when the list is successfully returned", func(t *testing.T) { ctx := context.Background() resp1, err := cl.CreateTeam(ctx, &teams.CreateTeamReq{ Id: "montag", Name: "he is a dag", Projects: []string{"project1", "project2"}, }) require.NoError(t, err) resp2, err := cl.CreateTeam(ctx, &teams.CreateTeamReq{ Id: "other-team", Name: "does not matter", Projects: []string{"project1", "project2"}, }) require.NoError(t, err) list, err := cl.ListTeams(ctx, &teams.ListTeamsReq{}) require.NoError(t, err) require.NotNil(t, list) assert.Contains(t, list.Teams, resp1.Team) assert.Contains(t, list.Teams, resp2.Team) assert.Equal(t, 2+len(storage.NonDeletableTeams), len(list.Teams)) cleanupTeam(t, cl, resp1.Team.Id) cleanupTeam(t, cl, resp2.Team.Id) }) t.Run("when the list is successfully returned and filtered by projects", func(t *testing.T) { ctx := context.Background() resp1, err := cl.CreateTeam(ctx, &teams.CreateTeamReq{ Id: "montag", Name: "he is a dag", Projects: []string{"project1", "project2"}, }) require.NoError(t, err) resp2, err := cl.CreateTeam(ctx, &teams.CreateTeamReq{ Id: "other-team", Name: "does not matter", Projects: []string{"project2"}, }) require.NoError(t, err) ctx = insertProjectsIntoNewContext([]string{"project1"}) list, err := cl.ListTeams(ctx, &teams.ListTeamsReq{}) require.NoError(t, err) require.NotNil(t, list) assert.Contains(t, list.Teams, resp1.Team) assert.Equal(t, 1, len(list.Teams)) cleanupTeam(t, cl, resp1.Team.Id) cleanupTeam(t, cl, resp2.Team.Id) }) t.Run("when the list is successfully returned and filtered by *", func(t *testing.T) { ctx := context.Background() resp1, err := cl.CreateTeam(ctx, &teams.CreateTeamReq{ Id: "montag", Name: "he is a dag", Projects: []string{"project1", "project2"}, }) require.NoError(t, err) resp2, err := cl.CreateTeam(ctx, 
&teams.CreateTeamReq{ Id: "other-team", Name: "does not matter", Projects: []string{"project2"}, }) require.NoError(t, err) ctx = insertProjectsIntoNewContext([]string{"*"}) list, err := cl.ListTeams(ctx, &teams.ListTeamsReq{}) require.NoError(t, err) require.NotNil(t, list) assert.Contains(t, list.Teams, resp1.Team) assert.Contains(t, list.Teams, resp2.Team) assert.Equal(t, 2+len(storage.NonDeletableTeams), len(list.Teams)) cleanupTeam(t, cl, resp1.Team.Id) cleanupTeam(t, cl, resp2.Team.Id) }) t.Run("when the list is successfully returned and filtered by (unassigned)", func(t *testing.T) { ctx := context.Background() resp1, err := cl.CreateTeam(ctx, &teams.CreateTeamReq{ Id: "montag", Name: "he is a dag", }) require.NoError(t, err) resp2, err := cl.CreateTeam(ctx, &teams.CreateTeamReq{ Id: "other-team", Name: "does not matter", Projects: []string{"other_project"}, }) require.NoError(t, err) ctx = insertProjectsIntoNewContext([]string{constants.UnassignedProjectID}) list, err := cl.ListTeams(ctx, &teams.ListTeamsReq{}) require.NoError(t, err) require.NotNil(t, list) assert.Contains(t, list.Teams, resp1.Team) assert.Equal(t, 1+len(storage.NonDeletableTeams), len(list.Teams)) cleanupTeam(t, cl, resp1.Team.Id) cleanupTeam(t, cl, resp2.Team.Id) }) t.Run("when there is only the non-deletable teams", func(t *testing.T) { ctx := context.Background() resp, err := cl.ListTeams(ctx, &teams.ListTeamsReq{}) require.NoError(t, err) require.NotNil(t, resp) require.Equal(t, len(storage.NonDeletableTeams), len(resp.Teams)) }) }) t.Run("CreateTeam", func(t *testing.T) { resetState(context.Background(), t, serviceRef) t.Run("when a valid team is submitted", func(t *testing.T) { ctx := context.Background() req := &teams.CreateTeamReq{ Id: "gotta-catch-em-all", Name: "Corgis Inc.", Projects: []string{"project1", "project2"}, } resp, err := cl.CreateTeam(ctx, req) require.NoError(t, err) require.NotNil(t, resp) team := resp.Team assert.Equal(t, req.Id, team.Id) assert.Equal(t, 
req.Name, team.Name) cleanupTeam(t, cl, resp.Team.Id) }) t.Run("when no projects are passed", func(t *testing.T) { ctx := context.Background() req := &teams.CreateTeamReq{ Id: "gotta-catch-em-all", Name: "Corgis Inc.", Projects: []string{}, } resp, err := cl.CreateTeam(ctx, req) require.NoError(t, err) require.NotNil(t, resp) team := resp.Team assert.Equal(t, req.Id, team.Id) assert.Equal(t, req.Name, team.Name) assert.Equal(t, 0, len(team.Projects)) cleanupTeam(t, cl, resp.Team.Id) }) t.Run("when the team exists", func(t *testing.T) { ctx := context.Background() resp, err := cl.CreateTeam(ctx, &teams.CreateTeamReq{ Id: "some-name", Name: "montag", Projects: []string{"project1", "project2"}, }) require.NoError(t, err) resp2, err := cl.CreateTeam(ctx, &teams.CreateTeamReq{ Id: "some-name", Name: "does not matter", Projects: []string{"project1", "project2"}, }) assert.Nil(t, resp2) grpctest.AssertCode(t, codes.AlreadyExists, err) cleanupTeam(t, cl, resp.Team.Id) }) }) t.Run("DeleteTeam", func(t *testing.T) { resetState(context.Background(), t, serviceRef) t.Run("when an existing team is deleted", func(t *testing.T) { ctx := context.Background() teamToDeleteName := "First Name" authzMock.PurgeSubjectFromPoliciesFunc = func( _ context.Context, req *authz.PurgeSubjectFromPoliciesReq) (*authz.PurgeSubjectFromPoliciesResp, error) { if req.Subject == "team:local:"+teamToDeleteName { return &authz.PurgeSubjectFromPoliciesResp{}, nil } return nil, errors.New("unexpected team name passed to PurgeSubjectFromPolicies") } resp1, err := cl.CreateTeam(ctx, &teams.CreateTeamReq{ Id: teamToDeleteName, Name: "montag", Projects: []string{"project1", "project2"}, }) require.NoError(t, err) require.NotNil(t, resp1) resp2, err := cl.CreateTeam(ctx, &teams.CreateTeamReq{ Id: "Other Name", Name: "does not matter", Projects: []string{"project1", "project2"}, }) require.NoError(t, err) require.NotNil(t, resp2) teamListBefore, err := cl.ListTeams(ctx, &teams.ListTeamsReq{}) require.NoError(t, 
err) assert.Equal(t, 2+len(storage.NonDeletableTeams), len(teamListBefore.Teams)) resp, err2 := cl.DeleteTeam(ctx, &teams.DeleteTeamReq{Id: resp1.Team.Id}) require.NoError(t, err2) require.NotNil(t, resp) assert.Equal(t, resp1.Team.Id, resp.Team.Id) assert.Equal(t, resp1.Team.Name, resp.Team.Name) teamListAfter, err3 := cl.ListTeams(ctx, &teams.ListTeamsReq{}) require.NoError(t, err3) assert.Equal(t, len(storage.NonDeletableTeams), len(teamListAfter.Teams)-1) assert.Contains(t, teamListAfter.Teams, resp2.Team) authzMock.PurgeSubjectFromPoliciesFunc = defaultMockPurgeFunc cleanupTeam(t, cl, resp2.Team.Id) }) t.Run("when an existing team is deleted and is in the project filter", func(t *testing.T) { ctx := context.Background() teamToDeleteName := "First Name" authzMock.PurgeSubjectFromPoliciesFunc = func( _ context.Context, req *authz.PurgeSubjectFromPoliciesReq) (*authz.PurgeSubjectFromPoliciesResp, error) { if req.Subject == "team:local:"+teamToDeleteName { return &authz.PurgeSubjectFromPoliciesResp{}, nil } return nil, errors.New("unexpected team name passed to PurgeSubjectFromPolicies") } resp1, err := cl.CreateTeam(ctx, &teams.CreateTeamReq{ Id: teamToDeleteName, Name: "montag", Projects: []string{"project1", "project2"}, }) require.NoError(t, err) require.NotNil(t, resp1) resp2, err := cl.CreateTeam(ctx, &teams.CreateTeamReq{ Id: "Other Name", Name: "does not matter", Projects: []string{"project1"}, }) require.NoError(t, err) require.NotNil(t, resp2) teamListBefore, err := cl.ListTeams(ctx, &teams.ListTeamsReq{}) require.NoError(t, err) assert.Equal(t, 2+len(storage.NonDeletableTeams), len(teamListBefore.Teams)) ctx = insertProjectsIntoNewContext([]string{"project2"}) resp, err2 := cl.DeleteTeam(ctx, &teams.DeleteTeamReq{Id: resp1.Team.Id}) require.NoError(t, err2) require.NotNil(t, resp) assert.Equal(t, resp1.Team.Id, resp.Team.Id) assert.Equal(t, resp1.Team.Name, resp.Team.Name) teamListAfter, err3 := cl.ListTeams(context.Background(), &teams.ListTeamsReq{}) 
require.NoError(t, err3) assert.Equal(t, len(storage.NonDeletableTeams), len(teamListAfter.Teams)-1) assert.Contains(t, teamListAfter.Teams, resp2.Team) authzMock.PurgeSubjectFromPoliciesFunc = defaultMockPurgeFunc cleanupTeam(t, cl, resp2.Team.Id) }) t.Run("when an existing team is deleted and the project filter is *", func(t *testing.T) { ctx := context.Background() teamToDeleteName := "First Name" authzMock.PurgeSubjectFromPoliciesFunc = func( _ context.Context, req *authz.PurgeSubjectFromPoliciesReq) (*authz.PurgeSubjectFromPoliciesResp, error) { if req.Subject == "team:local:"+teamToDeleteName { return &authz.PurgeSubjectFromPoliciesResp{}, nil } return nil, errors.New("unexpected team name passed to PurgeSubjectFromPolicies") } resp1, err := cl.CreateTeam(ctx, &teams.CreateTeamReq{ Id: teamToDeleteName, Name: "montag", Projects: []string{"project1", "project2"}, }) require.NoError(t, err) require.NotNil(t, resp1) resp2, err := cl.CreateTeam(ctx, &teams.CreateTeamReq{ Id: "Other Name", Name: "does not matter", Projects: []string{"project1"}, }) require.NoError(t, err) require.NotNil(t, resp2) teamListBefore, err := cl.ListTeams(ctx, &teams.ListTeamsReq{}) require.NoError(t, err) assert.Equal(t, 2+len(storage.NonDeletableTeams), len(teamListBefore.Teams)) ctx = insertProjectsIntoNewContext([]string{"*"}) resp, err2 := cl.DeleteTeam(ctx, &teams.DeleteTeamReq{Id: resp1.Team.Id}) require.NoError(t, err2) require.NotNil(t, resp) assert.Equal(t, resp1.Team.Id, resp.Team.Id) assert.Equal(t, resp1.Team.Name, resp.Team.Name) teamListAfter, err3 := cl.ListTeams(context.Background(), &teams.ListTeamsReq{}) require.NoError(t, err3) assert.Equal(t, len(storage.NonDeletableTeams), len(teamListAfter.Teams)-1) assert.Contains(t, teamListAfter.Teams, resp2.Team) authzMock.PurgeSubjectFromPoliciesFunc = defaultMockPurgeFunc cleanupTeam(t, cl, resp2.Team.Id) }) t.Run("when an existing team is deleted and the project filter is (unassigned)", func(t *testing.T) { ctx := 
context.Background() teamToDeleteName := "First Name" authzMock.PurgeSubjectFromPoliciesFunc = func( _ context.Context, req *authz.PurgeSubjectFromPoliciesReq) (*authz.PurgeSubjectFromPoliciesResp, error) { if req.Subject == "team:local:"+teamToDeleteName { return &authz.PurgeSubjectFromPoliciesResp{}, nil } return nil, errors.New("unexpected team name passed to PurgeSubjectFromPolicies") } resp1, err := cl.CreateTeam(ctx, &teams.CreateTeamReq{ Id: teamToDeleteName, Name: "montag", Projects: []string{}, }) require.NoError(t, err) require.NotNil(t, resp1) resp2, err := cl.CreateTeam(ctx, &teams.CreateTeamReq{ Id: "Other Name", Name: "does not matter", Projects: []string{"project1"}, }) require.NoError(t, err) require.NotNil(t, resp2) teamListBefore, err := cl.ListTeams(ctx, &teams.ListTeamsReq{}) require.NoError(t, err) assert.Equal(t, 2+len(storage.NonDeletableTeams), len(teamListBefore.Teams)) ctx = insertProjectsIntoNewContext([]string{constants.UnassignedProjectID}) resp, err2 := cl.DeleteTeam(ctx, &teams.DeleteTeamReq{Id: resp1.Team.Id}) require.NoError(t, err2) require.NotNil(t, resp) assert.Equal(t, resp1.Team.Id, resp.Team.Id) assert.Equal(t, resp1.Team.Name, resp.Team.Name) teamListAfter, err3 := cl.ListTeams(context.Background(), &teams.ListTeamsReq{}) require.NoError(t, err3) assert.Equal(t, len(storage.NonDeletableTeams), len(teamListAfter.Teams)-1) assert.Contains(t, teamListAfter.Teams, resp2.Team) authzMock.PurgeSubjectFromPoliciesFunc = defaultMockPurgeFunc cleanupTeam(t, cl, resp2.Team.Id) }) t.Run("when an existing team is filtered by projects return NotFound", func(t *testing.T) { ctx := context.Background() teamToDeleteName := "First Name" resp1, err := cl.CreateTeam(ctx, &teams.CreateTeamReq{ Id: teamToDeleteName, Name: "montag", Projects: []string{"project1", "project2"}, }) require.NoError(t, err) require.NotNil(t, resp1) resp2, err := cl.CreateTeam(ctx, &teams.CreateTeamReq{ Id: "Other Name", Name: "does not matter", Projects: 
[]string{"project1"}, }) require.NoError(t, err) require.NotNil(t, resp2) teamListBefore, err := cl.ListTeams(ctx, &teams.ListTeamsReq{}) require.NoError(t, err) assert.Equal(t, 2+len(storage.NonDeletableTeams), len(teamListBefore.Teams)) ctx = insertProjectsIntoNewContext([]string{"project2"}) resp, err2 := cl.DeleteTeam(ctx, &teams.DeleteTeamReq{Id: resp2.Team.Id}) require.Nil(t, resp) grpctest.AssertCode(t, codes.NotFound, err2) cleanupTeam(t, cl, resp1.Team.Id) cleanupTeam(t, cl, resp2.Team.Id) }) t.Run("when an existing team is deleted but the deletion of their policy membership fails", func(t *testing.T) { ctx := context.Background() authzMock.PurgeSubjectFromPoliciesFunc = func( _ context.Context, req *authz.PurgeSubjectFromPoliciesReq) (*authz.PurgeSubjectFromPoliciesResp, error) { return nil, errors.New("test failure of PurgeSubjectFromPolicies") } resp1, err := cl.CreateTeam(ctx, &teams.CreateTeamReq{ Id: "First Name", Name: "montag", Projects: []string{"project1", "project2"}, }) require.NoError(t, err) resp2, err := cl.CreateTeam(ctx, &teams.CreateTeamReq{ Id: "Other Name", Name: "does not matter", Projects: []string{"project1", "project2"}, }) require.NoError(t, err) teamListBefore, err := cl.ListTeams(ctx, &teams.ListTeamsReq{}) require.NoError(t, err) require.Equal(t, 2+len(storage.NonDeletableTeams), len(teamListBefore.Teams)) resp, err2 := cl.DeleteTeam(ctx, &teams.DeleteTeamReq{Id: resp1.Team.Id}) require.Nil(t, resp) require.NotNil(t, err2) grpctest.AssertCode(t, codes.Internal, err2) teamListAfter, err3 := cl.ListTeams(ctx, &teams.ListTeamsReq{}) require.NoError(t, err3) assert.Equal(t, 1+len(storage.NonDeletableTeams), len(teamListAfter.Teams)) assert.Contains(t, teamListAfter.Teams, resp2.Team) authzMock.PurgeSubjectFromPoliciesFunc = defaultMockPurgeFunc cleanupTeam(t, cl, resp2.Team.Id) }) t.Run("when the team to delete is not found", func(t *testing.T) { ctx := context.Background() resp, err := cl.DeleteTeam(ctx, &teams.DeleteTeamReq{Id: 
"some-wrong-id"}) require.Nil(t, resp) grpctest.AssertCode(t, codes.NotFound, err) }) t.Run("when attempting to delete a team that is not allowed to be deleted", func(t *testing.T) { ctx := context.Background() resp, err := cl.DeleteTeam(ctx, &teams.DeleteTeamReq{Id: storage.AdminsTeamID}) require.Nil(t, resp) grpctest.AssertCode(t, codes.InvalidArgument, err) }) }) t.Run("UpdateTeam", func(t *testing.T) { resetState(context.Background(), t, serviceRef) t.Run("when a valid team update request is submitted", func(t *testing.T) { ctx := context.Background() id := "gotta-catch-em-all" req := &teams.CreateTeamReq{ Id: id, Name: "Corgis Inc.", Projects: []string{"project1", "project2"}, } resp, err := cl.CreateTeam(ctx, req) require.NoError(t, err) newName := "Gotta Catch Only The Most Special" updateReq := &teams.UpdateTeamReq{ Id: id, Name: newName, Projects: []string{"project2", "project3"}, } updatedTeamResp, err := cl.UpdateTeam(ctx, updateReq) require.NoError(t, err, "update team") require.NotNil(t, resp) assert.Equal(t, updateReq.Id, updatedTeamResp.Team.Id) assert.Equal(t, updateReq.Name, updatedTeamResp.Team.Name) assert.Equal(t, updateReq.Projects, updatedTeamResp.Team.Projects) teamsList, err := cl.ListTeams(ctx, &teams.ListTeamsReq{}) require.NoError(t, err, "reading back teams") require.Equal(t, 2, len(teamsList.Teams)) var updatedTeam *teams.Team if teamsList.Teams[0].Id != storage.AdminsTeamID { updatedTeam = teamsList.Teams[0] } else { updatedTeam = teamsList.Teams[1] } assert.Equal(t, newName, updatedTeam.Name) cleanupTeam(t, cl, resp.Team.Id) }) t.Run("when a valid team update request is submitted with the project filter", func(t *testing.T) { ctx := context.Background() id := "gotta-catch-em-all" req := &teams.CreateTeamReq{ Id: id, Name: "Corgis Inc.", Projects: []string{"project1", "project2"}, } resp, err := cl.CreateTeam(ctx, req) require.NoError(t, err) newName := "Gotta Catch Only The Most Special" updateReq := &teams.UpdateTeamReq{ Id: id, 
Name: newName, Projects: []string{"project2", "project3"}, } updatedTeamResp, err := cl.UpdateTeam(insertProjectsIntoNewContext([]string{"project2"}), updateReq) require.NoError(t, err, "update team") require.NotNil(t, resp) assert.Equal(t, updateReq.Id, updatedTeamResp.Team.Id) assert.Equal(t, updateReq.Name, updatedTeamResp.Team.Name) assert.Equal(t, updateReq.Projects, updatedTeamResp.Team.Projects) teamsList, err := cl.ListTeams(ctx, &teams.ListTeamsReq{}) require.NoError(t, err, "reading back teams") require.Equal(t, 2, len(teamsList.Teams)) var updatedTeam *teams.Team if teamsList.Teams[0].Id != storage.AdminsTeamID { updatedTeam = teamsList.Teams[0] } else { updatedTeam = teamsList.Teams[1] } assert.Equal(t, newName, updatedTeam.Name) cleanupTeam(t, cl, resp.Team.Id) }) t.Run("when a valid team update request is submitted with the project filter of *", func(t *testing.T) { ctx := context.Background() id := "gotta-catch-em-all" req := &teams.CreateTeamReq{ Id: id, Name: "Corgis Inc.", Projects: []string{"project1", "project2"}, } resp, err := cl.CreateTeam(ctx, req) require.NoError(t, err) newName := "Gotta Catch Only The Most Special" updateReq := &teams.UpdateTeamReq{ Id: id, Name: newName, Projects: []string{"project2", "project3"}, } updatedTeamResp, err := cl.UpdateTeam(insertProjectsIntoNewContext([]string{"*"}), updateReq) require.NoError(t, err, "update team") require.NotNil(t, resp) assert.Equal(t, updateReq.Id, updatedTeamResp.Team.Id) assert.Equal(t, updateReq.Name, updatedTeamResp.Team.Name) assert.Equal(t, updateReq.Projects, updatedTeamResp.Team.Projects) teamsList, err := cl.ListTeams(ctx, &teams.ListTeamsReq{}) require.NoError(t, err, "reading back teams") require.Equal(t, 2, len(teamsList.Teams)) var updatedTeam *teams.Team if teamsList.Teams[0].Id != storage.AdminsTeamID { updatedTeam = teamsList.Teams[0] } else { updatedTeam = teamsList.Teams[1] } assert.Equal(t, newName, updatedTeam.Name) cleanupTeam(t, cl, resp.Team.Id) }) t.Run("when a 
valid team update request is submitted with the project filter of (unassigned)", func(t *testing.T) { ctx := context.Background() id := "gotta-catch-em-all" req := &teams.CreateTeamReq{ Id: id, Name: "Corgis Inc.", Projects: []string{}, } resp, err := cl.CreateTeam(ctx, req) require.NoError(t, err) newName := "Gotta Catch Only The Most Special" updateReq := &teams.UpdateTeamReq{ Id: id, Name: newName, Projects: []string{"project2", "project3"}, } updatedTeamResp, err := cl.UpdateTeam(insertProjectsIntoNewContext([]string{constants.UnassignedProjectID}), updateReq) require.NoError(t, err, "update team") require.NotNil(t, resp) assert.Equal(t, updateReq.Id, updatedTeamResp.Team.Id) assert.Equal(t, updateReq.Name, updatedTeamResp.Team.Name) assert.Equal(t, updateReq.Projects, updatedTeamResp.Team.Projects) teamsList, err := cl.ListTeams(ctx, &teams.ListTeamsReq{}) require.NoError(t, err, "reading back teams") require.Equal(t, 2, len(teamsList.Teams)) var updatedTeam *teams.Team if teamsList.Teams[0].Id != storage.AdminsTeamID { updatedTeam = teamsList.Teams[0] } else { updatedTeam = teamsList.Teams[1] } assert.Equal(t, newName, updatedTeam.Name) cleanupTeam(t, cl, resp.Team.Id) }) t.Run("when a valid team update request is submitted but is excluded by the project filter", func(t *testing.T) { ctx := context.Background() id := "gotta-catch-em-all" req := &teams.CreateTeamReq{ Id: id, Name: "Corgis Inc.", Projects: []string{"project1", "project2"}, } resp, err := cl.CreateTeam(ctx, req) require.NoError(t, err) newName := "Gotta Catch Only The Most Special" updateReq := &teams.UpdateTeamReq{ Id: id, Name: newName, Projects: []string{"project2", "project3"}, } updatedTeamResp, err := cl.UpdateTeam(insertProjectsIntoNewContext([]string{"project3"}), updateReq) require.Nil(t, updatedTeamResp) grpctest.AssertCode(t, codes.NotFound, err) cleanupTeam(t, cl, resp.Team.Id) }) t.Run("when the team exists but all projects are removed", func(t *testing.T) { ctx := 
context.Background() id := "gotta-catch-em-all" req := &teams.CreateTeamReq{ Id: id, Name: "Corgis Inc.", Projects: []string{"project1", "project2"}, } resp, err := cl.CreateTeam(ctx, req) require.NoError(t, err) updateReq := &teams.UpdateTeamReq{ Id: id, Name: "Corgis Inc.", Projects: []string{}, } updatedTeamResp, err := cl.UpdateTeam(ctx, updateReq) require.NoError(t, err) require.NotNil(t, resp) team := updatedTeamResp.Team assert.Equal(t, req.Id, team.Id) assert.Equal(t, req.Name, team.Name) assert.Equal(t, 0, len(team.Projects)) cleanupTeam(t, cl, resp.Team.Id) }) t.Run("when team to update does not exist", func(t *testing.T) { ctx := context.Background() updateReq := &teams.UpdateTeamReq{ Id: "not-found-id", Name: "Corgis Inc.", Projects: []string{"project1", "project2"}, } updatedTeam, err := cl.UpdateTeam(ctx, updateReq) require.Nil(t, updatedTeam) grpctest.AssertCode(t, codes.NotFound, err) }) }) t.Run("AddTeamMembers", func(t *testing.T) { resetState(context.Background(), t, serviceRef) t.Run("successfully adds user", func(t *testing.T) { tests := []struct { users []string desc string }{ {[]string{"6ed95714-9466-463b-80da-0513ecb42a08"}, "single user"}, {[]string{ "299ea25b-62d4-4660-965a-e25870298792", "d1f642c8-8907-4e8b-a9a0-b998a44dc4bf", }, "multiple users"}, } for _, test := range tests { t.Run("when provided valid team and "+test.desc, func(t *testing.T) { ctx := context.Background() // arrange req := &teams.CreateTeamReq{ Name: "Gotta Catch Em All", Id: "corgis-inc", Projects: []string{"project1", "project2"}, } resp, err := cl.CreateTeam(ctx, req) require.NoError(t, err) addReq := &teams.AddTeamMembersReq{ Id: req.Id, UserIds: test.users, } // act resp2, err := cl.AddTeamMembers(ctx, addReq) // assert require.NoError(t, err) require.NotNil(t, resp2) assert.Equal(t, len(addReq.UserIds), len(resp2.UserIds)) assert.ElementsMatch(t, addReq.UserIds, resp2.UserIds) cleanupTeam(t, cl, resp.Team.Id) }) } }) t.Run("successfully adds user when the project 
filter matches", func(t *testing.T) { tests := []struct { users []string desc string }{ {[]string{"6ed95714-9466-463b-80da-0513ecb42a08"}, "single user"}, {[]string{ "299ea25b-62d4-4660-965a-e25870298792", "d1f642c8-8907-4e8b-a9a0-b998a44dc4bf", }, "multiple users"}, } for _, test := range tests { t.Run("when provided valid team and "+test.desc, func(t *testing.T) { ctx := context.Background() // arrange req := &teams.CreateTeamReq{ Name: "Gotta Catch Em All", Id: "corgis-inc", Projects: []string{"project1", "project2"}, } resp, err := cl.CreateTeam(ctx, req) require.NoError(t, err) addReq := &teams.AddTeamMembersReq{ Id: resp.GetTeam().GetId(), UserIds: test.users, } // act resp2, err := cl.AddTeamMembers(insertProjectsIntoNewContext([]string{"project1"}), addReq) // assert require.NoError(t, err) require.NotNil(t, resp2) assert.Equal(t, len(addReq.UserIds), len(resp2.UserIds)) assert.ElementsMatch(t, addReq.UserIds, resp2.UserIds) cleanupTeam(t, cl, resp.Team.Id) }) } }) t.Run("successfully adds user with a project filter of *", func(t *testing.T) { tests := []struct { users []string desc string }{ {[]string{"6ed95714-9466-463b-80da-0513ecb42a08"}, "single user"}, {[]string{ "299ea25b-62d4-4660-965a-e25870298792", "d1f642c8-8907-4e8b-a9a0-b998a44dc4bf", }, "multiple users"}, } for _, test := range tests { t.Run("when provided valid team and "+test.desc, func(t *testing.T) { ctx := context.Background() // arrange req := &teams.CreateTeamReq{ Name: "Gotta Catch Em All", Id: "corgis-inc", Projects: []string{"project1", "project2"}, } resp, err := cl.CreateTeam(ctx, req) require.NoError(t, err) addReq := &teams.AddTeamMembersReq{ Id: resp.GetTeam().GetId(), UserIds: test.users, } // act resp2, err := cl.AddTeamMembers(insertProjectsIntoNewContext([]string{"*"}), addReq) // assert require.NoError(t, err) require.NotNil(t, resp2) assert.Equal(t, len(addReq.UserIds), len(resp2.UserIds)) assert.ElementsMatch(t, addReq.UserIds, resp2.UserIds) cleanupTeam(t, cl, 
resp.Team.Id) }) } }) t.Run("successfully adds user with a project filter of (unassigned)", func(t *testing.T) { tests := []struct { users []string desc string }{ {[]string{"6ed95714-9466-463b-80da-0513ecb42a08"}, "single user"}, {[]string{ "299ea25b-62d4-4660-965a-e25870298792", "d1f642c8-8907-4e8b-a9a0-b998a44dc4bf", }, "multiple users"}, } for _, test := range tests { t.Run("when provided valid team and "+test.desc, func(t *testing.T) { ctx := context.Background() // arrange req := &teams.CreateTeamReq{ Name: "Gotta Catch Em All", Id: "corgis-inc", Projects: []string{}, } resp, err := cl.CreateTeam(ctx, req) require.NoError(t, err) addReq := &teams.AddTeamMembersReq{ Id: resp.GetTeam().GetId(), UserIds: test.users, } // act resp2, err := cl.AddTeamMembers(insertProjectsIntoNewContext([]string{constants.UnassignedProjectID}), addReq) // assert require.NoError(t, err) require.NotNil(t, resp2) assert.Equal(t, len(addReq.UserIds), len(resp2.UserIds)) assert.ElementsMatch(t, addReq.UserIds, resp2.UserIds) cleanupTeam(t, cl, resp.Team.Id) }) } }) t.Run("successfully adds user to team with existing users", func(t *testing.T) { tests := []struct { users []string desc string }{ {[]string{"6ed95714-9466-463b-80da-0513ecb42a08"}, "single user"}, {[]string{ "299ea25b-62d4-4660-965a-e25870298792", "d1f642c8-8907-4e8b-a9a0-b998a44dc4bf", }, "multiple users"}, } for _, test := range tests { t.Run("when provided valid team and "+test.desc, func(t *testing.T) { ctx := context.Background() // arrange req := &teams.CreateTeamReq{ Name: "Gotta Catch Em All", Id: "corgis-inc", Projects: []string{}, } resp, err := cl.CreateTeam(ctx, req) require.NoError(t, err) targetMemberID := "88f13b6b-b20b-4335-9fd6-2c09edf45cf9" resp1, err := cl.AddTeamMembers(insertProjectsIntoNewContext([]string{constants.UnassignedProjectID}), &teams.AddTeamMembersReq{ Id: req.Id, UserIds: []string{targetMemberID}, }) require.NoError(t, err) require.Equal(t, 1, len(resp1.UserIds)) addReq := 
&teams.AddTeamMembersReq{ Id: req.Id, UserIds: test.users, } // act resp2, err := cl.AddTeamMembers(insertProjectsIntoNewContext([]string{constants.UnassignedProjectID}), addReq) // assert require.NoError(t, err) require.NotNil(t, resp2) assert.Equal(t, len(addReq.UserIds)+1, len(resp2.UserIds)) expectedUsers := append(addReq.UserIds, targetMemberID) assert.ElementsMatch(t, expectedUsers, resp2.UserIds) cleanupTeam(t, cl, resp.Team.Id) }) } }) t.Run("fails to adds user with NotFound when a project filter that excludes the team", func(t *testing.T) { tests := []struct { users []string desc string }{ {[]string{"6ed95714-9466-463b-80da-0513ecb42a08"}, "single user"}, {[]string{ "299ea25b-62d4-4660-965a-e25870298792", "d1f642c8-8907-4e8b-a9a0-b998a44dc4bf", }, "multiple users"}, } for _, test := range tests { t.Run("when provided valid team and "+test.desc, func(t *testing.T) { ctx := context.Background() // arrange req := &teams.CreateTeamReq{ Name: "Gotta Catch Em All", Id: "corgis-inc", Projects: []string{"project1", "project2"}, } resp, err := cl.CreateTeam(ctx, req) require.NoError(t, err) addReq := &teams.AddTeamMembersReq{ Id: resp.GetTeam().GetId(), UserIds: test.users, } // act resp2, err := cl.AddTeamMembers(insertProjectsIntoNewContext([]string{"wrong_project"}), addReq) // assert require.Nil(t, resp2) grpctest.AssertCode(t, codes.NotFound, err) cleanupTeam(t, cl, resp.Team.Id) }) } }) t.Run("when team exists and user has already been added, does not add duplicate user", func(t *testing.T) { ctx := context.Background() req := &teams.CreateTeamReq{ Name: "with, learning, & wisdom", Id: "ravenclaw", Projects: []string{"project1", "project2"}, } createTeam, err := cl.CreateTeam(ctx, req) require.NoError(t, err) users := []string{"some-id"} addReq := &teams.AddTeamMembersReq{ Id: createTeam.GetTeam().GetId(), UserIds: users, } // add user first time resp, err := cl.AddTeamMembers(ctx, addReq) require.NoError(t, err, "first add") assert.Equal(t, len(users), 
len(resp.UserIds)) assert.ElementsMatch(t, users, resp.UserIds) // attempt to add user second time resp2, err := cl.AddTeamMembers(ctx, addReq) require.NoError(t, err, "second add") assert.Equal(t, len(users), len(resp2.UserIds)) assert.ElementsMatch(t, users, resp2.UserIds) cleanupTeam(t, cl, createTeam.Team.Id) }) t.Run("when team does not exist, returns Not Found error", func(t *testing.T) { ctx := context.Background() addReq := &teams.AddTeamMembersReq{ Id: "not-found", UserIds: []string{"a-user"}, } resp, err := cl.AddTeamMembers(ctx, addReq) require.Nil(t, resp) grpctest.AssertCode(t, codes.NotFound, err) }) t.Run("when no user ids provided, returns invalid request", func(t *testing.T) { ctx := context.Background() req := &teams.CreateTeamReq{ Name: "with, learning, & wisdom", Id: "ravenclaw", Projects: []string{"project1", "project2"}, } _, err := cl.CreateTeam(ctx, req) require.NoError(t, err) addReq := &teams.AddTeamMembersReq{ Id: "ravenclaw", UserIds: []string{}, } resp, err := cl.AddTeamMembers(ctx, addReq) require.Nil(t, resp) grpctest.AssertCode(t, codes.InvalidArgument, err) }) }) t.Run("RemoveTeamMembers", func(t *testing.T) { resetState(context.Background(), t, serviceRef) t.Run("when team does not exist, returns NotFound error", func(t *testing.T) { ctx := context.Background() req := &teams.RemoveTeamMembersReq{ Id: "not-found-id", UserIds: []string{"some-id"}, } updatedTeam, err := cl.RemoveTeamMembers(ctx, req) require.Nil(t, updatedTeam) grpctest.AssertCode(t, codes.NotFound, err) }) t.Run("when team exists without users the list remains empty", func(t *testing.T) { ctx := context.Background() createReq := &teams.CreateTeamReq{ Name: "Guard the galaxy (with dope music)", Id: "guardians", Projects: []string{"project1", "project2"}, } createTeam, err := cl.CreateTeam(ctx, createReq) require.NoError(t, err) users := []string{"user-1", "user-2"} req := &teams.RemoveTeamMembersReq{ Id: createTeam.Team.Id, UserIds: users, } resp, err := 
cl.RemoveTeamMembers(ctx, req) require.NoError(t, err) assert.Equal(t, 0, len(resp.UserIds)) cleanupTeam(t, cl, createTeam.Team.Id) }) t.Run("when team exists with a project filter", func(t *testing.T) { ctx := context.Background() createReq := &teams.CreateTeamReq{ Name: "Guard the galaxy (with dope music)", Id: "guardians", Projects: []string{"project1", "project2"}, } resp, err := cl.CreateTeam(ctx, createReq) require.NoError(t, err) addReq := &teams.AddTeamMembersReq{ Id: resp.GetTeam().GetId(), UserIds: []string{ "user-1", "user-2", "user-3", }, } _, err = cl.AddTeamMembers(ctx, addReq) require.NoError(t, err) req := &teams.RemoveTeamMembersReq{ Id: resp.Team.Id, UserIds: []string{ "user-1", "user-2", }, } removeResp, err := cl.RemoveTeamMembers(insertProjectsIntoNewContext([]string{"project1", "other"}), req) require.NoError(t, err) assert.Equal(t, 1, len(removeResp.UserIds)) cleanupTeam(t, cl, resp.Team.Id) }) t.Run("when team exists with a project filter of *", func(t *testing.T) { ctx := context.Background() createReq := &teams.CreateTeamReq{ Name: "Guard the galaxy (with dope music)", Id: "guardians", Projects: []string{"project1", "project2"}, } resp, err := cl.CreateTeam(ctx, createReq) require.NoError(t, err) addReq := &teams.AddTeamMembersReq{ Id: resp.GetTeam().GetId(), UserIds: []string{ "user-1", "user-2", "user-3", }, } _, err = cl.AddTeamMembers(ctx, addReq) require.NoError(t, err) req := &teams.RemoveTeamMembersReq{ Id: resp.Team.Id, UserIds: []string{ "user-1", "user-2", }, } removeResp, err := cl.RemoveTeamMembers(insertProjectsIntoNewContext([]string{"*"}), req) require.NoError(t, err) assert.Equal(t, 1, len(removeResp.UserIds)) cleanupTeam(t, cl, resp.Team.Id) }) t.Run("when team exists with a project filter of (unassigned)", func(t *testing.T) { ctx := context.Background() createReq := &teams.CreateTeamReq{ Name: "Guard the galaxy (with dope music)", Id: "guardians", Projects: []string{}, } resp, err := cl.CreateTeam(ctx, createReq) 
require.NoError(t, err) addReq := &teams.AddTeamMembersReq{ Id: resp.GetTeam().GetId(), UserIds: []string{ "user-1", "user-2", "user-3", }, } _, err = cl.AddTeamMembers(ctx, addReq) require.NoError(t, err) req := &teams.RemoveTeamMembersReq{ Id: resp.Team.Id, UserIds: []string{ "user-1", "user-2", }, } removeResp, err := cl.RemoveTeamMembers(insertProjectsIntoNewContext([]string{constants.UnassignedProjectID}), req) require.NoError(t, err) assert.Equal(t, 1, len(removeResp.UserIds)) cleanupTeam(t, cl, resp.Team.Id) }) t.Run("when team exists with a project filter that excludes the team", func(t *testing.T) { ctx := context.Background() createReq := &teams.CreateTeamReq{ Name: "Guard the galaxy (with dope music)", Id: "guardians", Projects: []string{"project1"}, } resp, err := cl.CreateTeam(ctx, createReq) require.NoError(t, err) addReq := &teams.AddTeamMembersReq{ Id: resp.GetTeam().GetId(), UserIds: []string{ "user-1", "user-2", "user-3", }, } _, err = cl.AddTeamMembers(ctx, addReq) require.NoError(t, err) req := &teams.RemoveTeamMembersReq{ Id: resp.Team.Id, UserIds: []string{ "user-1", "user-2", }, } removeResp, err := cl.RemoveTeamMembers(insertProjectsIntoNewContext([]string{"wrong"}), req) require.Nil(t, removeResp) grpctest.AssertCode(t, codes.NotFound, err) cleanupTeam(t, cl, resp.Team.Id) }) tests := map[string]struct { usersToStart []string usersToRemove []string expectedLengthRemaining int }{ "with the same set of users as to delete, the list becomes empty": { usersToStart: []string{ "user-1", "user-2", }, usersToRemove: []string{ "user-1", "user-2", }, expectedLengthRemaining: 0, }, "with intersecting users existing and to remove, the list is updated": { usersToStart: []string{ "user-1", "user-2", "user-3", }, usersToRemove: []string{ "user-1", "user-2", }, expectedLengthRemaining: 1, }, "with users, but an empty user list is passed": { usersToStart: []string{ "user-1", "user-2", "user-3", }, usersToRemove: []string{}, expectedLengthRemaining: 3, }, } 
for desc, test := range tests { ctx := context.Background() t.Run("when team exists "+desc, func(t *testing.T) { createReq := &teams.CreateTeamReq{ Name: "Guard the galaxy (with dope music)", Id: "guardians", Projects: []string{"project1", "project2"}, } resp, err := cl.CreateTeam(ctx, createReq) require.NoError(t, err) addReq := &teams.AddTeamMembersReq{ Id: resp.GetTeam().GetId(), UserIds: test.usersToStart, } _, err = cl.AddTeamMembers(ctx, addReq) require.NoError(t, err) req := &teams.RemoveTeamMembersReq{ Id: resp.Team.Id, UserIds: test.usersToRemove, } removeResp, err := cl.RemoveTeamMembers(ctx, req) require.NoError(t, err) assert.Equal(t, test.expectedLengthRemaining, len(removeResp.UserIds)) cleanupTeam(t, cl, resp.Team.Id) }) } }) t.Run("GetTeamsForMember", func(t *testing.T) { resetState(context.Background(), t, serviceRef) t.Run("when valid member id provided with a project filter, "+ "returns array of teams that are in project", func(t *testing.T) { ctx := context.Background() // create first team req := &teams.CreateTeamReq{ Name: "daring, nerve, & chivalry", Id: "gryffindor", Projects: []string{"project1", "project2"}, } resp, err := cl.CreateTeam(ctx, req) require.NoError(t, err) req2 := &teams.CreateTeamReq{ Name: "save the wizarding world", Id: "aurors", Projects: []string{"project1", "project3"}, } resp2, err := cl.CreateTeam(ctx, req2) require.NoError(t, err) req3 := &teams.CreateTeamReq{ Name: "destroy the wizarding world", Id: "death-eaters", Projects: []string{}, } resp3, err := cl.CreateTeam(ctx, req3) require.NoError(t, err) users := []string{"user-1"} addReq := &teams.AddTeamMembersReq{ Id: resp.GetTeam().GetId(), UserIds: users, } _, err = cl.AddTeamMembers(ctx, addReq) require.NoError(t, err) addReq2 := &teams.AddTeamMembersReq{ Id: resp2.GetTeam().GetId(), UserIds: users, } _, err = cl.AddTeamMembers(ctx, addReq2) require.NoError(t, err) addReq3 := &teams.AddTeamMembersReq{ Id: resp3.GetTeam().GetId(), UserIds: users, } _, err = 
cl.AddTeamMembers(ctx, addReq3) require.NoError(t, err) listReq := &teams.GetTeamsForMemberReq{ UserId: users[0], } fetchedData, err := cl.GetTeamsForMember( insertProjectsIntoNewContext([]string{"project2", constants.UnassignedProjectID}), listReq) require.NoError(t, err) require.NotNil(t, fetchedData) assert.Equal(t, 2, len(fetchedData.Teams)) fetchedTeamIDs := []string{fetchedData.Teams[0].Id, fetchedData.Teams[1].Id} assert.Contains(t, fetchedTeamIDs, resp.Team.Id) assert.Contains(t, fetchedTeamIDs, resp3.Team.Id) cleanupTeam(t, cl, resp.Team.Id) cleanupTeam(t, cl, resp2.Team.Id) cleanupTeam(t, cl, resp3.Team.Id) }) t.Run("when valid member id provided with a project filter of *, returns array of all teams", func(t *testing.T) { ctx := context.Background() // create first team req := &teams.CreateTeamReq{ Name: "daring, nerve, & chivalry", Id: "gryffindor", Projects: []string{"project1", "project2"}, } resp, err := cl.CreateTeam(ctx, req) require.NoError(t, err) // create second team req2 := &teams.CreateTeamReq{ Name: "save the wizarding world", Id: "aurors", Projects: []string{"project1", "project2"}, } resp2, err := cl.CreateTeam(ctx, req2) require.NoError(t, err) // add user to first team users := []string{"user-1"} addReq := &teams.AddTeamMembersReq{ Id: resp.GetTeam().GetId(), UserIds: users, } _, err = cl.AddTeamMembers(ctx, addReq) require.NoError(t, err) // add user to second team addReq2 := &teams.AddTeamMembersReq{ Id: resp2.GetTeam().GetId(), UserIds: users, } _, err = cl.AddTeamMembers(ctx, addReq2) require.NoError(t, err) // get user's teams listReq := &teams.GetTeamsForMemberReq{ UserId: users[0], } fetchedData, err := cl.GetTeamsForMember(insertProjectsIntoNewContext([]string{"*"}), listReq) require.NoError(t, err) require.NotNil(t, fetchedData) assert.Equal(t, 2, len(fetchedData.Teams)) fetchedTeamIDs := []string{fetchedData.Teams[0].Id, fetchedData.Teams[1].Id} assert.Contains(t, fetchedTeamIDs, resp.Team.Id) assert.Contains(t, fetchedTeamIDs, 
resp2.Team.Id) cleanupTeam(t, cl, resp.Team.Id) cleanupTeam(t, cl, resp2.Team.Id) }) t.Run("when valid member id provided with a project filter of (unassigned), "+ "returns array of (unassigned) teams", func(t *testing.T) { ctx := context.Background() req := &teams.CreateTeamReq{ Name: "daring, nerve, & chivalry", Id: "gryffindor", Projects: []string{}, } resp, err := cl.CreateTeam(ctx, req) require.NoError(t, err) req2 := &teams.CreateTeamReq{ Name: "save the wizarding world", Id: "aurors", Projects: []string{"project1", "project2"}, } resp2, err := cl.CreateTeam(ctx, req2) require.NoError(t, err) req3 := &teams.CreateTeamReq{ Name: "destroy the wizarding world", Id: "death-eaters", Projects: []string{}, } resp3, err := cl.CreateTeam(ctx, req3) require.NoError(t, err) users := []string{"user-1"} addReq := &teams.AddTeamMembersReq{ Id: resp.GetTeam().GetId(), UserIds: users, } _, err = cl.AddTeamMembers(ctx, addReq) require.NoError(t, err) addReq2 := &teams.AddTeamMembersReq{ Id: resp2.GetTeam().GetId(), UserIds: users, } _, err = cl.AddTeamMembers(ctx, addReq2) require.NoError(t, err) addReq3 := &teams.AddTeamMembersReq{ Id: resp3.GetTeam().GetId(), UserIds: users, } _, err = cl.AddTeamMembers(ctx, addReq3) require.NoError(t, err) listReq := &teams.GetTeamsForMemberReq{ UserId: users[0], } fetchedData, err := cl.GetTeamsForMember(insertProjectsIntoNewContext([]string{constants.UnassignedProjectID}), listReq) require.NoError(t, err) require.NotNil(t, fetchedData) assert.Equal(t, 2, len(fetchedData.Teams)) fetchedTeamIDs := []string{fetchedData.Teams[0].Id, fetchedData.Teams[1].Id} assert.Contains(t, fetchedTeamIDs, resp.Team.Id) assert.Contains(t, fetchedTeamIDs, resp3.Team.Id) cleanupTeam(t, cl, resp.Team.Id) cleanupTeam(t, cl, resp2.Team.Id) cleanupTeam(t, cl, resp3.Team.Id) }) t.Run("when valid member id and project filter provided, returns array of teams", func(t *testing.T) { ctx := context.Background() // create first team req := &teams.CreateTeamReq{ Name: 
"daring, nerve, & chivalry", Id: "gryffindor", Projects: []string{"project1", "project2"}, } resp, err := cl.CreateTeam(ctx, req) require.NoError(t, err) // create second team req2 := &teams.CreateTeamReq{ Name: "save the wizarding world", Id: "aurors", Projects: []string{"project1", "project2"}, } resp2, err := cl.CreateTeam(ctx, req2) require.NoError(t, err) // add user to first team users := []string{"user-1"} addReq := &teams.AddTeamMembersReq{ Id: resp.GetTeam().GetId(), UserIds: users, } _, err = cl.AddTeamMembers(ctx, addReq) require.NoError(t, err) // add user to second team addReq2 := &teams.AddTeamMembersReq{ Id: resp2.GetTeam().GetId(), UserIds: users, } _, err = cl.AddTeamMembers(ctx, addReq2) require.NoError(t, err) // get user's teams listReq := &teams.GetTeamsForMemberReq{ UserId: users[0], } fetchedData, err := cl.GetTeamsForMember(ctx, listReq) require.NoError(t, err) require.NotNil(t, fetchedData) assert.Equal(t, 2, len(fetchedData.Teams)) fetchedTeamIDs := []string{fetchedData.Teams[0].Id, fetchedData.Teams[1].Id} assert.Contains(t, fetchedTeamIDs, resp.Team.Id) assert.Contains(t, fetchedTeamIDs, resp2.Team.Id) cleanupTeam(t, cl, resp.Team.Id) cleanupTeam(t, cl, resp2.Team.Id) }) t.Run("when user id does not exist on any teams, returns empty array", func(t *testing.T) { ctx := context.Background() req := &teams.CreateTeamReq{ Name: "cunning & ambitious", Id: "slytherin", Projects: []string{"project1", "project2"}, } resp, err := cl.CreateTeam(ctx, req) require.NoError(t, err) listReq := &teams.GetTeamsForMemberReq{ UserId: "user-1", } fetchedData, err := cl.GetTeamsForMember(ctx, listReq) require.NoError(t, err) require.NotNil(t, fetchedData) assert.Empty(t, fetchedData.Teams) cleanupTeam(t, cl, resp.Team.Id) }) }) t.Run("GetTeamMembership", func(t *testing.T) { resetState(context.Background(), t, serviceRef) t.Run("when the team does not exist", func(t *testing.T) { ctx := context.Background() resp, err := cl.GetTeamMembership(ctx, 
&teams.GetTeamMembershipReq{ Id: "not-found", }) require.Nil(t, resp) grpctest.AssertCode(t, codes.NotFound, err) }) t.Run("when the team exists but has no members", func(t *testing.T) { ctx := context.Background() initResp, err := cl.CreateTeam(ctx, &teams.CreateTeamReq{ Id: "other-team", Name: "i can be the very best...", Projects: []string{"project1", "project2"}, }) require.NoError(t, err) require.NotNil(t, initResp) resp, err := cl.GetTeamMembership(ctx, &teams.GetTeamMembershipReq{ Id: initResp.Team.Id, }) require.NoError(t, err) require.NotNil(t, resp) assert.Equal(t, 0, len(resp.UserIds)) cleanupTeam(t, cl, initResp.Team.Id) }) t.Run("when the team exists with members", func(t *testing.T) { ctx := context.Background() initResp, err := cl.CreateTeam(ctx, &teams.CreateTeamReq{ Id: "other-team", Name: "i can be the very best...", Projects: []string{"project1", "project2"}, }) require.NoError(t, err) require.NotNil(t, initResp) users := []string{"user-1", "user-2"} addReq := &teams.AddTeamMembersReq{ Id: initResp.GetTeam().GetId(), UserIds: users, } _, err = cl.AddTeamMembers(ctx, addReq) require.NoError(t, err) resp, err := cl.GetTeamMembership(ctx, &teams.GetTeamMembershipReq{ Id: initResp.Team.Id, }) require.NoError(t, err) require.NotNil(t, resp) assert.Equal(t, len(users), len(resp.UserIds)) assert.ElementsMatch(t, users, resp.UserIds) cleanupTeam(t, cl, initResp.Team.Id) }) t.Run("when the team exists with members and is in the project filter", func(t *testing.T) { ctx := context.Background() initResp, err := cl.CreateTeam(ctx, &teams.CreateTeamReq{ Id: "other-team", Name: "i can be the very best...", Projects: []string{"project1", "project2"}, }) require.NoError(t, err) require.NotNil(t, initResp) users := []string{"user-1", "user-2"} addReq := &teams.AddTeamMembersReq{ Id: initResp.GetTeam().GetId(), UserIds: users, } _, err = cl.AddTeamMembers(ctx, addReq) require.NoError(t, err) resp, err := cl.GetTeamMembership( 
insertProjectsIntoNewContext([]string{"project1"}), &teams.GetTeamMembershipReq{ Id: initResp.Team.Id, }) require.NoError(t, err) require.NotNil(t, resp) assert.Equal(t, len(users), len(resp.UserIds)) assert.ElementsMatch(t, users, resp.UserIds) cleanupTeam(t, cl, initResp.Team.Id) }) t.Run("when the team exists with members and the project filter is *", func(t *testing.T) { ctx := context.Background() initResp, err := cl.CreateTeam(ctx, &teams.CreateTeamReq{ Id: "other-team", Name: "i can be the very best...", Projects: []string{"project1", "project2"}, }) require.NoError(t, err) require.NotNil(t, initResp) users := []string{"user-1", "user-2"} addReq := &teams.AddTeamMembersReq{ Id: initResp.GetTeam().GetId(), UserIds: users, } _, err = cl.AddTeamMembers(ctx, addReq) require.NoError(t, err) resp, err := cl.GetTeamMembership( insertProjectsIntoNewContext([]string{"*"}), &teams.GetTeamMembershipReq{ Id: initResp.Team.Id, }) require.NoError(t, err) require.NotNil(t, resp) assert.Equal(t, len(users), len(resp.UserIds)) assert.ElementsMatch(t, users, resp.UserIds) cleanupTeam(t, cl, initResp.Team.Id) }) t.Run("when the team exists with members and the project filter is (unassigned)", func(t *testing.T) { ctx := context.Background() initResp, err := cl.CreateTeam(ctx, &teams.CreateTeamReq{ Id: "other-team", Name: "i can be the very best...", Projects: []string{}, }) require.NoError(t, err) require.NotNil(t, initResp) users := []string{"user-1", "user-2"} addReq := &teams.AddTeamMembersReq{ Id: initResp.GetTeam().GetId(), UserIds: users, } _, err = cl.AddTeamMembers(ctx, addReq) require.NoError(t, err) resp, err := cl.GetTeamMembership( insertProjectsIntoNewContext([]string{constants.UnassignedProjectID}), &teams.GetTeamMembershipReq{ Id: initResp.Team.Id, }) require.NoError(t, err) require.NotNil(t, resp) assert.Equal(t, len(users), len(resp.UserIds)) assert.ElementsMatch(t, users, resp.UserIds) cleanupTeam(t, cl, initResp.Team.Id) }) t.Run("when the team exists with 
members and the project filter is excludes the team", func(t *testing.T) { ctx := context.Background() initResp, err := cl.CreateTeam(ctx, &teams.CreateTeamReq{ Id: "other-team", Name: "i can be the very best...", Projects: []string{"project1"}, }) require.NoError(t, err) require.NotNil(t, initResp) users := []string{"user-1", "user-2"} addReq := &teams.AddTeamMembersReq{ Id: initResp.GetTeam().GetId(), UserIds: users, } _, err = cl.AddTeamMembers(ctx, addReq) require.NoError(t, err) resp, err := cl.GetTeamMembership( insertProjectsIntoNewContext([]string{"wrong"}), &teams.GetTeamMembershipReq{ Id: initResp.Team.Id, }) require.Nil(t, resp) grpctest.AssertCode(t, codes.NotFound, err) cleanupTeam(t, cl, initResp.Team.Id) }) }) t.Run("PurgeUserMembership", func(t *testing.T) { resetState(context.Background(), t, serviceRef) t.Run("when user id is not passed, returns InvalidArgument error", func(t *testing.T) { ctx := context.Background() req := &teams.PurgeUserMembershipReq{ UserId: "", } resp, err := cl.PurgeUserMembership(ctx, req) require.Nil(t, resp) grpctest.AssertCode(t, codes.InvalidArgument, err) }) tests := map[string]struct { userToPurge string initialTeamsAndMembers map[string][]string expectedTeamsAndMembers map[string][]string expectedUpdatedTeamIDs map[string]bool }{ "when is only the admins team": { userToPurge: "f2f5300c-48dc-4633-8ac8-2bcf814e7b8a", initialTeamsAndMembers: map[string][]string{}, expectedTeamsAndMembers: map[string][]string{ storage.AdminsTeamID: {}, }, expectedUpdatedTeamIDs: map[string]bool{}, }, `when there are multiple teams and the purged user is a member of one of them, only one team should be updated`: { userToPurge: "2041bad7-8ae4-418b-9e66-6af87838ab97", initialTeamsAndMembers: map[string][]string{ "team1": { "2041bad7-8ae4-418b-9e66-6af87838ab97", "2041bad8-8ae4-418b-9e66-6af87838ab97", "2041bad9-8ae4-418b-9e66-6af87838ab97", }, "team2": { "c34d1891-907e-4677-bc90-458c9e94f772", "c34d1892-907e-4677-bc90-458c9e94f772", 
"c34d1893-907e-4677-bc90-458c9e94f772", }, }, expectedTeamsAndMembers: map[string][]string{ storage.AdminsTeamID: {}, "team1": { "2041bad8-8ae4-418b-9e66-6af87838ab97", "2041bad9-8ae4-418b-9e66-6af87838ab97", }, "team2": { "c34d1891-907e-4677-bc90-458c9e94f772", "c34d1892-907e-4677-bc90-458c9e94f772", "c34d1893-907e-4677-bc90-458c9e94f772", }, }, expectedUpdatedTeamIDs: map[string]bool{"team1": true}, }, `when there is only one team besides the admins team and the deleted user is a member the team should be updated`: { userToPurge: "2041bad8-8ae4-418b-9e66-6af87838ab97", initialTeamsAndMembers: map[string][]string{ "team1": { "2041bad7-8ae4-418b-9e66-6af87838ab97", "2041bad8-8ae4-418b-9e66-6af87838ab97", "2041bad9-8ae4-418b-9e66-6af87838ab97", }, }, expectedTeamsAndMembers: map[string][]string{ storage.AdminsTeamID: {}, "team1": { "2041bad7-8ae4-418b-9e66-6af87838ab97", "2041bad9-8ae4-418b-9e66-6af87838ab97", }, }, expectedUpdatedTeamIDs: map[string]bool{"team1": true}, }, `when there are multiple teams and the purged user is a member of both, both teams should be updated`: { userToPurge: "2041bad9-8ae4-418b-9e66-6af87838ab97", initialTeamsAndMembers: map[string][]string{ "team1": { "2041bad7-8ae4-418b-9e66-6af87838ab97", "2041bad8-8ae4-418b-9e66-6af87838ab97", "2041bad9-8ae4-418b-9e66-6af87838ab97", }, "team2": { "c34d1891-907e-4677-bc90-458c9e94f772", "c34d1892-907e-4677-bc90-458c9e94f772", "c34d1893-907e-4677-bc90-458c9e94f772", "2041bad9-8ae4-418b-9e66-6af87838ab97", }, }, expectedTeamsAndMembers: map[string][]string{ storage.AdminsTeamID: {}, "team1": { "2041bad7-8ae4-418b-9e66-6af87838ab97", "2041bad8-8ae4-418b-9e66-6af87838ab97", }, "team2": { "c34d1891-907e-4677-bc90-458c9e94f772", "c34d1892-907e-4677-bc90-458c9e94f772", "c34d1893-907e-4677-bc90-458c9e94f772", }, }, expectedUpdatedTeamIDs: map[string]bool{"team1": true, "team2": true}, }, `when there are multiple teams and the purged user is a member of none, neither team should be updated`: { userToPurge: 
"d989bca0-4535-444c-8300-24bec6aa446e", initialTeamsAndMembers: map[string][]string{ "team1": { "f6d4e661-15a7-4514-b1a4-60a00becde58", "f6d4e662-15a7-4514-b1a4-60a00becde58", "f6d4e663-15a7-4514-b1a4-60a00becde58", }, "team2": { "e7dedee5-7942-49a7-8735-f24421224f40", "e7dedee6-7942-49a7-8735-f24421224f40", "e7dedee7-7942-49a7-8735-f24421224f40", }, }, expectedTeamsAndMembers: map[string][]string{ storage.AdminsTeamID: {}, "team1": { "f6d4e661-15a7-4514-b1a4-60a00becde58", "f6d4e662-15a7-4514-b1a4-60a00becde58", "f6d4e663-15a7-4514-b1a4-60a00becde58", }, "team2": { "e7dedee5-7942-49a7-8735-f24421224f40", "e7dedee6-7942-49a7-8735-f24421224f40", "e7dedee7-7942-49a7-8735-f24421224f40", }, }, expectedUpdatedTeamIDs: map[string]bool{}, }, } for desc, test := range tests { t.Run(desc, func(t *testing.T) { ctx := context.Background() var expectedResponseIds []string var allCreatedIds []string for team, members := range test.initialTeamsAndMembers { createReq := &teams.CreateTeamReq{ Name: "ignored", Id: team, } resp, err := cl.CreateTeam(ctx, createReq) require.NoError(t, err) addReq := &teams.AddTeamMembersReq{ Id: createReq.Id, UserIds: members, } _, err = cl.AddTeamMembers(ctx, addReq) require.NoError(t, err) allCreatedIds = append(allCreatedIds, createReq.Id) if _, isExpectedInResponse := test.expectedUpdatedTeamIDs[team]; isExpectedInResponse { expectedResponseIds = append(expectedResponseIds, resp.GetTeam().GetId()) } } req := &teams.PurgeUserMembershipReq{ UserId: test.userToPurge, } resp, err := cl.PurgeUserMembership(ctx, req) // Check that IDs of updated teams returned by API // match what we expected. 
require.NoError(t, err) require.NotNil(t, resp) assert.ElementsMatch(t, expectedResponseIds, resp.Ids) // Check that user membership was properly updated finalTeamsState, err := cl.ListTeams(ctx, &teams.ListTeamsReq{}) require.NoError(t, err) for _, team := range finalTeamsState.GetTeams() { expectedTeamMembers, found := test.expectedTeamsAndMembers[team.Id] require.Equal(t, found, true) assert.NotNil(t, expectedTeamMembers) usersReq := &teams.GetTeamMembershipReq{ Id: team.Id, } usersResp, err := cl.GetTeamMembership(ctx, usersReq) require.NoError(t, err) assert.ElementsMatch(t, expectedTeamMembers, usersResp.UserIds) } // Cleanup for _, teamID := range allCreatedIds { cleanupTeam(t, cl, teamID) } }) } }) } func cleanupTeam(t *testing.T, cl teams.TeamsServiceClient, id string) { t.Helper() deleteReq := teams.DeleteTeamReq{Id: id} _, err := cl.DeleteTeam(context.Background(), &deleteReq) require.NoError(t, err) } // Pass nil for migrationConfig if you want in-memory server. func setupTeamsService(ctx context.Context, t *testing.T, l logger.Logger, migrationConfig *migration.Config) (*team_serv.TeamServer, *service.Service, *grpc.ClientConn, func(), *authz.PoliciesServiceServerMock) { t.Helper() serviceCerts := helpers.LoadDevCerts(t, "teams-service") connFactory := secureconn.NewFactory(*serviceCerts) authzCerts := helpers.LoadDevCerts(t, "authz-service") authzConnFactory := secureconn.NewFactory(*authzCerts) grpcAuthz := authzConnFactory.NewServer() mockPolicies := authz.NewPoliciesServiceServerMock() mockPolicies.PurgeSubjectFromPoliciesFunc = defaultMockPurgeFunc authz.RegisterPoliciesServiceServer(grpcAuthz, mockPolicies) mockAuthz := authz.NewAuthorizationServiceServerMock() mockAuthz.ValidateProjectAssignmentFunc = defaultValidateProjectAssignmentFunc authz.RegisterAuthorizationServiceServer(grpcAuthz, mockAuthz) authzServer := grpctest.NewServer(grpcAuthz) authzConn, err := authzConnFactory.Dial("authz-service", authzServer.URL) require.NoError(t, err) 
authzPoliciesClient := authz.NewPoliciesServiceClient(authzConn) authzAuthorizationClient := authz.NewAuthorizationServiceClient(authzConn) var serviceRef *service.Service if migrationConfig == nil { serviceRef, err = service.NewInMemoryService(l, connFactory, authzPoliciesClient) } else { serviceRef, err = service.NewPostgresService(l, connFactory, *migrationConfig, authzPoliciesClient, authzAuthorizationClient) } if err != nil { t.Fatalf("could not create server: %s", err) } grpcServ := serviceRef.ConnFactory.NewServer(tracing.GlobalServerInterceptor()) teamServer := team_serv.NewTeamServer(serviceRef) teams.RegisterTeamsServiceServer(grpcServ, teamServer) resetState(ctx, t, serviceRef) g := grpctest.NewServer(grpcServ) conn, err := connFactory.Dial("teams-service", g.URL) if err != nil { t.Fatalf("connecting to grpc endpoint: %s", err) } return teamServer, serviceRef, conn, func() { g.Close(); authzServer.Close() }, mockPolicies } func resetState(ctx context.Context, t *testing.T, serviceRef *service.Service) { t.Helper() if r, ok := serviceRef.Storage.(storage.Resetter); ok { err := r.Reset(ctx) require.NoError(t, err) } } func defaultMockPurgeFunc(context.Context, *authz.PurgeSubjectFromPoliciesReq) (*authz.PurgeSubjectFromPoliciesResp, error) { return &authz.PurgeSubjectFromPoliciesResp{}, nil } func defaultValidateProjectAssignmentFunc(context.Context, *authz.ValidateProjectAssignmentReq) (*authz.ValidateProjectAssignmentResp, error) { return &authz.ValidateProjectAssignmentResp{}, nil } func insertProjectsIntoNewContext(projects []string) context.Context { return auth_context.NewOutgoingProjectsContext(auth_context.NewContext(context.Background(), []string{}, projects, "resource", "action")) }
[ "\"CI\"" ]
[]
[ "CI" ]
[]
["CI"]
go
1
0
examples/mars_zx3/star-tracker/log.py
#!/usr/bin/env python3 # -*- coding: utf-8 -*- import os from koheron import command, connect import time import pdb class SkyTrackerInterface(object): def __init__(self, client): self.client = client # setter @command() def set_led_pwm(self, value): return self.client.recv_bool() @command() def set_backlash(self, axis, period_usec, ncycles, mode): return self.client.recv_bool() @command() def set_motor_mode(self, axis, isSlew, mode): return self.client.recv_bool() @command() def set_speed_ratio(self, axis, isSlew, isForward): return self.client.recv_bool() @command() def set_motor_highspeedmode(self, axis, isSlew, isHighSpeed): return self.client.recv_bool() @command() def set_motor_direction(self, axis, isSlew, isForward): return self.client.recv_bool() @command() def set_steps_per_rotation(self, axis, val_us): return self.client.recv_bool() @command() def set_current_position(self, axis, val): return self.client.recv_bool() @command() def set_min_period(self, axis, val_us): return self.client.recv_bool() @command() def set_max_period(self, axis, val_us): return self.client.recv_bool() @command() def set_motor_period_ticks(self, axis, isSlew, val): return self.client.recv_bool() @command() def set_motor_period_usec(self, axis, isSlew, val): return self.client.recv_bool() @command() def set_goto_target(self, axis, val): return self.client.recv_bool() @command() def set_goto_increment(self, axis, val): return self.client.recv_bool() # getters @command() def get_version(self): return self.client.recv_uint32() @command() def get_steps_per_rotation(self, axis): return self.client.recv_uint32() @command() def get_speed_ratio(self, axis, isSlew): return self.client.recv_double() @command() def get_backlash_period_ticks(self, axis): return self.client.recv_uint32() @command() def get_backlash_period_usec(self, axis): return self.client.recv_double() @command() def get_backlash_ncycles(self, axis): return self.client.recv_uint32() @command() def 
get_motor_highspeedmode(self, axis, isSlew): return self.client.recv_bool() @command() def get_motor_mode(self, axis, isSlew): return self.client.recv_uint32() @command() def get_motor_direction(self, axis, isSlew): return self.client.recv_bool() @command() def get_min_period_ticks(self, axis): return self.client.recv_uint32() @command() def get_max_period_ticks(self, axis): return self.client.recv_uint32() @command() def get_motor_period_usec(self, axis, isSlew): return self.client.recv_double() @command() def get_motor_period_ticks(self, axis, isSlew): return self.client.recv_uint32() @command() def get_raw_stepcount(self, axis): return self.client.recv_uint32() @command() def get_raw_status(self, axis): return self.client.recv_uint32() @command() def get_goto_increment(self, axis): return self.client.recv_uint32() @command() def get_goto_target(self, axis): return self.client.recv_uint32() # command @command() def enable_backlash(self, axis): return self.client.recv_bool() @command() def assign_raw_backlash(self, axis, ticks, ncycles, mode): return self.client.recv_bool() @command() def disable_raw_backlash(self, axis): return self.client.recv_bool() @command() def start_raw_tracking(self, isForward, periodticks, mode): return self.client.recv_bool() @command() def disable_raw_tracking(self, axis, instant): return self.client.recv_bool() @command() def send_raw_command(self, axis, isForward, ticks, mode, isGoTo, use_accel): return self.client.recv_bool() @command() def park_raw_telescope(self, axis, isForward, ticks, mode, use_accel): return self.client.recv_bool() @command() def cancel_raw_command(self, axis, instant): return self.client.recv_bool() # camera @command("CameraInterface") def close_shutter(self): return self.client.recv_bool() @command("CameraInterface") def open_shutter(self): return self.client.recv_bool() @command("CameraInterface") def get_cameratrigger_reg(self): return self.client.recv_uint8() @command("CameraInterface") def 
set_cameratrigger_reg(self, value): return self.client.recv_bool() # skywathcer interface @command("ASCOMInterface") def SwpCmdInitialize(self, axis): return self.client.recv_bool() @command("ASCOMInterface") def SwpGetBoardVersion(self): return self.client.recv_uint32() @command("ASCOMInterface") def SwpGetGridPerRevolution(self, axis): return self.client.recv_uint32() @command("ASCOMInterface") def SwpGetTimerInterruptFreq(self): return self.client.recv_uint32() @command("ASCOMInterface") def SwpGetHighSpeedRatio(self, axis): return self.client.recv_double() @command("ASCOMInterface") def SwpCmdStopAxis(self, axis, instant): return self.client.recv_bool() @command("ASCOMInterface") def SwpSetAxisPosition(self, axis, value): return self.client.recv_bool() @command("ASCOMInterface") def SwpGetAxisPosition(self, axis): return self.client.recv_uint32() @command("ASCOMInterface") def SwpSetMotionModeDirection(self, axis, isSlew, isForward, isHighSpeed): return self.client.recv_bool() @command("ASCOMInterface") def SwpSetGotoTarget(self, axis, targt): return self.client.recv_bool() @command("ASCOMInterface") def SwpSetGotoTargetIncrement(self, axis, ncycles): return self.client.recv_bool() @command("ASCOMInterface") def SwpSetStepPeriod(self, axis, isSlew, period_usec): return self.client.recv_bool() @command("ASCOMInterface") def SwpCmdStartMotion(self, axis, isSlew, use_accel, isGoto): return self.client.recv_bool() @command("ASCOMInterface") def SwpSetHomePosition(self, axis, period_usec): return self.client.recv_bool() @command("ASCOMInterface") def SwpGetAuxEncoder(self, axis): return self.client.recv_uint32() @command("ASCOMInterface") def SwpSetFeature(self, axis, cmd): return self.client.recv_bool() @command("ASCOMInterface") def SwpGetFeature(self, axis): return self.client.recv_uint32() def Initialize(self): for i in range(0, 2): print('\n\nset_min_period{0} : {1}'.format(i, self.set_min_period(i, 15))) print('set_led_pwm{0} : {1}'.format(i, 
self.set_led_pwm(100))) print('set_max_period{0} : {1}'.format(i, self.set_max_period(i, 268435.0))) print('set_backlash{0} : {1}'.format(i, self.set_backlash(i, 15.1, 127, 7))) print('set_steps_per_rotation{0} : {1}'.format(i, self.set_steps_per_rotation(i, 200*32*144*5))) print('set_current_position{0} : {1}'.format(i, self.set_current_position(i, 0x0))) print('set_goto_target{0} : {1}'.format(i, self.set_goto_target(i, 200*32))) print('set_goto_increment{0} : {1}'.format(i, self.set_goto_increment(i, 200*32))) print('==========================================') for j in range (0, 2): # print('set_motor_mode{0}-{1} : {2}'.format(i, j, self.set_motor_mode(i, j, 7))) print('set_speed_ratio{0}-{1} : {2}'.format(i, j, self.set_speed_ratio(i, j, 1))) print('set_motor_highspeedmode{0}-{1} : {2}'.format(i, j, self.set_motor_highspeedmode(i, j, True))) print('set_motor_direction{0}-{1} : {2}'.format(i, j, self.set_motor_direction(i, j, 0))) print('set_motor_period_usec{0}-{1} : {2}'.format(i, j, self.set_motor_period_usec(i, j, 15))) def PrintSpeed(self): for i in range (0, 2): for j in range (0, 2): print('get_motor_direction{0}-{1}: {2}'.format(i, j, self.get_motor_direction(i, j))) print('get_motor_period_usec{0}-{1}: {2}'.format(i, j, self.get_motor_period_usec(i, j))) def PrintAll(self): print("get_version: {0}".format(self.get_version())) for i in range (0, 2): print('\n\nget_backlash_period{0}: {1}'.format(i, self.get_backlash_period_ticks(i))) print('get_backlash_period_usec{0}: {1}'.format(i, self.get_backlash_period_usec(i))) print('get_backlash_ncycles{0}: {1}'.format(i, self.get_backlash_ncycles(i))) print('get_steps_per_rotation{0}: {1}'.format(i, self.get_steps_per_rotation(i))) print('get_max_period{0}: {1}'.format(i, self.get_max_period_ticks(i))) print('get_min_period{0}: {1}'.format(i, self.get_min_period_ticks(i))) print('get_raw_status{0}: {1}'.format(i, self.get_raw_status(i))) print('get_raw_stepcount{0}: {1}'.format(i, self.get_raw_stepcount(i))) 
print('get_goto_increment{0}: {1}'.format(i, self.get_goto_increment(i))) print('get_goto_target{0}: {1}'.format(i, self.get_goto_target(i))) print('=========================') for j in range (0, 2): print('get_spped_ratio{0}-{1}: {2}'.format(i, j, self.get_speed_ratio(i, j))) print('get_motor_highspeedmode{0}-{1}: {2}'.format(i, j, self.get_motor_highspeedmode(i, j))) print('get_motor_mode{0}-{1}: {2}'.format(i, j, self.get_motor_mode(i, j))) print('get_motor_direction{0}-{1}: {2}'.format(i, j, self.get_motor_direction(i, j))) print('get_motor_period_usec{0}-{1}: {2}'.format(i, j, self.get_motor_period_usec(i, j))) print('get_motor_period_ticks{0}-{1}: {2}'.format(i, j, self.get_motor_period_ticks(i, j))) def apply_siderail(self, axis): SKYWATCHER_STELLAR_DAY = 86164.098903691 SKYWATCHER_STELLAR_SPEED = 15.041067179 if __name__ == '__main__': host = os.getenv('HOST','192.168.1.122') client = connect(host, name='mars_star_tracker') driver = SkyTrackerInterface(client) driver.PrintAll() print('set_led_pwm{0} : {1}'.format(0, driver.set_led_pwm(20))) print('open_shutter{0} : {1}'.format(0, driver.open_shutter())) time.sleep(1) print('open_shutter{0} : {1}'.format(0, driver.close_shutter()))
[]
[]
[ "HOST" ]
[]
["HOST"]
python
1
0
Samples/AlembicViewer/alembic_viewer.py
#!/usr/bin/env python """Alembic Viewer launcher script.""" import os import sys if sys.version_info < (2, 7): raise Exception('alembic_viewer.py currently requires Python 2.7') from PySide import QtGui from FabricEngine import Core from FabricEngine.FabricUI import Style from FabricEngine.Canvas.FabricParser import FabricParser from AlembicViewer.AlembicViewerWindow import AlembicViewerWindow if __name__ == "__main__": # This only runs when launched directly from the command line. # A QApplication is setup and the Alembic Viewer Window is instanced and # attached to the QApplication and shown. # # Optional command line arguments for the initial directory are also # available to be called on startup. app = QtGui.QApplication("") Style.FabricStyleUtil.applyFabricStyle(app) app.setOrganizationName('Fabric Software Inc') app.setApplicationName('Alembic Viewer') app.setApplicationVersion('1.0.0') fabricDir = os.environ.get('FABRIC_DIR', None) if fabricDir: logoPath = os.path.join(fabricDir, 'Resources', 'fe_logo.png') app.setWindowIcon(QtGui.QIcon(logoPath)) parser = FabricParser() parser.add_argument('-d', '--initDir', action='store', dest='initDir', help='initial directory to open') args = parser.parse_args() initDir = args.initDir mainWin = AlembicViewerWindow(initDir=initDir) mainWin.show() alembicViewerGraphPath = os.path.join(fabricDir, 'Samples', 'Python', 'AlembicViewer', 'AlembicViewer.canvas') mainWin.loadGraph(alembicViewerGraphPath) app.exec_()
[]
[]
[ "FABRIC_DIR" ]
[]
["FABRIC_DIR"]
python
1
0
relay/relay_test.go
// Copyright 2019 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package relay import ( "bytes" "context" "database/sql" "fmt" "io/ioutil" "os" "path/filepath" "strconv" "sync" "testing" "time" . "github.com/pingcap/check" "github.com/pingcap/errors" "github.com/pingcap/parser" "github.com/siddontang/go-mysql/mysql" gmysql "github.com/siddontang/go-mysql/mysql" "github.com/siddontang/go-mysql/replication" "github.com/pingcap/dm/dm/config" "github.com/pingcap/dm/pkg/binlog" "github.com/pingcap/dm/pkg/binlog/event" "github.com/pingcap/dm/pkg/gtid" "github.com/pingcap/dm/pkg/log" "github.com/pingcap/dm/pkg/streamer" "github.com/pingcap/dm/pkg/utils" "github.com/pingcap/dm/relay/reader" "github.com/pingcap/dm/relay/retry" "github.com/pingcap/dm/relay/transformer" "github.com/pingcap/dm/relay/writer" ) var _ = Suite(&testRelaySuite{}) func TestSuite(t *testing.T) { TestingT(t) } type testRelaySuite struct { } func (t *testRelaySuite) SetUpSuite(c *C) { c.Assert(log.InitLogger(&log.Config{}), IsNil) } func newRelayCfg(c *C, flavor string) *Config { dbCfg := getDBConfigForTest() return &Config{ EnableGTID: false, // position mode, so auto-positioning can work Flavor: flavor, RelayDir: c.MkDir(), ServerID: 12321, From: config.DBConfig{ Host: dbCfg.Host, Port: dbCfg.Port, User: dbCfg.User, Password: dbCfg.Password, }, ReaderRetry: retry.ReaderRetryConfig{ BackoffRollback: 200 * time.Millisecond, BackoffMax: 1 * time.Second, BackoffMin: 1 * time.Millisecond, BackoffJitter: true, BackoffFactor: 2, }, } } func getDBConfigForTest() 
config.DBConfig { host := os.Getenv("MYSQL_HOST") if host == "" { host = "127.0.0.1" } port, _ := strconv.Atoi(os.Getenv("MYSQL_PORT")) if port == 0 { port = 3306 } user := os.Getenv("MYSQL_USER") if user == "" { user = "root" } password := os.Getenv("MYSQL_PSWD") return config.DBConfig{ Host: host, Port: port, User: user, Password: password, } } func openDBForTest() (*sql.DB, error) { cfg := getDBConfigForTest() dsn := fmt.Sprintf("%s:%s@tcp(%s:%d)/?charset=utf8mb4", cfg.User, cfg.Password, cfg.Host, cfg.Port) return sql.Open("mysql", dsn) } // mockReader is used only for relay testing. type mockReader struct { result reader.Result err error } func (r *mockReader) Start() error { return nil } func (r *mockReader) Close() error { return nil } func (r *mockReader) GetEvent(ctx context.Context) (reader.Result, error) { select { case <-ctx.Done(): return reader.Result{}, ctx.Err() default: } return r.result, r.err } // mockWriter is used only for relay testing. type mockWriter struct { result writer.Result err error latestEvent *replication.BinlogEvent } func (w *mockWriter) Start() error { return nil } func (w *mockWriter) Close() error { return nil } func (w *mockWriter) Recover(ctx context.Context) (writer.RecoverResult, error) { return writer.RecoverResult{}, nil } func (w *mockWriter) WriteEvent(ev *replication.BinlogEvent) (writer.Result, error) { w.latestEvent = ev // hold it return w.result, w.err } func (w *mockWriter) Flush() error { return nil } func (t *testRelaySuite) TestTryRecoverLatestFile(c *C) { var ( uuid = "24ecd093-8cec-11e9-aa0d-0242ac170002" uuidWithSuffix = fmt.Sprintf("%s.000001", uuid) previousGTIDSetStr = "3ccc475b-2343-11e7-be21-6c0b84d59f30:1-14,53bfca22-690d-11e7-8a62-18ded7a37b78:1-495,406a3f61-690d-11e7-87c5-6c92bf46f384:123-456" latestGTIDStr1 = "3ccc475b-2343-11e7-be21-6c0b84d59f30:14" latestGTIDStr2 = "53bfca22-690d-11e7-8a62-18ded7a37b78:495" recoverGTIDSetStr = 
"3ccc475b-2343-11e7-be21-6c0b84d59f30:1-17,53bfca22-690d-11e7-8a62-18ded7a37b78:1-505,406a3f61-690d-11e7-87c5-6c92bf46f384:1-456" // 406a3f61-690d-11e7-87c5-6c92bf46f384:123-456 --> 406a3f61-690d-11e7-87c5-6c92bf46f384:1-456 greaterGITDSetStr = "3ccc475b-2343-11e7-be21-6c0b84d59f30:1-20,53bfca22-690d-11e7-8a62-18ded7a37b78:1-510,406a3f61-690d-11e7-87c5-6c92bf46f384:123-456" filename = "mysql-bin.000001" startPos = gmysql.Position{Name: filename, Pos: 123} parser2 = parser.New() relayCfg = newRelayCfg(c, mysql.MySQLFlavor) r = NewRelay(relayCfg).(*Relay) ) c.Assert(r.Init(context.Background()), IsNil) // purge old relay dir f, err := os.Create(filepath.Join(r.cfg.RelayDir, "old_relay_log")) c.Assert(err, IsNil) f.Close() c.Assert(r.PurgeRelayDir(), IsNil) files, err := ioutil.ReadDir(r.cfg.RelayDir) c.Assert(err, IsNil) c.Assert(files, HasLen, 0) c.Assert(r.meta.Load(), IsNil) // no file specified, no need to recover c.Assert(r.tryRecoverLatestFile(context.Background(), parser2), IsNil) // save position into meta c.Assert(r.meta.AddDir(uuid, &startPos, nil, 0), IsNil) // relay log file does not exists, no need to recover c.Assert(r.tryRecoverLatestFile(context.Background(), parser2), IsNil) // use a generator to generate some binlog events previousGTIDSet, err := gtid.ParserGTID(relayCfg.Flavor, previousGTIDSetStr) c.Assert(err, IsNil) latestGTID1, err := gtid.ParserGTID(relayCfg.Flavor, latestGTIDStr1) c.Assert(err, IsNil) latestGTID2, err := gtid.ParserGTID(relayCfg.Flavor, latestGTIDStr2) c.Assert(err, IsNil) g, _, data := genBinlogEventsWithGTIDs(c, relayCfg.Flavor, previousGTIDSet, latestGTID1, latestGTID2) // write events into relay log file err = ioutil.WriteFile(filepath.Join(r.meta.Dir(), filename), data, 0600) c.Assert(err, IsNil) // all events/transactions are complete, no need to recover c.Assert(r.tryRecoverLatestFile(context.Background(), parser2), IsNil) // now, we do not update position/GTID set in meta if not recovered t.verifyMetadata(c, r, 
uuidWithSuffix, startPos, "", []string{uuidWithSuffix}) // write some invalid data into the relay log file f, err = os.OpenFile(filepath.Join(r.meta.Dir(), filename), os.O_WRONLY|os.O_APPEND, 0600) c.Assert(err, IsNil) _, err = f.Write([]byte("invalid event data")) c.Assert(err, IsNil) f.Close() // write a greater GTID sets in meta greaterGITDSet, err := gtid.ParserGTID(relayCfg.Flavor, greaterGITDSetStr) c.Assert(err, IsNil) c.Assert(r.SaveMeta(startPos, greaterGITDSet), IsNil) // invalid data truncated, meta updated c.Assert(r.tryRecoverLatestFile(context.Background(), parser2), IsNil) _, latestPos := r.meta.Pos() c.Assert(latestPos, DeepEquals, gmysql.Position{Name: filename, Pos: g.LatestPos}) _, latestGTIDs := r.meta.GTID() recoverGTIDSet, err := gtid.ParserGTID(relayCfg.Flavor, recoverGTIDSetStr) c.Assert(err, IsNil) c.Assert(latestGTIDs.Equal(recoverGTIDSet), IsTrue) // verifyMetadata is not enough // no relay log file need to recover c.Assert(r.SaveMeta(minCheckpoint, latestGTIDs), IsNil) c.Assert(r.tryRecoverLatestFile(context.Background(), parser2), IsNil) _, latestPos = r.meta.Pos() c.Assert(latestPos, DeepEquals, minCheckpoint) _, latestGTIDs = r.meta.GTID() c.Assert(latestGTIDs.Contain(g.LatestGTID), IsTrue) } func (t *testRelaySuite) TestTryRecoverMeta(c *C) { var ( uuid = "24ecd093-8cec-11e9-aa0d-0242ac170002" previousGTIDSetStr = "3ccc475b-2343-11e7-be21-6c0b84d59f30:1-14,53bfca22-690d-11e7-8a62-18ded7a37b78:1-495,406a3f61-690d-11e7-87c5-6c92bf46f384:123-456" latestGTIDStr1 = "3ccc475b-2343-11e7-be21-6c0b84d59f30:14" latestGTIDStr2 = "53bfca22-690d-11e7-8a62-18ded7a37b78:495" recoverGTIDSetStr = "3ccc475b-2343-11e7-be21-6c0b84d59f30:1-17,53bfca22-690d-11e7-8a62-18ded7a37b78:1-505,406a3f61-690d-11e7-87c5-6c92bf46f384:1-456" // 406a3f61-690d-11e7-87c5-6c92bf46f384:123-456 --> 406a3f61-690d-11e7-87c5-6c92bf46f384:1-456 filename = "mysql-bin.000001" startPos = gmysql.Position{Name: filename, Pos: 123} parser2 = parser.New() relayCfg = newRelayCfg(c, 
mysql.MySQLFlavor) r = NewRelay(relayCfg).(*Relay) ) c.Assert(r.Init(context.Background()), IsNil) recoverGTIDSet, err := gtid.ParserGTID(relayCfg.Flavor, recoverGTIDSetStr) c.Assert(err, IsNil) c.Assert(r.meta.AddDir(uuid, &startPos, nil, 0), IsNil) c.Assert(r.meta.Load(), IsNil) // use a generator to generate some binlog events previousGTIDSet, err := gtid.ParserGTID(relayCfg.Flavor, previousGTIDSetStr) c.Assert(err, IsNil) latestGTID1, err := gtid.ParserGTID(relayCfg.Flavor, latestGTIDStr1) c.Assert(err, IsNil) latestGTID2, err := gtid.ParserGTID(relayCfg.Flavor, latestGTIDStr2) c.Assert(err, IsNil) g, _, data := genBinlogEventsWithGTIDs(c, relayCfg.Flavor, previousGTIDSet, latestGTID1, latestGTID2) // write events into relay log file err = ioutil.WriteFile(filepath.Join(r.meta.Dir(), filename), data, 0600) c.Assert(err, IsNil) // write some invalid data into the relay log file to trigger a recover. f, err := os.OpenFile(filepath.Join(r.meta.Dir(), filename), os.O_WRONLY|os.O_APPEND, 0600) c.Assert(err, IsNil) _, err = f.Write([]byte("invalid event data")) c.Assert(err, IsNil) f.Close() // recover with empty GTIDs. c.Assert(r.tryRecoverLatestFile(context.Background(), parser2), IsNil) _, latestPos := r.meta.Pos() c.Assert(latestPos, DeepEquals, gmysql.Position{Name: filename, Pos: g.LatestPos}) _, latestGTIDs := r.meta.GTID() c.Assert(latestGTIDs.Equal(recoverGTIDSet), IsTrue) // write some invalid data into the relay log file again. f, err = os.OpenFile(filepath.Join(r.meta.Dir(), filename), os.O_WRONLY|os.O_APPEND, 0600) c.Assert(err, IsNil) _, err = f.Write([]byte("invalid event data")) c.Assert(err, IsNil) f.Close() // recover with the subset of GTIDs (previous GTID set). 
c.Assert(r.SaveMeta(startPos, previousGTIDSet), IsNil) c.Assert(r.tryRecoverLatestFile(context.Background(), parser2), IsNil) _, latestPos = r.meta.Pos() c.Assert(latestPos, DeepEquals, gmysql.Position{Name: filename, Pos: g.LatestPos}) _, latestGTIDs = r.meta.GTID() c.Assert(latestGTIDs.Equal(recoverGTIDSet), IsTrue) } // genBinlogEventsWithGTIDs generates some binlog events used by testFileUtilSuite and testFileWriterSuite. // now, its generated events including 3 DDL and 10 DML. func genBinlogEventsWithGTIDs(c *C, flavor string, previousGTIDSet, latestGTID1, latestGTID2 gtid.Set) (*event.Generator, []*replication.BinlogEvent, []byte) { var ( serverID uint32 = 11 latestPos uint32 latestXID uint64 = 10 allEvents = make([]*replication.BinlogEvent, 0, 50) allData bytes.Buffer ) // use a binlog event generator to generate some binlog events. g, err := event.NewGenerator(flavor, serverID, latestPos, latestGTID1, previousGTIDSet, latestXID) c.Assert(err, IsNil) // file header with FormatDescriptionEvent and PreviousGTIDsEvent events, data, err := g.GenFileHeader() c.Assert(err, IsNil) allEvents = append(allEvents, events...) allData.Write(data) // CREATE DATABASE/TABLE, 3 DDL queries := []string{ "CREATE DATABASE `db`", "CREATE TABLE `db`.`tbl1` (c1 INT)", "CREATE TABLE `db`.`tbl2` (c1 INT)", } for _, query := range queries { events, data, err = g.GenDDLEvents("db", query) c.Assert(err, IsNil) allEvents = append(allEvents, events...) 
allData.Write(data) } // DMLs, 10 DML g.LatestGTID = latestGTID2 // use another latest GTID with different SID/DomainID var ( tableID uint64 = 8 columnType = []byte{gmysql.MYSQL_TYPE_LONG} eventType = replication.WRITE_ROWS_EVENTv2 schema = "db" table = "tbl1" ) for i := 0; i < 10; i++ { insertRows := make([][]interface{}, 0, 1) insertRows = append(insertRows, []interface{}{int32(i)}) dmlData := []*event.DMLData{ { TableID: tableID, Schema: schema, Table: table, ColumnType: columnType, Rows: insertRows, }, } events, data, err = g.GenDMLEvents(eventType, dmlData) c.Assert(err, IsNil) allEvents = append(allEvents, events...) allData.Write(data) } return g, allEvents, allData.Bytes() } func (t *testRelaySuite) TestHandleEvent(c *C) { // NOTE: we can test metrics later. var ( reader2 = &mockReader{} transformer2 = transformer.NewTransformer(parser.New()) writer2 = &mockWriter{} relayCfg = newRelayCfg(c, mysql.MariaDBFlavor) r = NewRelay(relayCfg).(*Relay) eventHeader = &replication.EventHeader{ Timestamp: uint32(time.Now().Unix()), ServerID: 11, } binlogPos = gmysql.Position{Name: "mysql-bin.666888", Pos: 4} rotateEv, _ = event.GenRotateEvent(eventHeader, 123, []byte(binlogPos.Name), uint64(binlogPos.Pos)) queryEv, _ = event.GenQueryEvent(eventHeader, 123, 0, 0, 0, nil, nil, []byte("CREATE DATABASE db_relay_test")) ) c.Assert(r.Init(context.Background()), IsNil) // NOTE: we can mock meta later. 
c.Assert(r.meta.Load(), IsNil) c.Assert(r.meta.AddDir("24ecd093-8cec-11e9-aa0d-0242ac170002", nil, nil, 0), IsNil) // attach GTID sets to QueryEv queryEv2 := queryEv.Event.(*replication.QueryEvent) queryEv2.GSet, _ = gmysql.ParseGTIDSet(relayCfg.Flavor, "1-2-3") ctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond) defer cancel() // reader return with an error for _, reader2.err = range []error{ errors.New("reader error for testing"), replication.ErrChecksumMismatch, replication.ErrSyncClosed, replication.ErrNeedSyncAgain, } { err := r.handleEvents(ctx, reader2, transformer2, writer2) c.Assert(errors.Cause(err), Equals, reader2.err) } // reader return valid event reader2.err = nil reader2.result.Event = rotateEv // writer return error writer2.err = errors.New("writer error for testing") // return with the annotated writer error err := r.handleEvents(ctx, reader2, transformer2, writer2) c.Assert(errors.Cause(err), Equals, writer2.err) // writer without error writer2.err = nil err = r.handleEvents(ctx, reader2, transformer2, writer2) // returned when ctx timeout c.Assert(errors.Cause(err), Equals, ctx.Err()) // check written event c.Assert(writer2.latestEvent, Equals, reader2.result.Event) // check meta _, pos := r.meta.Pos() _, gs := r.meta.GTID() c.Assert(pos, DeepEquals, binlogPos) c.Assert(gs.String(), Equals, "") // no GTID sets in event yet ctx2, cancel2 := context.WithTimeout(context.Background(), 10*time.Millisecond) defer cancel2() // write a QueryEvent with GTID sets reader2.result.Event = queryEv err = r.handleEvents(ctx2, reader2, transformer2, writer2) c.Assert(errors.Cause(err), Equals, ctx.Err()) // check written event c.Assert(writer2.latestEvent, Equals, reader2.result.Event) // check meta _, pos = r.meta.Pos() _, gs = r.meta.GTID() c.Assert(pos.Name, Equals, binlogPos.Name) c.Assert(pos.Pos, Equals, queryEv.Header.LogPos) c.Assert(gs.Origin(), DeepEquals, queryEv2.GSet) // got GTID sets // transformer return ignorable for the 
event reader2.err = nil reader2.result.Event = &replication.BinlogEvent{ Header: &replication.EventHeader{EventType: replication.HEARTBEAT_EVENT}, Event: &replication.GenericEvent{}} ctx4, cancel4 := context.WithTimeout(context.Background(), 10*time.Millisecond) defer cancel4() err = r.handleEvents(ctx4, reader2, transformer2, writer2) c.Assert(errors.Cause(err), Equals, ctx.Err()) select { case <-ctx4.Done(): default: c.Fatalf("ignorable event for transformer not ignored") } // writer return ignorable for the event reader2.result.Event = queryEv writer2.result.Ignore = true ctx5, cancel5 := context.WithTimeout(context.Background(), 10*time.Millisecond) defer cancel5() err = r.handleEvents(ctx5, reader2, transformer2, writer2) c.Assert(errors.Cause(err), Equals, ctx.Err()) select { case <-ctx5.Done(): default: c.Fatalf("ignorable event for writer not ignored") } } func (t *testRelaySuite) TestReSetupMeta(c *C) { ctx, cancel := context.WithTimeout(context.Background(), utils.DefaultDBTimeout) defer cancel() var ( relayCfg = newRelayCfg(c, mysql.MySQLFlavor) r = NewRelay(relayCfg).(*Relay) ) c.Assert(r.Init(context.Background()), IsNil) // empty metadata c.Assert(r.meta.Load(), IsNil) t.verifyMetadata(c, r, "", minCheckpoint, "", nil) // open connected DB and get its UUID db, err := openDBForTest() c.Assert(err, IsNil) r.db = db defer func() { r.db.Close() r.db = nil }() uuid, err := utils.GetServerUUID(ctx, r.db, r.cfg.Flavor) c.Assert(err, IsNil) // re-setup meta with start pos adjusted r.cfg.EnableGTID = true r.cfg.BinlogGTID = "24ecd093-8cec-11e9-aa0d-0242ac170002:1-23" r.cfg.BinLogName = "mysql-bin.000005" c.Assert(r.setSyncConfig(), IsNil) // all adjusted gset should be empty since we didn't flush logs emptyGTID, err := gtid.ParserGTID(r.cfg.Flavor, "") c.Assert(err, IsNil) c.Assert(r.reSetupMeta(ctx), IsNil) uuid001 := fmt.Sprintf("%s.000001", uuid) t.verifyMetadata(c, r, uuid001, gmysql.Position{Name: r.cfg.BinLogName, Pos: 4}, emptyGTID.String(), 
[]string{uuid001}) // re-setup meta again, often happen when connecting a server behind a VIP. c.Assert(r.reSetupMeta(ctx), IsNil) uuid002 := fmt.Sprintf("%s.000002", uuid) t.verifyMetadata(c, r, uuid002, minCheckpoint, emptyGTID.String(), []string{uuid001, uuid002}) r.cfg.BinLogName = "mysql-bin.000002" r.cfg.BinlogGTID = "24ecd093-8cec-11e9-aa0d-0242ac170002:1-50,24ecd093-8cec-11e9-aa0d-0242ac170003:1-50" r.cfg.UUIDSuffix = 2 c.Assert(r.reSetupMeta(ctx), IsNil) t.verifyMetadata(c, r, uuid002, gmysql.Position{Name: r.cfg.BinLogName, Pos: 4}, emptyGTID.String(), []string{uuid002}) // re-setup meta again, often happen when connecting a server behind a VIP. c.Assert(r.reSetupMeta(ctx), IsNil) uuid003 := fmt.Sprintf("%s.000003", uuid) t.verifyMetadata(c, r, uuid003, minCheckpoint, emptyGTID.String(), []string{uuid002, uuid003}) } func (t *testRelaySuite) verifyMetadata(c *C, r *Relay, uuidExpected string, posExpected gmysql.Position, gsStrExpected string, uuidsExpected []string) { uuid, pos := r.meta.Pos() _, gs := r.meta.GTID() gsExpected, err := gtid.ParserGTID(mysql.MySQLFlavor, gsStrExpected) c.Assert(err, IsNil) c.Assert(uuid, Equals, uuidExpected) c.Assert(pos, DeepEquals, posExpected) c.Assert(gs.Equal(gsExpected), IsTrue) indexFile := filepath.Join(r.cfg.RelayDir, utils.UUIDIndexFilename) UUIDs, err := utils.ParseUUIDIndex(indexFile) c.Assert(err, IsNil) c.Assert(UUIDs, DeepEquals, uuidsExpected) } func (t *testRelaySuite) TestProcess(c *C) { var ( dbCfg = getDBConfigForTest() relayCfg = &Config{ EnableGTID: false, // position mode, so auto-positioning can work Flavor: gmysql.MySQLFlavor, RelayDir: c.MkDir(), ServerID: 12321, From: config.DBConfig{ Host: dbCfg.Host, Port: dbCfg.Port, User: dbCfg.User, Password: dbCfg.Password, }, ReaderRetry: retry.ReaderRetryConfig{ BackoffRollback: 200 * time.Millisecond, BackoffMax: 1 * time.Second, BackoffMin: 1 * time.Millisecond, BackoffJitter: true, BackoffFactor: 2, }, } r = NewRelay(relayCfg).(*Relay) ) ctx, cancel := 
context.WithCancel(context.Background()) var wg sync.WaitGroup err := r.Init(ctx) c.Assert(err, IsNil) wg.Add(1) go func() { defer wg.Done() err2 := r.process(ctx) if !utils.IsErrBinlogPurged(err2) { // we can tolerate `ERROR 1236` caused by `RESET MASTER` in other test cases. c.Assert(err2, IsNil) } }() time.Sleep(1 * time.Second) // waiting for get events from upstream // kill the binlog dump connection ctx2, cancel2 := context.WithTimeout(context.Background(), 10*time.Second) defer cancel2() var connID uint32 c.Assert(utils.WaitSomething(30, 100*time.Millisecond, func() bool { connID, err = getBinlogDumpConnID(ctx2, r.db) return err == nil }), IsTrue) _, err = r.db.ExecContext(ctx2, fmt.Sprintf(`KILL %d`, connID)) c.Assert(err, IsNil) // execute a DDL again lastDDL := "CREATE DATABASE `test_relay_retry_db`" _, err = r.db.ExecContext(ctx2, lastDDL) c.Assert(err, IsNil) defer func() { query := "DROP DATABASE IF EXISTS `test_relay_retry_db`" _, err = r.db.ExecContext(ctx2, query) c.Assert(err, IsNil) }() time.Sleep(2 * time.Second) // waiting for events cancel() // stop processing wg.Wait() // should got the last DDL gotLastDDL := false onEventFunc := func(e *replication.BinlogEvent) error { switch ev := e.Event.(type) { case *replication.QueryEvent: if bytes.Contains(ev.Query, []byte(lastDDL)) { gotLastDDL = true } } return nil } parser2 := replication.NewBinlogParser() parser2.SetVerifyChecksum(true) // check whether have binlog file in relay directory // and check for events already done in `TestHandleEvent` uuid, err := utils.GetServerUUID(ctx2, r.db, r.cfg.Flavor) c.Assert(err, IsNil) files, err := streamer.CollectAllBinlogFiles(filepath.Join(relayCfg.RelayDir, fmt.Sprintf("%s.000001", uuid))) c.Assert(err, IsNil) var binlogFileCount int for _, f := range files { if binlog.VerifyFilename(f) { binlogFileCount++ if !gotLastDDL { err = parser2.ParseFile(filepath.Join(relayCfg.RelayDir, fmt.Sprintf("%s.000001", uuid), f), 0, onEventFunc) c.Assert(err, IsNil) } } } 
c.Assert(binlogFileCount, Greater, 0) c.Assert(gotLastDDL, IsTrue) } // getBinlogDumpConnID gets the `Binlog Dump` connection ID. // now only return the first one. func getBinlogDumpConnID(ctx context.Context, db *sql.DB) (uint32, error) { query := `SHOW PROCESSLIST` rows, err := db.QueryContext(ctx, query) if err != nil { return 0, err } defer rows.Close() var ( id sql.NullInt64 user sql.NullString host sql.NullString db2 sql.NullString command sql.NullString time2 sql.NullInt64 state sql.NullString info sql.NullString ) for rows.Next() { err = rows.Scan(&id, &user, &host, &db2, &command, &time2, &state, &info) if err != nil { return 0, err } if id.Valid && command.Valid && command.String == "Binlog Dump" { return uint32(id.Int64), rows.Err() } } return 0, errors.NotFoundf("Binlog Dump") }
[ "\"MYSQL_HOST\"", "\"MYSQL_PORT\"", "\"MYSQL_USER\"", "\"MYSQL_PSWD\"" ]
[]
[ "MYSQL_PORT", "MYSQL_USER", "MYSQL_PSWD", "MYSQL_HOST" ]
[]
["MYSQL_PORT", "MYSQL_USER", "MYSQL_PSWD", "MYSQL_HOST"]
go
4
0
examples/basics/cifar-convnet.py
#!/usr/bin/env python # -*- coding: utf-8 -*- # File: cifar-convnet.py # Author: Yuxin Wu import tensorflow as tf import argparse import os from tensorpack import * from tensorpack.tfutils.summary import * from tensorpack.dataflow import dataset from tensorpack.utils.gpu import get_num_gpu """ A small convnet model for Cifar10 or Cifar100 dataset. Cifar10 trained on 1 GPU: 91% accuracy after 50k iterations. 79 itr/s on P100 Not a good model for Cifar100, just for demonstration. """ class Model(ModelDesc): def __init__(self, cifar_classnum): super(Model, self).__init__() self.cifar_classnum = cifar_classnum def inputs(self): return [tf.placeholder(tf.float32, (None, 30, 30, 3), 'input'), tf.placeholder(tf.int32, (None,), 'label')] def build_graph(self, image, label): is_training = get_current_tower_context().is_training keep_prob = tf.constant(0.5 if is_training else 1.0) if is_training: tf.summary.image("train_image", image, 10) if tf.test.is_gpu_available(): image = tf.transpose(image, [0, 3, 1, 2]) data_format = 'channels_first' else: data_format = 'channels_last' image = image / 4.0 # just to make range smaller with argscope(Conv2D, activation=BNReLU, use_bias=False, kernel_size=3), \ argscope([Conv2D, MaxPooling, BatchNorm], data_format=data_format): logits = LinearWrap(image) \ .Conv2D('conv1.1', filters=64) \ .Conv2D('conv1.2', filters=64) \ .MaxPooling('pool1', 3, stride=2, padding='SAME') \ .Conv2D('conv2.1', filters=128) \ .Conv2D('conv2.2', filters=128) \ .MaxPooling('pool2', 3, stride=2, padding='SAME') \ .Conv2D('conv3.1', filters=128, padding='VALID') \ .Conv2D('conv3.2', filters=128, padding='VALID') \ .FullyConnected('fc0', 1024 + 512, activation=tf.nn.relu) \ .tf.nn.dropout(keep_prob) \ .FullyConnected('fc1', 512, activation=tf.nn.relu) \ .FullyConnected('linear', out_dim=self.cifar_classnum)() cost = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=label) cost = tf.reduce_mean(cost, name='cross_entropy_loss') correct = 
tf.to_float(tf.nn.in_top_k(logits, label, 1), name='correct') # monitor training error add_moving_summary(tf.reduce_mean(correct, name='accuracy')) # weight decay on all W of fc layers wd_cost = regularize_cost('fc.*/W', l2_regularizer(4e-4), name='regularize_loss') add_moving_summary(cost, wd_cost) add_param_summary(('.*/W', ['histogram'])) # monitor W return tf.add_n([cost, wd_cost], name='cost') def optimizer(self): lr = tf.get_variable('learning_rate', initializer=1e-2, trainable=False) tf.summary.scalar('lr', lr) return tf.train.AdamOptimizer(lr, epsilon=1e-3) def get_data(train_or_test, cifar_classnum): isTrain = train_or_test == 'train' if cifar_classnum == 10: ds = dataset.Cifar10(train_or_test) else: ds = dataset.Cifar100(train_or_test) if isTrain: augmentors = [ imgaug.RandomCrop((30, 30)), imgaug.Flip(horiz=True), imgaug.Brightness(63), imgaug.Contrast((0.2, 1.8)), imgaug.MeanVarianceNormalize(all_channel=True) ] else: augmentors = [ imgaug.CenterCrop((30, 30)), imgaug.MeanVarianceNormalize(all_channel=True) ] ds = AugmentImageComponent(ds, augmentors) ds = BatchData(ds, 128, remainder=not isTrain) if isTrain: ds = PrefetchDataZMQ(ds, 5) return ds def get_config(cifar_classnum): # prepare dataset dataset_train = get_data('train', cifar_classnum) dataset_test = get_data('test', cifar_classnum) def lr_func(lr): if lr < 3e-5: raise StopTraining() return lr * 0.31 return TrainConfig( model=Model(cifar_classnum), dataflow=dataset_train, callbacks=[ ModelSaver(), InferenceRunner(dataset_test, ScalarStats(['accuracy', 'cost'])), StatMonitorParamSetter('learning_rate', 'validation_accuracy', lr_func, threshold=0.001, last_k=10, reverse=True), ], max_epoch=150, ) if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('--gpu', help='comma separated list of GPU(s) to use.', required=True) parser.add_argument('--load', help='load model') parser.add_argument('--classnum', help='10 for cifar10 or 100 for cifar100', type=int, default=10) args 
= parser.parse_args() if args.gpu: os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu with tf.Graph().as_default(): logger.set_logger_dir(os.path.join('train_log', 'cifar' + str(args.classnum))) config = get_config(args.classnum) if args.load: config.session_init = SaverRestore(args.load) num_gpu = get_num_gpu() trainer = QueueInputTrainer() if num_gpu <= 1 \ else SyncMultiGPUTrainerParameterServer(num_gpu) launch_train_with_config(config, trainer)
[]
[]
[ "CUDA_VISIBLE_DEVICES" ]
[]
["CUDA_VISIBLE_DEVICES"]
python
1
0
cli/main.go
package main import ( "os" log "github.com/Sirupsen/logrus" "github.com/cnbm/container-orchestration/cli/cmd" ) func main() { if envd := os.Getenv("DEBUG"); envd != "" { log.SetLevel(log.DebugLevel) } cmd.Execute() }
[ "\"DEBUG\"" ]
[]
[ "DEBUG" ]
[]
["DEBUG"]
go
1
0
cmd/easyalert/main.go
package main import ( "database/sql" "fmt" "os" "github.com/bakku/easyalert/postgres" "github.com/bakku/easyalert/web" _ "github.com/lib/pq" ) func main() { port := os.Getenv("PORT") if port == "" { fmt.Println("no PORT env given") return } dbConnStr := os.Getenv("DATABASE_URL") if dbConnStr == "" { fmt.Println("no DATABASE_URL env given") return } db, err := sql.Open("postgres", dbConnStr) if err != nil { fmt.Println("error while connecting to database:", err) return } err = db.Ping() if err != nil { fmt.Println("error while pinging database:", err) return } userRepo := postgres.UserRepository{db} alertRepo := postgres.AlertRepository{db} server := web.NewServer(port, userRepo, alertRepo) server.Start() }
[ "\"PORT\"", "\"DATABASE_URL\"" ]
[]
[ "PORT", "DATABASE_URL" ]
[]
["PORT", "DATABASE_URL"]
go
2
0
v1/Bot/delete.py
import requests import json import os #from deleteGoogleDrive import delete_file payload = json.dumps({ "properties": { "Title": { "rich_text": [ { "text": { "content": "" } } ] }, "Tag": { "type": "multi_select", "multi_select": [ { "name": " " } ] }, "Contributor": { "type": "title", "title": [ { "text": { "content": "" } } ] }, "URL": { "url": None } } }) headers = { 'Authorization': os.environ["AUTH_KEY"], 'Notion-Version': '2021-05-13', 'Content-Type': 'application/json' } def deleteMe(searchObj_toDelete): # Deleting the record/page of the table using PATCH global headers url = f"https://api.notion.com/v1/pages/{searchObj_toDelete.id}" response = requests.request("PATCH", url, headers=headers, data=payload) print(response.content) #Check if the url of the object is a drive link url_of_obj = searchObj_toDelete.url if("drive" in url_of_obj and "file" in url_of_obj): file_id = url_of_obj.split('/')[-1] delete_file(file_id) # Future self raghavTinker: # The deletion is essentially a soft delete. Basically the record will have all of its field changed to " " as seen in the payload # Also as for the googleDrive delete. If the file isnt owned by the user the deletion function will exit gracefully and the record will still be wiped
[]
[]
[ "AUTH_KEY" ]
[]
["AUTH_KEY"]
python
1
0
src/pip/_internal/utils/misc.py
# The following comment should be removed at some point in the future. # mypy: strict-optional=False from __future__ import absolute_import import contextlib import errno import getpass import io import logging import os import posixpath import re import shutil import stat import subprocess import sys import tarfile import zipfile from collections import deque from pip._vendor import pkg_resources # NOTE: retrying is not annotated in typeshed as on 2017-07-17, which is # why we ignore the type on this import. from pip._vendor.retrying import retry # type: ignore from pip._vendor.six import PY2, text_type from pip._vendor.six.moves import input, shlex_quote from pip._vendor.six.moves.urllib import parse as urllib_parse from pip._vendor.six.moves.urllib import request as urllib_request from pip._vendor.six.moves.urllib.parse import unquote as urllib_unquote from pip import __version__ from pip._internal.exceptions import CommandError, InstallationError from pip._internal.locations import ( get_major_minor_version, site_packages, user_site, ) from pip._internal.utils.compat import ( WINDOWS, console_to_str, expanduser, stdlib_pkgs, str_to_display, ) from pip._internal.utils.marker_files import write_delete_marker_file from pip._internal.utils.typing import MYPY_CHECK_RUNNING from pip._internal.utils.virtualenv import ( running_under_virtualenv, virtualenv_no_global, ) if PY2: from io import BytesIO as StringIO else: from io import StringIO if MYPY_CHECK_RUNNING: from typing import ( Any, AnyStr, Container, Iterable, List, Mapping, Match, Optional, Text, Tuple, Union, cast, ) from pip._vendor.pkg_resources import Distribution from pip._internal.models.link import Link from pip._internal.utils.ui import SpinnerInterface VersionInfo = Tuple[int, int, int] else: # typing's cast() is needed at runtime, but we don't want to import typing. # Thus, we use a dummy no-op version, which we tell mypy to ignore. 
def cast(type_, value): # type: ignore return value __all__ = ['rmtree', 'display_path', 'backup_dir', 'ask', 'splitext', 'format_size', 'is_installable_dir', 'is_svn_page', 'file_contents', 'split_leading_dir', 'has_leading_dir', 'normalize_path', 'renames', 'get_prog', 'unzip_file', 'untar_file', 'unpack_file', 'call_subprocess', 'captured_stdout', 'ensure_dir', 'ARCHIVE_EXTENSIONS', 'SUPPORTED_EXTENSIONS', 'WHEEL_EXTENSION', 'get_installed_version', 'remove_auth_from_url'] logger = logging.getLogger(__name__) subprocess_logger = logging.getLogger('pip.subprocessor') LOG_DIVIDER = '----------------------------------------' WHEEL_EXTENSION = '.whl' BZ2_EXTENSIONS = ('.tar.bz2', '.tbz') XZ_EXTENSIONS = ('.tar.xz', '.txz', '.tlz', '.tar.lz', '.tar.lzma') ZIP_EXTENSIONS = ('.zip', WHEEL_EXTENSION) TAR_EXTENSIONS = ('.tar.gz', '.tgz', '.tar') ARCHIVE_EXTENSIONS = ( ZIP_EXTENSIONS + BZ2_EXTENSIONS + TAR_EXTENSIONS + XZ_EXTENSIONS) SUPPORTED_EXTENSIONS = ZIP_EXTENSIONS + TAR_EXTENSIONS try: import bz2 # noqa SUPPORTED_EXTENSIONS += BZ2_EXTENSIONS except ImportError: logger.debug('bz2 module is not available') try: # Only for Python 3.3+ import lzma # noqa SUPPORTED_EXTENSIONS += XZ_EXTENSIONS except ImportError: logger.debug('lzma module is not available') def get_pip_version(): # type: () -> str pip_pkg_dir = os.path.join(os.path.dirname(__file__), "..", "..") pip_pkg_dir = os.path.abspath(pip_pkg_dir) return ( 'pip {} from {} (python {})'.format( __version__, pip_pkg_dir, get_major_minor_version(), ) ) def normalize_version_info(py_version_info): # type: (Tuple[int, ...]) -> Tuple[int, int, int] """ Convert a tuple of ints representing a Python version to one of length three. :param py_version_info: a tuple of ints representing a Python version, or None to specify no version. The tuple can have any length. :return: a tuple of length three if `py_version_info` is non-None. Otherwise, return `py_version_info` unchanged (i.e. None). 
""" if len(py_version_info) < 3: py_version_info += (3 - len(py_version_info)) * (0,) elif len(py_version_info) > 3: py_version_info = py_version_info[:3] return cast('VersionInfo', py_version_info) def ensure_dir(path): # type: (AnyStr) -> None """os.path.makedirs without EEXIST.""" try: os.makedirs(path) except OSError as e: if e.errno != errno.EEXIST: raise def get_prog(): # type: () -> str try: prog = os.path.basename(sys.argv[0]) if prog in ('__main__.py', '-c'): return "%s -m pip" % sys.executable else: return prog except (AttributeError, TypeError, IndexError): pass return 'pip' # Retry every half second for up to 3 seconds @retry(stop_max_delay=3000, wait_fixed=500) def rmtree(dir, ignore_errors=False): # type: (str, bool) -> None shutil.rmtree(dir, ignore_errors=ignore_errors, onerror=rmtree_errorhandler) def rmtree_errorhandler(func, path, exc_info): """On Windows, the files in .svn are read-only, so when rmtree() tries to remove them, an exception is thrown. We catch that here, remove the read-only attribute, and hopefully continue without problems.""" # if file type currently read only if os.stat(path).st_mode & stat.S_IREAD: # convert to read/write os.chmod(path, stat.S_IWRITE) # use the original function to repeat the operation func(path) return else: raise def path_to_display(path): # type: (Optional[Union[str, Text]]) -> Optional[Text] """ Convert a bytes (or text) path to text (unicode in Python 2) for display and logging purposes. This function should never error out. Also, this function is mainly needed for Python 2 since in Python 3 str paths are already text. """ if path is None: return None if isinstance(path, text_type): return path # Otherwise, path is a bytes object (str in Python 2). try: display_path = path.decode(sys.getfilesystemencoding(), 'strict') except UnicodeDecodeError: # Include the full bytes to make troubleshooting easier, even though # it may not be very human readable. 
if PY2: # Convert the bytes to a readable str representation using # repr(), and then convert the str to unicode. # Also, we add the prefix "b" to the repr() return value both # to make the Python 2 output look like the Python 3 output, and # to signal to the user that this is a bytes representation. display_path = str_to_display('b{!r}'.format(path)) else: # Silence the "F821 undefined name 'ascii'" flake8 error since # in Python 3 ascii() is a built-in. display_path = ascii(path) # noqa: F821 return display_path def display_path(path): # type: (Union[str, Text]) -> str """Gives the display value for a given path, making it relative to cwd if possible.""" path = os.path.normcase(os.path.abspath(path)) if sys.version_info[0] == 2: path = path.decode(sys.getfilesystemencoding(), 'replace') path = path.encode(sys.getdefaultencoding(), 'replace') if path.startswith(os.getcwd() + os.path.sep): path = '.' + path[len(os.getcwd()):] return path def backup_dir(dir, ext='.bak'): # type: (str, str) -> str """Figure out the name of a directory to back up the given dir to (adding .bak, .bak2, etc)""" n = 1 extension = ext while os.path.exists(dir + extension): n += 1 extension = ext + str(n) return dir + extension def ask_path_exists(message, options): # type: (str, Iterable[str]) -> str for action in os.environ.get('PIP_EXISTS_ACTION', '').split(): if action in options: return action return ask(message, options) def _check_no_input(message): # type: (str) -> None """Raise an error if no input is allowed.""" if os.environ.get('PIP_NO_INPUT'): raise Exception( 'No input was expected ($PIP_NO_INPUT set); question: %s' % message ) def ask(message, options): # type: (str, Iterable[str]) -> str """Ask the message interactively, with the given possible responses""" while 1: _check_no_input(message) response = input(message) response = response.strip().lower() if response not in options: print( 'Your response (%r) was not one of the expected responses: ' '%s' % (response, ', 
'.join(options)) ) else: return response def ask_input(message): # type: (str) -> str """Ask for input interactively.""" _check_no_input(message) return input(message) def ask_password(message): # type: (str) -> str """Ask for a password interactively.""" _check_no_input(message) return getpass.getpass(message) def format_size(bytes): # type: (float) -> str if bytes > 1000 * 1000: return '%.1fMB' % (bytes / 1000.0 / 1000) elif bytes > 10 * 1000: return '%ikB' % (bytes / 1000) elif bytes > 1000: return '%.1fkB' % (bytes / 1000.0) else: return '%ibytes' % bytes def is_installable_dir(path): # type: (str) -> bool """Is path is a directory containing setup.py or pyproject.toml? """ if not os.path.isdir(path): return False setup_py = os.path.join(path, 'setup.py') if os.path.isfile(setup_py): return True pyproject_toml = os.path.join(path, 'pyproject.toml') if os.path.isfile(pyproject_toml): return True return False def is_svn_page(html): # type: (Union[str, Text]) -> Optional[Match[Union[str, Text]]] """ Returns true if the page appears to be the index page of an svn repository """ return (re.search(r'<title>[^<]*Revision \d+:', html) and re.search(r'Powered by (?:<a[^>]*?>)?Subversion', html, re.I)) def file_contents(filename): # type: (str) -> Text with open(filename, 'rb') as fp: return fp.read().decode('utf-8') def read_chunks(file, size=io.DEFAULT_BUFFER_SIZE): """Yield pieces of data from a file-like object until EOF.""" while True: chunk = file.read(size) if not chunk: break yield chunk def split_leading_dir(path): # type: (Union[str, Text]) -> List[Union[str, Text]] path = path.lstrip('/').lstrip('\\') if '/' in path and (('\\' in path and path.find('/') < path.find('\\')) or '\\' not in path): return path.split('/', 1) elif '\\' in path: return path.split('\\', 1) else: return [path, ''] def has_leading_dir(paths): # type: (Iterable[Union[str, Text]]) -> bool """Returns true if all the paths have the same leading path name (i.e., everything is in one 
subdirectory in an archive)""" common_prefix = None for path in paths: prefix, rest = split_leading_dir(path) if not prefix: return False elif common_prefix is None: common_prefix = prefix elif prefix != common_prefix: return False return True def normalize_path(path, resolve_symlinks=True): # type: (str, bool) -> str """ Convert a path to its canonical, case-normalized, absolute version. """ path = expanduser(path) if resolve_symlinks: path = os.path.realpath(path) else: path = os.path.abspath(path) return os.path.normcase(path) def splitext(path): # type: (str) -> Tuple[str, str] """Like os.path.splitext, but take off .tar too""" base, ext = posixpath.splitext(path) if base.lower().endswith('.tar'): ext = base[-4:] + ext base = base[:-4] return base, ext def renames(old, new): # type: (str, str) -> None """Like os.renames(), but handles renaming across devices.""" # Implementation borrowed from os.renames(). head, tail = os.path.split(new) if head and tail and not os.path.exists(head): os.makedirs(head) shutil.move(old, new) head, tail = os.path.split(old) if head and tail: try: os.removedirs(head) except OSError: pass def is_local(path): # type: (str) -> bool """ Return True if path is within sys.prefix, if we're running in a virtualenv. If we're not in a virtualenv, all paths are considered "local." """ if not running_under_virtualenv(): return True return normalize_path(path).startswith(normalize_path(sys.prefix)) def dist_is_local(dist): # type: (Distribution) -> bool """ Return True if given Distribution object is installed locally (i.e. within current virtualenv). Always True if we're not in a virtualenv. """ return is_local(dist_location(dist)) def dist_in_usersite(dist): # type: (Distribution) -> bool """ Return True if given Distribution is installed in user site. 
""" norm_path = normalize_path(dist_location(dist)) return norm_path.startswith(normalize_path(user_site)) def dist_in_site_packages(dist): # type: (Distribution) -> bool """ Return True if given Distribution is installed in sysconfig.get_python_lib(). """ return normalize_path( dist_location(dist) ).startswith(normalize_path(site_packages)) def dist_is_editable(dist): # type: (Distribution) -> bool """ Return True if given Distribution is an editable install. """ for path_item in sys.path: egg_link = os.path.join(path_item, dist.project_name + '.egg-link') if os.path.isfile(egg_link): return True return False def get_installed_distributions( local_only=True, # type: bool skip=stdlib_pkgs, # type: Container[str] include_editables=True, # type: bool editables_only=False, # type: bool user_only=False, # type: bool paths=None # type: Optional[List[str]] ): # type: (...) -> List[Distribution] """ Return a list of installed Distribution objects. If ``local_only`` is True (default), only return installations local to the current virtualenv, if in a virtualenv. ``skip`` argument is an iterable of lower-case project names to ignore; defaults to stdlib_pkgs If ``include_editables`` is False, don't report editables. If ``editables_only`` is True , only report editables. If ``user_only`` is True , only report installations in the user site directory. If ``paths`` is set, only report the distributions present at the specified list of locations. 
""" if paths: working_set = pkg_resources.WorkingSet(paths) else: working_set = pkg_resources.working_set if local_only: local_test = dist_is_local else: def local_test(d): return True if include_editables: def editable_test(d): return True else: def editable_test(d): return not dist_is_editable(d) if editables_only: def editables_only_test(d): return dist_is_editable(d) else: def editables_only_test(d): return True if user_only: user_test = dist_in_usersite else: def user_test(d): return True # because of pkg_resources vendoring, mypy cannot find stub in typeshed return [d for d in working_set # type: ignore if local_test(d) and d.key not in skip and editable_test(d) and editables_only_test(d) and user_test(d) ] def egg_link_path(dist): # type: (Distribution) -> Optional[str] """ Return the path for the .egg-link file if it exists, otherwise, None. There's 3 scenarios: 1) not in a virtualenv try to find in site.USER_SITE, then site_packages 2) in a no-global virtualenv try to find in site_packages 3) in a yes-global virtualenv try to find in site_packages, then site.USER_SITE (don't look in global location) For #1 and #3, there could be odd cases, where there's an egg-link in 2 locations. This method will just return the first one found. """ sites = [] if running_under_virtualenv(): if virtualenv_no_global(): sites.append(site_packages) else: sites.append(site_packages) if user_site: sites.append(user_site) else: if user_site: sites.append(user_site) sites.append(site_packages) for site in sites: egglink = os.path.join(site, dist.project_name) + '.egg-link' if os.path.isfile(egglink): return egglink return None def dist_location(dist): # type: (Distribution) -> str """ Get the site-packages location of this distribution. Generally this is dist.location, except in the case of develop-installed packages, where dist.location is the source code location, and we want to know where the egg-link file is. 
""" egg_link = egg_link_path(dist) if egg_link: return egg_link return dist.location def current_umask(): """Get the current umask which involves having to set it temporarily.""" mask = os.umask(0) os.umask(mask) return mask def unzip_file(filename, location, flatten=True): # type: (str, str, bool) -> None """ Unzip the file (with path `filename`) to the destination `location`. All files are written based on system defaults and umask (i.e. permissions are not preserved), except that regular file members with any execute permissions (user, group, or world) have "chmod +x" applied after being written. Note that for windows, any execute changes using os.chmod are no-ops per the python docs. """ ensure_dir(location) zipfp = open(filename, 'rb') try: zip = zipfile.ZipFile(zipfp, allowZip64=True) leading = has_leading_dir(zip.namelist()) and flatten for info in zip.infolist(): name = info.filename fn = name if leading: fn = split_leading_dir(name)[1] fn = os.path.join(location, fn) dir = os.path.dirname(fn) if fn.endswith('/') or fn.endswith('\\'): # A directory ensure_dir(fn) else: ensure_dir(dir) # Don't use read() to avoid allocating an arbitrarily large # chunk of memory for the file's content fp = zip.open(name) try: with open(fn, 'wb') as destfp: shutil.copyfileobj(fp, destfp) finally: fp.close() mode = info.external_attr >> 16 # if mode and regular file and any execute permissions for # user/group/world? if mode and stat.S_ISREG(mode) and mode & 0o111: # make dest file have execute for user/group/world # (chmod +x) no-op on windows per python docs os.chmod(fn, (0o777 - current_umask() | 0o111)) finally: zipfp.close() def untar_file(filename, location): # type: (str, str) -> None """ Untar the file (with path `filename`) to the destination `location`. All files are written based on system defaults and umask (i.e. 
permissions are not preserved), except that regular file members with any execute permissions (user, group, or world) have "chmod +x" applied after being written. Note that for windows, any execute changes using os.chmod are no-ops per the python docs. """ ensure_dir(location) if filename.lower().endswith('.gz') or filename.lower().endswith('.tgz'): mode = 'r:gz' elif filename.lower().endswith(BZ2_EXTENSIONS): mode = 'r:bz2' elif filename.lower().endswith(XZ_EXTENSIONS): mode = 'r:xz' elif filename.lower().endswith('.tar'): mode = 'r' else: logger.warning( 'Cannot determine compression type for file %s', filename, ) mode = 'r:*' tar = tarfile.open(filename, mode) try: leading = has_leading_dir([ member.name for member in tar.getmembers() ]) for member in tar.getmembers(): fn = member.name if leading: # https://github.com/python/mypy/issues/1174 fn = split_leading_dir(fn)[1] # type: ignore path = os.path.join(location, fn) if member.isdir(): ensure_dir(path) elif member.issym(): try: # https://github.com/python/typeshed/issues/2673 tar._extract_member(member, path) # type: ignore except Exception as exc: # Some corrupt tar files seem to produce this # (specifically bad symlinks) logger.warning( 'In the tar file %s the member %s is invalid: %s', filename, member.name, exc, ) continue else: try: fp = tar.extractfile(member) except (KeyError, AttributeError) as exc: # Some corrupt tar files seem to produce this # (specifically bad symlinks) logger.warning( 'In the tar file %s the member %s is invalid: %s', filename, member.name, exc, ) continue ensure_dir(os.path.dirname(path)) with open(path, 'wb') as destfp: shutil.copyfileobj(fp, destfp) fp.close() # Update the timestamp (useful for cython compiled files) # https://github.com/python/typeshed/issues/2673 tar.utime(member, path) # type: ignore # member have any execute permissions for user/group/world? 
if member.mode & 0o111: # make dest file have execute for user/group/world # no-op on windows per python docs os.chmod(path, (0o777 - current_umask() | 0o111)) finally: tar.close() def unpack_file( filename, # type: str location, # type: str content_type, # type: Optional[str] link # type: Optional[Link] ): # type: (...) -> None filename = os.path.realpath(filename) if (content_type == 'application/zip' or filename.lower().endswith(ZIP_EXTENSIONS) or zipfile.is_zipfile(filename)): unzip_file( filename, location, flatten=not filename.endswith('.whl') ) elif (content_type == 'application/x-gzip' or tarfile.is_tarfile(filename) or filename.lower().endswith( TAR_EXTENSIONS + BZ2_EXTENSIONS + XZ_EXTENSIONS)): untar_file(filename, location) elif (content_type and content_type.startswith('text/html') and is_svn_page(file_contents(filename))): # We don't really care about this from pip._internal.vcs.subversion import Subversion url = 'svn+' + link.url Subversion().unpack(location, url=url) else: # FIXME: handle? # FIXME: magic signatures? logger.critical( 'Cannot unpack file %s (downloaded from %s, content-type: %s); ' 'cannot detect archive format', filename, location, content_type, ) raise InstallationError( 'Cannot determine archive format of %s' % location ) def format_command_args(args): # type: (List[str]) -> str """ Format command arguments for display. """ return ' '.join(shlex_quote(arg) for arg in args) def make_subprocess_output_error( cmd_args, # type: List[str] cwd, # type: Optional[str] lines, # type: List[Text] exit_status, # type: int ): # type: (...) -> Text """ Create and return the error message to use to log a subprocess error with command output. :param lines: A list of lines, each ending with a newline. """ command = format_command_args(cmd_args) # Convert `command` and `cwd` to text (unicode in Python 2) so we can use # them as arguments in the unicode format string below. This avoids # "UnicodeDecodeError: 'ascii' codec can't decode byte ..." 
in Python 2 # if either contains a non-ascii character. command_display = str_to_display(command, desc='command bytes') cwd_display = path_to_display(cwd) # We know the joined output value ends in a newline. output = ''.join(lines) msg = ( # Use a unicode string to avoid "UnicodeEncodeError: 'ascii' # codec can't encode character ..." in Python 2 when a format # argument (e.g. `output`) has a non-ascii character. u'Command errored out with exit status {exit_status}:\n' ' command: {command_display}\n' ' cwd: {cwd_display}\n' 'Complete output ({line_count} lines):\n{output}{divider}' ).format( exit_status=exit_status, command_display=command_display, cwd_display=cwd_display, line_count=len(lines), output=output, divider=LOG_DIVIDER, ) return msg def call_subprocess( cmd, # type: List[str] show_stdout=False, # type: bool cwd=None, # type: Optional[str] on_returncode='raise', # type: str extra_ok_returncodes=None, # type: Optional[Iterable[int]] command_desc=None, # type: Optional[str] extra_environ=None, # type: Optional[Mapping[str, Any]] unset_environ=None, # type: Optional[Iterable[str]] spinner=None # type: Optional[SpinnerInterface] ): # type: (...) -> Text """ Args: show_stdout: if true, use INFO to log the subprocess's stderr and stdout streams. Otherwise, use DEBUG. Defaults to False. extra_ok_returncodes: an iterable of integer return codes that are acceptable, in addition to 0. Defaults to None, which means []. unset_environ: an iterable of environment variable names to unset prior to calling subprocess.Popen(). """ if extra_ok_returncodes is None: extra_ok_returncodes = [] if unset_environ is None: unset_environ = [] # Most places in pip use show_stdout=False. What this means is-- # # - We connect the child's output (combined stderr and stdout) to a # single pipe, which we read. # - We log this output to stderr at DEBUG level as it is received. # - If DEBUG logging isn't enabled (e.g. 
if --verbose logging wasn't # requested), then we show a spinner so the user can still see the # subprocess is in progress. # - If the subprocess exits with an error, we log the output to stderr # at ERROR level if it hasn't already been displayed to the console # (e.g. if --verbose logging wasn't enabled). This way we don't log # the output to the console twice. # # If show_stdout=True, then the above is still done, but with DEBUG # replaced by INFO. if show_stdout: # Then log the subprocess output at INFO level. log_subprocess = subprocess_logger.info used_level = logging.INFO else: # Then log the subprocess output using DEBUG. This also ensures # it will be logged to the log file (aka user_log), if enabled. log_subprocess = subprocess_logger.debug used_level = logging.DEBUG # Whether the subprocess will be visible in the console. showing_subprocess = subprocess_logger.getEffectiveLevel() <= used_level # Only use the spinner if we're not showing the subprocess output # and we have a spinner. use_spinner = not showing_subprocess and spinner is not None if command_desc is None: command_desc = format_command_args(cmd) log_subprocess("Running command %s", command_desc) env = os.environ.copy() if extra_environ: env.update(extra_environ) for name in unset_environ: env.pop(name, None) try: proc = subprocess.Popen( cmd, stderr=subprocess.STDOUT, stdin=subprocess.PIPE, stdout=subprocess.PIPE, cwd=cwd, env=env, ) proc.stdin.close() except Exception as exc: subprocess_logger.critical( "Error %s while executing command %s", exc, command_desc, ) raise all_output = [] while True: # The "line" value is a unicode string in Python 2. line = console_to_str(proc.stdout.readline()) if not line: break line = line.rstrip() all_output.append(line + '\n') # Show the line immediately. log_subprocess(line) # Update the spinner. 
if use_spinner: spinner.spin() try: proc.wait() finally: if proc.stdout: proc.stdout.close() proc_had_error = ( proc.returncode and proc.returncode not in extra_ok_returncodes ) if use_spinner: if proc_had_error: spinner.finish("error") else: spinner.finish("done") if proc_had_error: if on_returncode == 'raise': if not showing_subprocess: # Then the subprocess streams haven't been logged to the # console yet. msg = make_subprocess_output_error( cmd_args=cmd, cwd=cwd, lines=all_output, exit_status=proc.returncode, ) subprocess_logger.error(msg) exc_msg = ( 'Command errored out with exit status {}: {} ' 'Check the logs for full command output.' ).format(proc.returncode, command_desc) raise InstallationError(exc_msg) elif on_returncode == 'warn': subprocess_logger.warning( 'Command "%s" had error code %s in %s', command_desc, proc.returncode, cwd, ) elif on_returncode == 'ignore': pass else: raise ValueError('Invalid value: on_returncode=%s' % repr(on_returncode)) return ''.join(all_output) def _make_build_dir(build_dir): os.makedirs(build_dir) write_delete_marker_file(build_dir) class FakeFile(object): """Wrap a list of lines in an object with readline() to make ConfigParser happy.""" def __init__(self, lines): self._gen = (l for l in lines) def readline(self): try: try: return next(self._gen) except NameError: return self._gen.next() except StopIteration: return '' def __iter__(self): return self._gen class StreamWrapper(StringIO): @classmethod def from_stream(cls, orig_stream): cls.orig_stream = orig_stream return cls() # compileall.compile_dir() needs stdout.encoding to print to stdout @property def encoding(self): return self.orig_stream.encoding @contextlib.contextmanager def captured_output(stream_name): """Return a context manager used by captured_stdout/stdin/stderr that temporarily replaces the sys stream *stream_name* with a StringIO. Taken from Lib/support/__init__.py in the CPython repo. 
""" orig_stdout = getattr(sys, stream_name) setattr(sys, stream_name, StreamWrapper.from_stream(orig_stdout)) try: yield getattr(sys, stream_name) finally: setattr(sys, stream_name, orig_stdout) def captured_stdout(): """Capture the output of sys.stdout: with captured_stdout() as stdout: print('hello') self.assertEqual(stdout.getvalue(), 'hello\n') Taken from Lib/support/__init__.py in the CPython repo. """ return captured_output('stdout') def captured_stderr(): """ See captured_stdout(). """ return captured_output('stderr') class cached_property(object): """A property that is only computed once per instance and then replaces itself with an ordinary attribute. Deleting the attribute resets the property. Source: https://github.com/bottlepy/bottle/blob/0.11.5/bottle.py#L175 """ def __init__(self, func): self.__doc__ = getattr(func, '__doc__') self.func = func def __get__(self, obj, cls): if obj is None: # We're being accessed from the class itself, not from an object return self value = obj.__dict__[self.func.__name__] = self.func(obj) return value def get_installed_version(dist_name, working_set=None): """Get the installed version of dist_name avoiding pkg_resources cache""" # Create a requirement that we'll look for inside of setuptools. req = pkg_resources.Requirement.parse(dist_name) if working_set is None: # We want to avoid having this cached, so we need to construct a new # working set each time. working_set = pkg_resources.WorkingSet() # Get the installed distribution from our working set dist = working_set.find(req) # Check to see if we got an installed distribution or not, if we did # we want to return it's version. 
return dist.version if dist else None def consume(iterator): """Consume an iterable at C speed.""" deque(iterator, maxlen=0) # Simulates an enum def enum(*sequential, **named): enums = dict(zip(sequential, range(len(sequential))), **named) reverse = {value: key for key, value in enums.items()} enums['reverse_mapping'] = reverse return type('Enum', (), enums) def path_to_url(path): # type: (Union[str, Text]) -> str """ Convert a path to a file: URL. The path will be made absolute and have quoted path parts. """ path = os.path.normpath(os.path.abspath(path)) url = urllib_parse.urljoin('file:', urllib_request.pathname2url(path)) return url def build_url_from_netloc(netloc, scheme='https'): # type: (str, str) -> str """ Build a full URL from a netloc. """ if netloc.count(':') >= 2 and '@' not in netloc and '[' not in netloc: # It must be a bare IPv6 address, so wrap it with brackets. netloc = '[{}]'.format(netloc) return '{}://{}'.format(scheme, netloc) def netloc_has_port(netloc): # type: (str) -> bool """ Return whether the netloc has a port part. """ url = build_url_from_netloc(netloc) parsed = urllib_parse.urlparse(url) return bool(parsed.port) def split_auth_from_netloc(netloc): """ Parse out and remove the auth information from a netloc. Returns: (netloc, (username, password)). """ if '@' not in netloc: return netloc, (None, None) # Split from the right because that's how urllib.parse.urlsplit() # behaves if more than one @ is present (which can be checked using # the password attribute of urlsplit()'s return value). 
auth, netloc = netloc.rsplit('@', 1) if ':' in auth: # Split from the left because that's how urllib.parse.urlsplit() # behaves if more than one : is present (which again can be checked # using the password attribute of the return value) user_pass = auth.split(':', 1) else: user_pass = auth, None user_pass = tuple( None if x is None else urllib_unquote(x) for x in user_pass ) return netloc, user_pass def redact_netloc(netloc): # type: (str) -> str """ Replace the password in a netloc with "****", if it exists. For example, "user:[email protected]" returns "user:****@example.com". """ netloc, (user, password) = split_auth_from_netloc(netloc) if user is None: return netloc password = '' if password is None else ':****' return '{user}{password}@{netloc}'.format(user=urllib_parse.quote(user), password=password, netloc=netloc) def _transform_url(url, transform_netloc): """Transform and replace netloc in a url. transform_netloc is a function taking the netloc and returning a tuple. The first element of this tuple is the new netloc. The entire tuple is returned. Returns a tuple containing the transformed url as item 0 and the original tuple returned by transform_netloc as item 1. """ purl = urllib_parse.urlsplit(url) netloc_tuple = transform_netloc(purl.netloc) # stripped url url_pieces = ( purl.scheme, netloc_tuple[0], purl.path, purl.query, purl.fragment ) surl = urllib_parse.urlunsplit(url_pieces) return surl, netloc_tuple def _get_netloc(netloc): return split_auth_from_netloc(netloc) def _redact_netloc(netloc): return (redact_netloc(netloc),) def split_auth_netloc_from_url(url): # type: (str) -> Tuple[str, str, Tuple[str, str]] """ Parse a url into separate netloc, auth, and url with no auth. 
Returns: (url_without_auth, netloc, (username, password)) """ url_without_auth, (netloc, auth) = _transform_url(url, _get_netloc) return url_without_auth, netloc, auth def remove_auth_from_url(url): # type: (str) -> str """Return a copy of url with 'username:password@' removed.""" # username/pass params are passed to subversion through flags # and are not recognized in the url. return _transform_url(url, _get_netloc)[0] def redact_password_from_url(url): # type: (str) -> str """Replace the password in a given url with ****.""" return _transform_url(url, _redact_netloc)[0] def protect_pip_from_modification_on_windows(modifying_pip): """Protection of pip.exe from modification on Windows On Windows, any operation modifying pip should be run as: python -m pip ... """ pip_names = [ "pip.exe", "pip{}.exe".format(sys.version_info[0]), "pip{}.{}.exe".format(*sys.version_info[:2]) ] # See https://github.com/pypa/pip/issues/1299 for more discussion should_show_use_python_msg = ( modifying_pip and WINDOWS and os.path.basename(sys.argv[0]) in pip_names ) if should_show_use_python_msg: new_command = [ sys.executable, "-m", "pip" ] + sys.argv[1:] raise CommandError( 'To modify pip, please run the following command:\n{}' .format(" ".join(new_command)) )
[]
[]
[ "PIP_NO_INPUT", "PIP_EXISTS_ACTION" ]
[]
["PIP_NO_INPUT", "PIP_EXISTS_ACTION"]
python
2
0
cmd/gateway/gcs/gateway-gcs.go
/*
 * MinIO Cloud Storage, (C) 2017-2020 MinIO, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package gcs

import (
	"context"
	"encoding/base64"
	"encoding/hex"
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"io/ioutil"
	"math"
	"net/http"
	"os"
	"path"
	"strconv"
	"regexp"
	"strings"
	"time"

	"cloud.google.com/go/storage"
	humanize "github.com/dustin/go-humanize"
	"github.com/minio/cli"
	miniogopolicy "github.com/minio/minio-go/v6/pkg/policy"
	"github.com/minio/minio/cmd/logger"
	"github.com/minio/minio/pkg/auth"
	"github.com/minio/minio/pkg/bucket/policy"
	"github.com/minio/minio/pkg/bucket/policy/condition"
	"github.com/minio/minio/pkg/env"
	"google.golang.org/api/googleapi"
	"google.golang.org/api/iterator"
	"google.golang.org/api/option"

	minio "github.com/minio/minio/cmd"
)

// Sentinel errors for GCS gateway startup/configuration problems.
var (
	// Project ID format is not valid.
	errGCSInvalidProjectID = fmt.Errorf("GCS project id is either empty or invalid")

	// Project ID not found
	errGCSProjectIDNotFound = fmt.Errorf("Unknown project id")

	// Invalid format.
	errGCSFormat = fmt.Errorf("Unknown format")
)

const (
	// Path where multipart objects are saved.
	// If we change the backend format we will use a different url path like /multipart/v2
	// but we will not migrate old data.
	gcsMinioMultipartPathV1 = minio.GatewayMinioSysTmp + "multipart/v1"

	// Multipart meta file.
	gcsMinioMultipartMeta = "gcs.json"

	// gcs.json version number
	gcsMinioMultipartMetaCurrentVersion = "1"

	// token prefixed with GCS returned marker to differentiate
	// from user supplied marker.
	gcsTokenPrefix = "{minio}"

	// Maximum component object count to create a composite object.
	// Refer https://cloud.google.com/storage/docs/composite-objects
	gcsMaxComponents = 32

	// Every 24 hours we scan minio.sys.tmp to delete expired multiparts in minio.sys.tmp
	gcsCleanupInterval = time.Hour * 24

	// The cleanup routine deletes files older than 2 weeks in minio.sys.tmp
	gcsMultipartExpiry = time.Hour * 24 * 14

	// Project ID key in credentials.json
	gcsProjectIDKey = "project_id"

	// Gateway backend name used for CLI registration and Name().
	gcsBackend = "gcs"
)

// init registers the "gcs" gateway subcommand with the MinIO CLI,
// including its help template shown by `minio gateway gcs --help`.
func init() {
	const gcsGatewayTemplate = `NAME:
  {{.HelpName}} - {{.Usage}}

USAGE:
  {{.HelpName}} {{if .VisibleFlags}}[FLAGS]{{end}} [PROJECTID]
{{if .VisibleFlags}}
FLAGS:
  {{range .VisibleFlags}}{{.}}
  {{end}}{{end}}
PROJECTID:
  optional GCS project-id expected GOOGLE_APPLICATION_CREDENTIALS env is not set

GOOGLE_APPLICATION_CREDENTIALS:
  path to credentials.json, generated it from here https://developers.google.com/identity/protocols/application-default-credentials

EXAMPLES:
  1. Start minio gateway server for GCS backend
     {{.Prompt}} {{.EnvVarSetCommand}} GOOGLE_APPLICATION_CREDENTIALS{{.AssignmentOperator}}/path/to/credentials.json
     {{.Prompt}} {{.EnvVarSetCommand}} MINIO_ACCESS_KEY{{.AssignmentOperator}}accesskey
     {{.Prompt}} {{.EnvVarSetCommand}} MINIO_SECRET_KEY{{.AssignmentOperator}}secretkey
     {{.Prompt}} {{.HelpName}} mygcsprojectid

  2. Start minio gateway server for GCS backend with edge caching enabled
     {{.Prompt}} {{.EnvVarSetCommand}} GOOGLE_APPLICATION_CREDENTIALS{{.AssignmentOperator}}/path/to/credentials.json
     {{.Prompt}} {{.EnvVarSetCommand}} MINIO_ACCESS_KEY{{.AssignmentOperator}}accesskey
     {{.Prompt}} {{.EnvVarSetCommand}} MINIO_SECRET_KEY{{.AssignmentOperator}}secretkey
     {{.Prompt}} {{.EnvVarSetCommand}} MINIO_CACHE_DRIVES{{.AssignmentOperator}}"/mnt/drive1,/mnt/drive2,/mnt/drive3,/mnt/drive4"
     {{.Prompt}} {{.EnvVarSetCommand}} MINIO_CACHE_EXCLUDE{{.AssignmentOperator}}"bucket1/*;*.png"
     {{.Prompt}} {{.EnvVarSetCommand}} MINIO_CACHE_AFTER{{.AssignmentOperator}}3
     {{.Prompt}} {{.EnvVarSetCommand}} MINIO_CACHE_WATERMARK_LOW{{.AssignmentOperator}}75
     {{.Prompt}} {{.EnvVarSetCommand}} MINIO_CACHE_WATERMARK_HIGH{{.AssignmentOperator}}85
     {{.Prompt}} {{.EnvVarSetCommand}} MINIO_CACHE_QUOTA{{.AssignmentOperator}}90
     {{.Prompt}} {{.HelpName}} mygcsprojectid
`

	minio.RegisterGatewayCommand(cli.Command{
		Name:               gcsBackend,
		Usage:              "Google Cloud Storage",
		Action:             gcsGatewayMain,
		CustomHelpTemplate: gcsGatewayTemplate,
		HideHelpCommand:    true,
	})
}

// Handler for 'minio gateway gcs' command line.
//
// The project ID may come from the first CLI argument or be derived later
// from GOOGLE_APPLICATION_CREDENTIALS; if neither is available, or the
// supplied ID is malformed, the command exits with usage help.
func gcsGatewayMain(ctx *cli.Context) {
	projectID := ctx.Args().First()
	if projectID == "" && os.Getenv("GOOGLE_APPLICATION_CREDENTIALS") == "" {
		logger.LogIf(minio.GlobalContext, errGCSProjectIDNotFound, logger.Application)
		cli.ShowCommandHelpAndExit(ctx, "gcs", 1)
	}
	if projectID != "" && !isValidGCSProjectIDFormat(projectID) {
		reqInfo := (&logger.ReqInfo{}).AppendTags("projectID", ctx.Args().First())
		contxt := logger.SetReqInfo(minio.GlobalContext, reqInfo)
		logger.LogIf(contxt, errGCSInvalidProjectID, logger.Application)
		cli.ShowCommandHelpAndExit(ctx, "gcs", 1)
	}

	minio.StartGateway(ctx, &GCS{projectID})
}

// GCS implements the MinIO Gateway for Google Cloud Storage.
type GCS struct {
	projectID string // GCS project id; may be empty until resolved from credentials
}

// Name returns the name of gcs ObjectLayer.
func (g *GCS) Name() string {
	return gcsBackend
}

// NewGatewayLayer returns gcs ObjectLayer.
// NewGatewayLayer constructs the GCS-backed ObjectLayer: it resolves the
// project ID (from credentials.json if not given on the command line),
// creates the GCS storage client, wires up metrics, and starts the
// background multipart-cleanup goroutine.
func (g *GCS) NewGatewayLayer(creds auth.Credentials) (minio.ObjectLayer, error) {
	ctx := minio.GlobalContext

	var err error
	if g.projectID == "" {
		// If project ID is not provided on command line, we figure it out
		// from the credentials.json file.
		g.projectID, err = gcsParseProjectID(env.Get("GOOGLE_APPLICATION_CREDENTIALS", ""))
		if err != nil {
			return nil, err
		}
	}

	metrics := minio.NewMetrics()

	// Wrap the gateway transport so all backend HTTP calls are counted.
	t := &minio.MetricsTransport{
		Transport: minio.NewGatewayHTTPTransport(),
		Metrics:   metrics,
	}

	// Initialize a GCS client.
	// Send user-agent in this format for Google to obtain usage insights while participating in the
	// Google Cloud Technology Partners (https://cloud.google.com/partners/)
	client, err := storage.NewClient(ctx, option.WithUserAgent(fmt.Sprintf("MinIO/%s (GPN:MinIO;)", minio.Version)))
	if err != nil {
		return nil, err
	}

	gcs := &gcsGateway{
		client:    client,
		projectID: g.projectID,
		metrics:   metrics,
		httpClient: &http.Client{
			Transport: t,
		},
	}

	// Start background process to cleanup old files in minio.sys.tmp
	go gcs.CleanupGCSMinioSysTmp(ctx)
	return gcs, nil
}

// Production - GCS gateway is production ready.
func (g *GCS) Production() bool {
	return true
}

// Stored in gcs.json - Contents of this file is not used anywhere. It can be
// used for debugging purposes.
type gcsMultipartMetaV1 struct {
	Version string `json:"version"` // Version number
	Bucket  string `json:"bucket"`  // Bucket name
	Object  string `json:"object"`  // Object name
}

// Returns name of the multipart meta object, i.e.
// "<sys-tmp>/multipart/v1/<uploadID>/gcs.json".
func gcsMultipartMetaName(uploadID string) string {
	return fmt.Sprintf("%s/%s/%s", gcsMinioMultipartPathV1, uploadID, gcsMinioMultipartMeta)
}

// Returns name of the part object; the part number is zero-padded to five
// digits so parts sort lexicographically in upload order.
func gcsMultipartDataName(uploadID string, partNumber int, etag string) string {
	return fmt.Sprintf("%s/%s/%05d.%s", gcsMinioMultipartPathV1, uploadID, partNumber, etag)
}

// Convert MinIO errors to minio object layer errors.
func gcsToObjectError(err error, params ...string) error { if err == nil { return nil } bucket := "" object := "" uploadID := "" if len(params) >= 1 { bucket = params[0] } if len(params) == 2 { object = params[1] } if len(params) == 3 { uploadID = params[2] } // in some cases just a plain error is being returned switch err.Error() { case "storage: bucket doesn't exist": err = minio.BucketNotFound{ Bucket: bucket, } return err case "storage: object doesn't exist": if uploadID != "" { err = minio.InvalidUploadID{ UploadID: uploadID, } } else { err = minio.ObjectNotFound{ Bucket: bucket, Object: object, } } return err } googleAPIErr, ok := err.(*googleapi.Error) if !ok { // We don't interpret non MinIO errors. As minio errors will // have StatusCode to help to convert to object errors. return err } if len(googleAPIErr.Errors) == 0 { return err } reason := googleAPIErr.Errors[0].Reason message := googleAPIErr.Errors[0].Message switch reason { case "required": // Anonymous users does not have storage.xyz access to project 123. fallthrough case "keyInvalid": fallthrough case "forbidden": err = minio.PrefixAccessDenied{ Bucket: bucket, Object: object, } case "invalid": err = minio.BucketNameInvalid{ Bucket: bucket, } case "notFound": if object != "" { err = minio.ObjectNotFound{ Bucket: bucket, Object: object, } break } err = minio.BucketNotFound{Bucket: bucket} case "conflict": if message == "You already own this bucket. Please select another name." { err = minio.BucketAlreadyOwnedByYou{Bucket: bucket} break } if message == "Sorry, that name is not available. Please try a different one." { err = minio.BucketAlreadyExists{Bucket: bucket} break } err = minio.BucketNotEmpty{Bucket: bucket} } return err } // gcsProjectIDRegex defines a valid gcs project id format var gcsProjectIDRegex = regexp.MustCompile("^[a-z][a-z0-9-]{5,29}$") // isValidGCSProjectIDFormat - checks if a given project id format is valid or not. 
// Project IDs must start with a lowercase letter and can have lowercase ASCII letters,
// digits or hyphens. Project IDs must be between 6 and 30 characters.
// Ref: https://cloud.google.com/resource-manager/reference/rest/v1/projects#Project (projectId section)
func isValidGCSProjectIDFormat(projectID string) bool {
	// Checking projectID format
	return gcsProjectIDRegex.MatchString(projectID)
}

// gcsGateway - Implements gateway for MinIO and GCS compatible object storage servers.
type gcsGateway struct {
	minio.GatewayUnsupported
	client     *storage.Client // GCS SDK client used for all backend calls
	httpClient *http.Client    // metrics-instrumented client, used for health checks
	metrics    *minio.Metrics
	projectID  string
}

// Returns projectID from the GOOGLE_APPLICATION_CREDENTIALS file.
// NOTE(review): unmarshals into map[string]string, so it relies on every
// top-level field in credentials.json being a string — true for service
// account key files, but verify if other credential formats are expected.
func gcsParseProjectID(credsFile string) (projectID string, err error) {
	contents, err := ioutil.ReadFile(credsFile)
	if err != nil {
		return projectID, err
	}
	googleCreds := make(map[string]string)
	if err = json.Unmarshal(contents, &googleCreds); err != nil {
		return projectID, err
	}
	return googleCreds[gcsProjectIDKey], err
}

// GetMetrics returns this gateway's metrics
func (l *gcsGateway) GetMetrics(ctx context.Context) (*minio.Metrics, error) {
	return l.metrics, nil
}

// Cleanup old files in minio.sys.tmp of the given bucket.
// Iterates every object under the sys-tmp prefix and deletes those not
// updated within gcsMultipartExpiry (2 weeks).
func (l *gcsGateway) CleanupGCSMinioSysTmpBucket(ctx context.Context, bucket string) {
	it := l.client.Bucket(bucket).Objects(ctx, &storage.Query{Prefix: minio.GatewayMinioSysTmp, Versions: false})
	for {
		attrs, err := it.Next()
		if err != nil {
			// iterator.Done is the normal end of listing; anything else is logged.
			if err != iterator.Done {
				reqInfo := &logger.ReqInfo{BucketName: bucket}
				ctx := logger.SetReqInfo(minio.GlobalContext, reqInfo)
				logger.LogIf(ctx, err)
			}
			return
		}
		if time.Since(attrs.Updated) > gcsMultipartExpiry {
			// Delete files older than 2 weeks.
			err := l.client.Bucket(bucket).Object(attrs.Name).Delete(ctx)
			if err != nil {
				reqInfo := &logger.ReqInfo{BucketName: bucket, ObjectName: attrs.Name}
				ctx := logger.SetReqInfo(minio.GlobalContext, reqInfo)
				logger.LogIf(ctx, err)
				// NOTE(review): a single failed delete aborts cleanup of the
				// remaining expired objects in this bucket until the next
				// daily sweep — confirm this best-effort behavior is intended.
				return
			}
		}
	}
}

// Cleanup old files in minio.sys.tmp of all buckets.
// Runs forever (launched as a goroutine from NewGatewayLayer), sweeping all
// buckets in the project once per gcsCleanupInterval (24h).
func (l *gcsGateway) CleanupGCSMinioSysTmp(ctx context.Context) {
	for {
		it := l.client.Buckets(ctx, l.projectID)
		for {
			attrs, err := it.Next()
			if err != nil {
				// Includes iterator.Done; move on to the next sweep either way.
				break
			}
			l.CleanupGCSMinioSysTmpBucket(ctx, attrs.Name)
		}
		// Run the cleanup loop every 1 day.
		time.Sleep(gcsCleanupInterval)
	}
}

// Shutdown - save any gateway metadata to disk
// if necessary and reload upon next restart.
func (l *gcsGateway) Shutdown(ctx context.Context) error {
	return nil
}

// StorageInfo - Not relevant to GCS backend; only reports backend type and
// whether the GCS endpoint is reachable.
func (l *gcsGateway) StorageInfo(ctx context.Context, _ bool) (si minio.StorageInfo, _ []error) {
	si.Backend.Type = minio.BackendGateway
	si.Backend.GatewayOnline = minio.IsBackendOnline(ctx, l.httpClient, "https://storage.googleapis.com")
	return si, nil
}

// MakeBucketWithLocation - Create a new container on GCS backend.
// Object-lock and versioning options are not supported by this gateway.
func (l *gcsGateway) MakeBucketWithLocation(ctx context.Context, bucket string, opts minio.BucketOptions) error {
	if opts.LockEnabled || opts.VersioningEnabled {
		return minio.NotImplemented{}
	}

	bkt := l.client.Bucket(bucket)

	// we'll default to the us multi-region in case of us-east-1
	location := opts.Location
	if location == "us-east-1" {
		location = "us"
	}

	err := bkt.Create(ctx, l.projectID, &storage.BucketAttrs{
		Location: location,
	})
	logger.LogIf(ctx, err)
	return gcsToObjectError(err, bucket)
}

// GetBucketInfo - Get bucket metadata..
// GetBucketInfo - Get bucket metadata..
func (l *gcsGateway) GetBucketInfo(ctx context.Context, bucket string) (minio.BucketInfo, error) {
	attrs, err := l.client.Bucket(bucket).Attrs(ctx)
	if err != nil {
		logger.LogIf(ctx, err)
		return minio.BucketInfo{}, gcsToObjectError(err, bucket)
	}

	return minio.BucketInfo{
		Name:    attrs.Name,
		Created: attrs.Created,
	}, nil
}

// ListBuckets lists all buckets under your project-id on GCS.
func (l *gcsGateway) ListBuckets(ctx context.Context) (buckets []minio.BucketInfo, err error) {
	it := l.client.Buckets(ctx, l.projectID)

	// Iterate and capture all the buckets.
	for {
		attrs, ierr := it.Next()
		if ierr == iterator.Done {
			break
		}

		if ierr != nil {
			return buckets, gcsToObjectError(ierr)
		}

		buckets = append(buckets, minio.BucketInfo{
			Name:    attrs.Name,
			Created: attrs.Created,
		})
	}

	return buckets, nil
}

// DeleteBucket delete a bucket on GCS.
// To mimic S3 semantics, the bucket must be empty except for the gateway's
// own minio.sys.tmp/ housekeeping prefix, which is removed first.
func (l *gcsGateway) DeleteBucket(ctx context.Context, bucket string, forceDelete bool) error {
	itObject := l.client.Bucket(bucket).Objects(ctx, &storage.Query{
		Delimiter: minio.SlashSeparator,
		Versions:  false,
	})
	// We list the bucket and if we find any objects we return BucketNotEmpty error. If we
	// find only "minio.sys.tmp/" then we remove it before deleting the bucket.
	gcsMinioPathFound := false
	nonGCSMinioPathFound := false
	for {
		objAttrs, err := itObject.Next()
		if err == iterator.Done {
			break
		}
		if err != nil {
			logger.LogIf(ctx, err)
			return gcsToObjectError(err)
		}
		if objAttrs.Prefix == minio.GatewayMinioSysTmp {
			gcsMinioPathFound = true
			continue
		}
		nonGCSMinioPathFound = true
		break
	}
	if nonGCSMinioPathFound {
		logger.LogIf(ctx, minio.BucketNotEmpty{})
		return gcsToObjectError(minio.BucketNotEmpty{})
	}
	if gcsMinioPathFound {
		// Remove minio.sys.tmp before deleting the bucket.
		itObject = l.client.Bucket(bucket).Objects(ctx, &storage.Query{Versions: false, Prefix: minio.GatewayMinioSysTmp})
		for {
			objAttrs, err := itObject.Next()
			if err == iterator.Done {
				break
			}
			if err != nil {
				logger.LogIf(ctx, err)
				return gcsToObjectError(err)
			}
			err = l.client.Bucket(bucket).Object(objAttrs.Name).Delete(ctx)
			if err != nil {
				logger.LogIf(ctx, err)
				return gcsToObjectError(err)
			}
		}
	}
	err := l.client.Bucket(bucket).Delete(ctx)
	logger.LogIf(ctx, err)
	return gcsToObjectError(err, bucket)
}

// toGCSPageToken builds a GCS continuation token that positions listing
// just after `name`: 0xa tag byte, the name length, then the name itself.
// NOTE(review): for len(name) in 256..1023 the first length byte may lack
// the varint continuation bit (high bit) even though a second byte is
// appended — confirm against the GCS token format for long object names.
func toGCSPageToken(name string) string {
	length := uint16(len(name))

	b := []byte{
		0xa,
		byte(length & 0xFF),
	}

	length = length >> 7
	if length > 0 {
		b = append(b, byte(length&0xFF))
	}

	b = append(b, []byte(name)...)

	return base64.StdEncoding.EncodeToString(b)
}

// Returns true if marker was returned by GCS, i.e prefixed with
// {minio} by this gateway (see the token scheme in ListObjects).
func isGCSMarker(marker string) bool {
	return strings.HasPrefix(marker, gcsTokenPrefix)
}

// ListObjects - lists all blobs in GCS bucket filtered by prefix
func (l *gcsGateway) ListObjects(ctx context.Context, bucket string, prefix string, marker string, delimiter string, maxKeys int) (minio.ListObjectsInfo, error) {
	if maxKeys == 0 {
		return minio.ListObjectsInfo{}, nil
	}

	it := l.client.Bucket(bucket).Objects(ctx, &storage.Query{
		Delimiter: delimiter,
		Prefix:    prefix,
		Versions:  false,
	})

	// To accommodate S3-compatible applications using
	// ListObjectsV1 to use object keys as markers to control the
	// listing of objects, we use the following encoding scheme to
	// distinguish between GCS continuation tokens and application
	// supplied markers.
	//
	// - NextMarker in ListObjectsV1 response is constructed by
	// prefixing "{minio}" to the GCS continuation token,
	// e.g, "{minio}CgRvYmoz"
	//
	// - Application supplied markers are transformed to a
	// GCS continuation token.

	// If application is using GCS continuation token we should
	// strip the gcsTokenPrefix we added.
	token := ""
	if marker != "" {
		if isGCSMarker(marker) {
			token = strings.TrimPrefix(marker, gcsTokenPrefix)
		} else {
			token = toGCSPageToken(marker)
		}
	}
	nextMarker := ""

	var prefixes []string
	var objects []minio.ObjectInfo
	var nextPageToken string
	var err error

	pager := iterator.NewPager(it, maxKeys, token)
	for {
		gcsObjects := make([]*storage.ObjectAttrs, 0)
		nextPageToken, err = pager.NextPage(&gcsObjects)
		if err != nil {
			logger.LogIf(ctx, err)
			return minio.ListObjectsInfo{}, gcsToObjectError(err, bucket, prefix)
		}

		for _, attrs := range gcsObjects {
			// Due to minio.GatewayMinioSysTmp keys being skipped, the number of objects + prefixes
			// returned may not total maxKeys. This behavior is compatible with the S3 spec which
			// allows the response to include less keys than maxKeys.
			if attrs.Prefix == minio.GatewayMinioSysTmp {
				// We don't return our metadata prefix.
				continue
			}
			if !strings.HasPrefix(prefix, minio.GatewayMinioSysTmp) {
				// If client lists outside gcsMinioPath then we filter out gcsMinioPath/* entries.
				// But if the client lists inside gcsMinioPath then we return the entries in gcsMinioPath/
				// which will be helpful to observe the "directory structure" for debugging purposes.
				if strings.HasPrefix(attrs.Prefix, minio.GatewayMinioSysTmp) ||
					strings.HasPrefix(attrs.Name, minio.GatewayMinioSysTmp) {
					continue
				}
			}

			// An entry with a non-empty Prefix is a "directory"; otherwise
			// it is a concrete object.
			if attrs.Prefix != "" {
				prefixes = append(prefixes, attrs.Prefix)
			} else {
				objects = append(objects, fromGCSAttrsToObjectInfo(attrs))
			}

			// The NextMarker property should only be set in the response if a delimiter is used
			if delimiter != "" {
				if attrs.Prefix > nextMarker {
					nextMarker = attrs.Prefix
				} else if attrs.Name > nextMarker {
					nextMarker = attrs.Name
				}
			}
		}

		// Exit the loop if at least one item can be returned from
		// the current page or there are no more pages available
		if nextPageToken == "" || len(prefixes)+len(objects) > 0 {
			break
		}
	}

	if nextPageToken == "" {
		nextMarker = ""
	} else if nextMarker != "" {
		nextMarker = gcsTokenPrefix + toGCSPageToken(nextMarker)
	}

	return minio.ListObjectsInfo{
		IsTruncated: nextPageToken != "",
		NextMarker:  nextMarker,
		Prefixes:    prefixes,
		Objects:     objects,
	}, nil
}

// ListObjectsV2 - lists all blobs in GCS bucket filtered by prefix
func (l *gcsGateway) ListObjectsV2(ctx context.Context, bucket, prefix, continuationToken, delimiter string, maxKeys int, fetchOwner bool, startAfter string) (minio.ListObjectsV2Info, error) {
	if maxKeys == 0 {
		return minio.ListObjectsV2Info{ContinuationToken: continuationToken}, nil
	}

	it := l.client.Bucket(bucket).Objects(ctx, &storage.Query{
		Delimiter: delimiter,
		Prefix:    prefix,
		Versions:  false,
	})

	// V2 continuation tokens are raw GCS page tokens (no "{minio}" prefix);
	// start-after is converted to an equivalent page token.
	token := continuationToken
	if token == "" && startAfter != "" {
		token = toGCSPageToken(startAfter)
	}

	var prefixes []string
	var objects []minio.ObjectInfo
	var nextPageToken string
	var err error

	pager := iterator.NewPager(it, maxKeys, token)
	for {
		gcsObjects := make([]*storage.ObjectAttrs, 0)
		nextPageToken, err = pager.NextPage(&gcsObjects)
		if err != nil {
			logger.LogIf(ctx, err)
			return minio.ListObjectsV2Info{}, gcsToObjectError(err, bucket, prefix)
		}

		for _, attrs := range gcsObjects {
			// Due to minio.GatewayMinioSysTmp keys being skipped, the number of objects + prefixes
			// returned may not total maxKeys. This behavior is compatible with the S3 spec which
			// allows the response to include less keys than maxKeys.
			if attrs.Prefix == minio.GatewayMinioSysTmp {
				// We don't return our metadata prefix.
				continue
			}
			if !strings.HasPrefix(prefix, minio.GatewayMinioSysTmp) {
				// If client lists outside gcsMinioPath then we filter out gcsMinioPath/* entries.
				// But if the client lists inside gcsMinioPath then we return the entries in gcsMinioPath/
				// which will be helpful to observe the "directory structure" for debugging purposes.
				if strings.HasPrefix(attrs.Prefix, minio.GatewayMinioSysTmp) ||
					strings.HasPrefix(attrs.Name, minio.GatewayMinioSysTmp) {
					continue
				}
			}

			if attrs.Prefix != "" {
				prefixes = append(prefixes, attrs.Prefix)
			} else {
				objects = append(objects, fromGCSAttrsToObjectInfo(attrs))
			}
		}

		// Exit the loop if at least one item can be returned from
		// the current page or there are no more pages available
		if nextPageToken == "" || len(prefixes)+len(objects) > 0 {
			break
		}
	}

	return minio.ListObjectsV2Info{
		IsTruncated:           nextPageToken != "",
		ContinuationToken:     continuationToken,
		NextContinuationToken: nextPageToken,
		Prefixes:              prefixes,
		Objects:               objects,
	}, nil
}

// GetObjectNInfo - returns object info and locked object ReadCloser.
// The object body is streamed through a pipe fed by a goroutine running
// GetObject, so partial reads can cancel the backend transfer.
func (l *gcsGateway) GetObjectNInfo(ctx context.Context, bucket, object string, rs *minio.HTTPRangeSpec, h http.Header, lockType minio.LockType, opts minio.ObjectOptions) (gr *minio.GetObjectReader, err error) {
	var objInfo minio.ObjectInfo
	objInfo, err = l.GetObjectInfo(ctx, bucket, object, opts)
	if err != nil {
		return nil, err
	}

	var startOffset, length int64
	startOffset, length, err = rs.GetOffsetLength(objInfo.Size)
	if err != nil {
		return nil, err
	}

	pr, pw := io.Pipe()
	go func() {
		err := l.GetObject(ctx, bucket, object, startOffset, length, pw, objInfo.ETag, opts)
		pw.CloseWithError(err)
	}()
	// Setup cleanup function to cause the above go-routine to
	// exit in case of partial read
	pipeCloser := func() { pr.Close() }
	return minio.NewGetObjectReaderFromReader(pr, objInfo, opts, pipeCloser)
}

// GetObject - reads an object from GCS. Supports additional
// parameters like offset and length which are synonymous with
// HTTP Range requests.
//
// startOffset indicates the starting read location of the object.
// length indicates the total length of the object.
func (l *gcsGateway) GetObject(ctx context.Context, bucket string, key string, startOffset int64, length int64, writer io.Writer, etag string, opts minio.ObjectOptions) error {
	// if we want to mimic S3 behavior exactly, we need to verify if bucket exists first,
	// otherwise gcs will just return object not exist in case of non-existing bucket
	if _, err := l.client.Bucket(bucket).Attrs(ctx); err != nil {
		logger.LogIf(ctx, err, logger.Application)
		return gcsToObjectError(err, bucket)
	}

	// GCS storage decompresses a gzipped object by default and returns the data.
	// Refer to https://cloud.google.com/storage/docs/transcoding#decompressive_transcoding
	// Need to set `Accept-Encoding` header to `gzip` when issuing a GetObject call, to be able
	// to download the object in compressed state.
	// Calling ReadCompressed with true accomplishes that.
	object := l.client.Bucket(bucket).Object(key).ReadCompressed(true)

	r, err := object.NewRangeReader(ctx, startOffset, length)
	if err != nil {
		logger.LogIf(ctx, err, logger.Application)
		return gcsToObjectError(err, bucket, key)
	}
	defer r.Close()

	if _, err := io.Copy(writer, r); err != nil {
		logger.LogIf(ctx, err)
		return gcsToObjectError(err, bucket, key)
	}

	return nil
}

// fromGCSAttrsToObjectInfo converts GCS BucketAttrs to gateway ObjectInfo
// All google cloud storage objects have a CRC32c hash, whereas composite objects may not have a MD5 hash
// Refer https://cloud.google.com/storage/docs/hashes-etags.
Use CRC32C for ETag metadata := make(map[string]string) var ( expiry time.Time e error ) for k, v := range attrs.Metadata { k = http.CanonicalHeaderKey(k) // Translate the GCS custom metadata prefix if strings.HasPrefix(k, "X-Goog-Meta-") { k = strings.Replace(k, "X-Goog-Meta-", "X-Amz-Meta-", 1) } if k == "Expires" { if expiry, e = time.Parse(http.TimeFormat, v); e == nil { expiry = expiry.UTC() } continue } metadata[k] = v } if attrs.ContentType != "" { metadata["Content-Type"] = attrs.ContentType } if attrs.ContentEncoding != "" { metadata["Content-Encoding"] = attrs.ContentEncoding } if attrs.CacheControl != "" { metadata["Cache-Control"] = attrs.CacheControl } if attrs.ContentDisposition != "" { metadata["Content-Disposition"] = attrs.ContentDisposition } if attrs.ContentLanguage != "" { metadata["Content-Language"] = attrs.ContentLanguage } etag := hex.EncodeToString(attrs.MD5) if etag == "" { etag = minio.ToS3ETag(fmt.Sprintf("%d", attrs.CRC32C)) } return minio.ObjectInfo{ Name: attrs.Name, Bucket: attrs.Bucket, ModTime: attrs.Updated, Size: attrs.Size, ETag: etag, UserDefined: metadata, ContentType: attrs.ContentType, ContentEncoding: attrs.ContentEncoding, Expires: expiry, } } // applyMetadataToGCSAttrs applies metadata to a GCS ObjectAttrs instance func applyMetadataToGCSAttrs(metadata map[string]string, attrs *storage.ObjectAttrs) { attrs.Metadata = make(map[string]string) for k, v := range metadata { k = http.CanonicalHeaderKey(k) switch { case strings.HasPrefix(k, "X-Amz-Meta-"): // Translate the S3 user-defined metadata prefix k = strings.Replace(k, "X-Amz-Meta-", "x-goog-meta-", 1) attrs.Metadata[k] = v case k == "Content-Type": attrs.ContentType = v case k == "Content-Encoding": attrs.ContentEncoding = v case k == "Cache-Control": attrs.CacheControl = v case k == "Content-Disposition": attrs.ContentDisposition = v case k == "Content-Language": attrs.ContentLanguage = v } } } // GetObjectInfo - reads object info and replies back ObjectInfo func (l 
*gcsGateway) GetObjectInfo(ctx context.Context, bucket string, object string, opts minio.ObjectOptions) (minio.ObjectInfo, error) { // if we want to mimic S3 behavior exactly, we need to verify if bucket exists first, // otherwise gcs will just return object not exist in case of non-existing bucket if _, err := l.client.Bucket(bucket).Attrs(ctx); err != nil { logger.LogIf(ctx, err, logger.Application) return minio.ObjectInfo{}, gcsToObjectError(err, bucket) } attrs, err := l.client.Bucket(bucket).Object(object).Attrs(ctx) if err != nil { logger.LogIf(ctx, err) return minio.ObjectInfo{}, gcsToObjectError(err, bucket, object) } return fromGCSAttrsToObjectInfo(attrs), nil } // PutObject - Create a new object with the incoming data, func (l *gcsGateway) PutObject(ctx context.Context, bucket string, key string, r *minio.PutObjReader, opts minio.ObjectOptions) (minio.ObjectInfo, error) { data := r.Reader nctx, cancel := context.WithCancel(ctx) defer cancel() // if we want to mimic S3 behavior exactly, we need to verify if bucket exists first, // otherwise gcs will just return object not exist in case of non-existing bucket if _, err := l.client.Bucket(bucket).Attrs(nctx); err != nil { logger.LogIf(ctx, err, logger.Application) return minio.ObjectInfo{}, gcsToObjectError(err, bucket) } object := l.client.Bucket(bucket).Object(key) w := object.NewWriter(nctx) // Disable "chunked" uploading in GCS client if the size of the data to be uploaded is below // the current chunk-size of the writer. This avoids an unnecessary memory allocation. if data.Size() < int64(w.ChunkSize) { w.ChunkSize = 0 } applyMetadataToGCSAttrs(opts.UserDefined, &w.ObjectAttrs) if _, err := io.Copy(w, data); err != nil { // Close the object writer upon error. logger.LogIf(ctx, err) return minio.ObjectInfo{}, gcsToObjectError(err, bucket, key) } // Close the object writer upon success. 
if err := w.Close(); err != nil { logger.LogIf(ctx, err) return minio.ObjectInfo{}, gcsToObjectError(err, bucket, key) } return fromGCSAttrsToObjectInfo(w.Attrs()), nil } // CopyObject - Copies a blob from source container to destination container. func (l *gcsGateway) CopyObject(ctx context.Context, srcBucket string, srcObject string, destBucket string, destObject string, srcInfo minio.ObjectInfo, srcOpts, dstOpts minio.ObjectOptions) (minio.ObjectInfo, error) { if srcOpts.CheckCopyPrecondFn != nil && srcOpts.CheckCopyPrecondFn(srcInfo, "") { return minio.ObjectInfo{}, minio.PreConditionFailed{} } src := l.client.Bucket(srcBucket).Object(srcObject) dst := l.client.Bucket(destBucket).Object(destObject) copier := dst.CopierFrom(src) applyMetadataToGCSAttrs(srcInfo.UserDefined, &copier.ObjectAttrs) attrs, err := copier.Run(ctx) if err != nil { logger.LogIf(ctx, err) return minio.ObjectInfo{}, gcsToObjectError(err, destBucket, destObject) } return fromGCSAttrsToObjectInfo(attrs), nil } // DeleteObject - Deletes a blob in bucket func (l *gcsGateway) DeleteObject(ctx context.Context, bucket string, object string, opts minio.ObjectOptions) (minio.ObjectInfo, error) { err := l.client.Bucket(bucket).Object(object).Delete(ctx) if err != nil { logger.LogIf(ctx, err) return minio.ObjectInfo{}, gcsToObjectError(err, bucket, object) } return minio.ObjectInfo{ Bucket: bucket, Name: object, }, nil } func (l *gcsGateway) DeleteObjects(ctx context.Context, bucket string, objects []minio.ObjectToDelete, opts minio.ObjectOptions) ([]minio.DeletedObject, []error) { errs := make([]error, len(objects)) dobjects := make([]minio.DeletedObject, len(objects)) for idx, object := range objects { _, errs[idx] = l.DeleteObject(ctx, bucket, object.ObjectName, opts) if errs[idx] == nil { dobjects[idx] = minio.DeletedObject{ ObjectName: object.ObjectName, } } } return dobjects, errs } // NewMultipartUpload - upload object in multiple parts func (l *gcsGateway) NewMultipartUpload(ctx 
context.Context, bucket string, key string, o minio.ObjectOptions) (uploadID string, err error) { // generate new uploadid uploadID = minio.MustGetUUID() // generate name for part zero meta := gcsMultipartMetaName(uploadID) w := l.client.Bucket(bucket).Object(meta).NewWriter(ctx) defer w.Close() applyMetadataToGCSAttrs(o.UserDefined, &w.ObjectAttrs) if err = json.NewEncoder(w).Encode(gcsMultipartMetaV1{ gcsMinioMultipartMetaCurrentVersion, bucket, key, }); err != nil { logger.LogIf(ctx, err) return "", gcsToObjectError(err, bucket, key) } return uploadID, nil } // ListMultipartUploads - lists the (first) multipart upload for an object // matched _exactly_ by the prefix func (l *gcsGateway) ListMultipartUploads(ctx context.Context, bucket string, prefix string, keyMarker string, uploadIDMarker string, delimiter string, maxUploads int) (minio.ListMultipartsInfo, error) { // List objects under <bucket>/gcsMinioMultipartPathV1 it := l.client.Bucket(bucket).Objects(ctx, &storage.Query{ Prefix: gcsMinioMultipartPathV1, }) var uploads []minio.MultipartInfo for { attrs, err := it.Next() if err == iterator.Done { break } if err != nil { logger.LogIf(ctx, err) return minio.ListMultipartsInfo{ KeyMarker: keyMarker, UploadIDMarker: uploadIDMarker, MaxUploads: maxUploads, Prefix: prefix, Delimiter: delimiter, }, gcsToObjectError(err) } // Skip entries other than gcs.json if !strings.HasSuffix(attrs.Name, gcsMinioMultipartMeta) { continue } // Extract multipart upload information from gcs.json obj := l.client.Bucket(bucket).Object(attrs.Name) objReader, rErr := obj.NewReader(ctx) if rErr != nil { logger.LogIf(ctx, rErr) return minio.ListMultipartsInfo{}, rErr } defer objReader.Close() var mpMeta gcsMultipartMetaV1 dec := json.NewDecoder(objReader) decErr := dec.Decode(&mpMeta) if decErr != nil { logger.LogIf(ctx, decErr) return minio.ListMultipartsInfo{}, decErr } if prefix == mpMeta.Object { // Extract uploadId // E.g 
minio.sys.tmp/multipart/v1/d063ad89-fdc4-4ea3-a99e-22dba98151f5/gcs.json components := strings.SplitN(attrs.Name, minio.SlashSeparator, 5) if len(components) != 5 { compErr := errors.New("Invalid multipart upload format") logger.LogIf(ctx, compErr) return minio.ListMultipartsInfo{}, compErr } upload := minio.MultipartInfo{ Object: mpMeta.Object, UploadID: components[3], Initiated: attrs.Created, } uploads = append(uploads, upload) } } return minio.ListMultipartsInfo{ KeyMarker: keyMarker, UploadIDMarker: uploadIDMarker, MaxUploads: maxUploads, Prefix: prefix, Delimiter: delimiter, Uploads: uploads, NextKeyMarker: "", NextUploadIDMarker: "", IsTruncated: false, }, nil } // Checks if minio.sys.tmp/multipart/v1/<upload-id>/gcs.json exists, returns // an object layer compatible error upon any error. func (l *gcsGateway) checkUploadIDExists(ctx context.Context, bucket string, key string, uploadID string) error { _, err := l.client.Bucket(bucket).Object(gcsMultipartMetaName(uploadID)).Attrs(ctx) logger.LogIf(ctx, err) return gcsToObjectError(err, bucket, key, uploadID) } // PutObjectPart puts a part of object in bucket func (l *gcsGateway) PutObjectPart(ctx context.Context, bucket string, key string, uploadID string, partNumber int, r *minio.PutObjReader, opts minio.ObjectOptions) (minio.PartInfo, error) { data := r.Reader if err := l.checkUploadIDExists(ctx, bucket, key, uploadID); err != nil { return minio.PartInfo{}, err } etag := data.MD5HexString() if etag == "" { // Generate random ETag. etag = minio.GenETag() } object := l.client.Bucket(bucket).Object(gcsMultipartDataName(uploadID, partNumber, etag)) w := object.NewWriter(ctx) // Disable "chunked" uploading in GCS client. If enabled, it can cause a corner case // where it tries to upload 0 bytes in the last chunk and get error from server. w.ChunkSize = 0 if _, err := io.Copy(w, data); err != nil { // Make sure to close object writer upon error. 
w.Close() logger.LogIf(ctx, err) return minio.PartInfo{}, gcsToObjectError(err, bucket, key) } // Make sure to close the object writer upon success. if err := w.Close(); err != nil { logger.LogIf(ctx, err) return minio.PartInfo{}, gcsToObjectError(err, bucket, key) } return minio.PartInfo{ PartNumber: partNumber, ETag: etag, LastModified: minio.UTCNow(), Size: data.Size(), }, nil } // gcsGetPartInfo returns PartInfo of a given object part func gcsGetPartInfo(ctx context.Context, attrs *storage.ObjectAttrs) (minio.PartInfo, error) { components := strings.SplitN(attrs.Name, minio.SlashSeparator, 5) if len(components) != 5 { logger.LogIf(ctx, errors.New("Invalid multipart upload format")) return minio.PartInfo{}, errors.New("Invalid multipart upload format") } partComps := strings.SplitN(components[4], ".", 2) if len(partComps) != 2 { logger.LogIf(ctx, errors.New("Invalid multipart part format")) return minio.PartInfo{}, errors.New("Invalid multipart part format") } partNum, pErr := strconv.Atoi(partComps[0]) if pErr != nil { logger.LogIf(ctx, pErr) return minio.PartInfo{}, errors.New("Invalid part number") } return minio.PartInfo{ PartNumber: partNum, LastModified: attrs.Updated, Size: attrs.Size, ETag: partComps[1], }, nil } // GetMultipartInfo returns multipart info of the uploadId of the object func (l *gcsGateway) GetMultipartInfo(ctx context.Context, bucket, object, uploadID string, opts minio.ObjectOptions) (result minio.MultipartInfo, err error) { result.Bucket = bucket result.Object = object result.UploadID = uploadID return result, nil } // ListObjectParts returns all object parts for specified object in specified bucket func (l *gcsGateway) ListObjectParts(ctx context.Context, bucket string, key string, uploadID string, partNumberMarker int, maxParts int, opts minio.ObjectOptions) (minio.ListPartsInfo, error) { it := l.client.Bucket(bucket).Objects(ctx, &storage.Query{ Prefix: path.Join(gcsMinioMultipartPathV1, uploadID), }) var ( count int partInfos 
[]minio.PartInfo ) isTruncated := true for count < maxParts { attrs, err := it.Next() if err == iterator.Done { isTruncated = false break } if err != nil { logger.LogIf(ctx, err) return minio.ListPartsInfo{}, gcsToObjectError(err) } if strings.HasSuffix(attrs.Name, gcsMinioMultipartMeta) { continue } partInfo, pErr := gcsGetPartInfo(ctx, attrs) if pErr != nil { logger.LogIf(ctx, pErr) return minio.ListPartsInfo{}, pErr } if partInfo.PartNumber <= partNumberMarker { continue } partInfos = append(partInfos, partInfo) count++ } nextPartNumberMarker := 0 if isTruncated { nextPartNumberMarker = partInfos[maxParts-1].PartNumber } return minio.ListPartsInfo{ Bucket: bucket, Object: key, UploadID: uploadID, PartNumberMarker: partNumberMarker, NextPartNumberMarker: nextPartNumberMarker, MaxParts: maxParts, Parts: partInfos, IsTruncated: isTruncated, }, nil } // Called by AbortMultipartUpload and CompleteMultipartUpload for cleaning up. func (l *gcsGateway) cleanupMultipartUpload(ctx context.Context, bucket, key, uploadID string) error { prefix := fmt.Sprintf("%s/%s/", gcsMinioMultipartPathV1, uploadID) // iterate through all parts and delete them it := l.client.Bucket(bucket).Objects(ctx, &storage.Query{Prefix: prefix, Versions: false}) for { attrs, err := it.Next() if err == iterator.Done { break } if err != nil { logger.LogIf(ctx, err) return gcsToObjectError(err, bucket, key) } object := l.client.Bucket(bucket).Object(attrs.Name) // Ignore the error as parallel AbortMultipartUpload might have deleted it. 
object.Delete(ctx) } return nil } // AbortMultipartUpload aborts a ongoing multipart upload func (l *gcsGateway) AbortMultipartUpload(ctx context.Context, bucket string, key string, uploadID string) error { if err := l.checkUploadIDExists(ctx, bucket, key, uploadID); err != nil { return err } return l.cleanupMultipartUpload(ctx, bucket, key, uploadID) } // CompleteMultipartUpload completes ongoing multipart upload and finalizes object // Note that there is a limit (currently 32) to the number of components that can // be composed in a single operation. There is a per-project rate limit (currently 200) // to the number of source objects you can compose per second. func (l *gcsGateway) CompleteMultipartUpload(ctx context.Context, bucket string, key string, uploadID string, uploadedParts []minio.CompletePart, opts minio.ObjectOptions) (minio.ObjectInfo, error) { meta := gcsMultipartMetaName(uploadID) object := l.client.Bucket(bucket).Object(meta) partZeroAttrs, err := object.Attrs(ctx) if err != nil { logger.LogIf(ctx, err) return minio.ObjectInfo{}, gcsToObjectError(err, bucket, key, uploadID) } r, err := object.NewReader(ctx) if err != nil { logger.LogIf(ctx, err) return minio.ObjectInfo{}, gcsToObjectError(err, bucket, key) } defer r.Close() // Check version compatibility of the meta file before compose() multipartMeta := gcsMultipartMetaV1{} if err = json.NewDecoder(r).Decode(&multipartMeta); err != nil { logger.LogIf(ctx, err) return minio.ObjectInfo{}, gcsToObjectError(err, bucket, key) } if multipartMeta.Version != gcsMinioMultipartMetaCurrentVersion { logger.LogIf(ctx, errGCSFormat) return minio.ObjectInfo{}, gcsToObjectError(errGCSFormat, bucket, key) } // Validate if the gcs.json stores valid entries for the bucket and key. 
if multipartMeta.Bucket != bucket || multipartMeta.Object != key { return minio.ObjectInfo{}, gcsToObjectError(minio.InvalidUploadID{ UploadID: uploadID, }, bucket, key) } var parts []*storage.ObjectHandle partSizes := make([]int64, len(uploadedParts)) for i, uploadedPart := range uploadedParts { parts = append(parts, l.client.Bucket(bucket).Object(gcsMultipartDataName(uploadID, uploadedPart.PartNumber, uploadedPart.ETag))) partAttr, pErr := l.client.Bucket(bucket).Object(gcsMultipartDataName(uploadID, uploadedPart.PartNumber, uploadedPart.ETag)).Attrs(ctx) if pErr != nil { logger.LogIf(ctx, pErr) return minio.ObjectInfo{}, gcsToObjectError(pErr, bucket, key, uploadID) } partSizes[i] = partAttr.Size } // Error out if parts except last part sizing < 5MiB. for i, size := range partSizes[:len(partSizes)-1] { if size < 5*humanize.MiByte { logger.LogIf(ctx, minio.PartTooSmall{ PartNumber: uploadedParts[i].PartNumber, PartSize: size, PartETag: uploadedParts[i].ETag, }) return minio.ObjectInfo{}, minio.PartTooSmall{ PartNumber: uploadedParts[i].PartNumber, PartSize: size, PartETag: uploadedParts[i].ETag, } } } // Returns name of the composed object. gcsMultipartComposeName := func(uploadID string, composeNumber int) string { return fmt.Sprintf("%s/tmp/%s/composed-object-%05d", minio.GatewayMinioSysTmp, uploadID, composeNumber) } composeCount := int(math.Ceil(float64(len(parts)) / float64(gcsMaxComponents))) if composeCount > 1 { // Create composes of every 32 parts. composeParts := make([]*storage.ObjectHandle, composeCount) for i := 0; i < composeCount; i++ { // Create 'composed-object-N' using next 32 parts. composeParts[i] = l.client.Bucket(bucket).Object(gcsMultipartComposeName(uploadID, i)) start := i * gcsMaxComponents end := start + gcsMaxComponents if end > len(parts) { end = len(parts) } composer := composeParts[i].ComposerFrom(parts[start:end]...) 
composer.ContentType = partZeroAttrs.ContentType composer.Metadata = partZeroAttrs.Metadata if _, err = composer.Run(ctx); err != nil { logger.LogIf(ctx, err) return minio.ObjectInfo{}, gcsToObjectError(err, bucket, key) } } // As composes are successfully created, final object needs to be created using composes. parts = composeParts } composer := l.client.Bucket(bucket).Object(key).ComposerFrom(parts...) composer.ContentType = partZeroAttrs.ContentType composer.ContentEncoding = partZeroAttrs.ContentEncoding composer.CacheControl = partZeroAttrs.CacheControl composer.ContentDisposition = partZeroAttrs.ContentDisposition composer.ContentLanguage = partZeroAttrs.ContentLanguage composer.Metadata = partZeroAttrs.Metadata attrs, err := composer.Run(ctx) if err != nil { logger.LogIf(ctx, err) return minio.ObjectInfo{}, gcsToObjectError(err, bucket, key) } if err = l.cleanupMultipartUpload(ctx, bucket, key, uploadID); err != nil { return minio.ObjectInfo{}, gcsToObjectError(err, bucket, key) } return fromGCSAttrsToObjectInfo(attrs), nil } // SetBucketPolicy - Set policy on bucket func (l *gcsGateway) SetBucketPolicy(ctx context.Context, bucket string, bucketPolicy *policy.Policy) error { policyInfo, err := minio.PolicyToBucketAccessPolicy(bucketPolicy) if err != nil { logger.LogIf(ctx, err) return gcsToObjectError(err, bucket) } var policies []minio.BucketAccessPolicy for prefix, policy := range miniogopolicy.GetPolicies(policyInfo.Statements, bucket, "") { policies = append(policies, minio.BucketAccessPolicy{ Prefix: prefix, Policy: policy, }) } prefix := bucket + "/*" // For all objects inside the bucket. 
if len(policies) != 1 { logger.LogIf(ctx, minio.NotImplemented{}) return minio.NotImplemented{} } if policies[0].Prefix != prefix { logger.LogIf(ctx, minio.NotImplemented{}) return minio.NotImplemented{} } acl := l.client.Bucket(bucket).ACL() if policies[0].Policy == miniogopolicy.BucketPolicyNone { if err := acl.Delete(ctx, storage.AllUsers); err != nil { logger.LogIf(ctx, err) return gcsToObjectError(err, bucket) } return nil } var role storage.ACLRole switch policies[0].Policy { case miniogopolicy.BucketPolicyReadOnly: role = storage.RoleReader case miniogopolicy.BucketPolicyWriteOnly: role = storage.RoleWriter default: logger.LogIf(ctx, minio.NotImplemented{}) return minio.NotImplemented{} } if err := acl.Set(ctx, storage.AllUsers, role); err != nil { logger.LogIf(ctx, err) return gcsToObjectError(err, bucket) } return nil } // GetBucketPolicy - Get policy on bucket func (l *gcsGateway) GetBucketPolicy(ctx context.Context, bucket string) (*policy.Policy, error) { rules, err := l.client.Bucket(bucket).ACL().List(ctx) if err != nil { return nil, gcsToObjectError(err, bucket) } var readOnly, writeOnly bool for _, r := range rules { if r.Entity != storage.AllUsers || r.Role == storage.RoleOwner { continue } switch r.Role { case storage.RoleReader: readOnly = true case storage.RoleWriter: writeOnly = true } } actionSet := policy.NewActionSet() if readOnly { actionSet.Add(policy.GetBucketLocationAction) actionSet.Add(policy.ListBucketAction) actionSet.Add(policy.GetObjectAction) } if writeOnly { actionSet.Add(policy.GetBucketLocationAction) actionSet.Add(policy.ListBucketMultipartUploadsAction) actionSet.Add(policy.AbortMultipartUploadAction) actionSet.Add(policy.DeleteObjectAction) actionSet.Add(policy.ListMultipartUploadPartsAction) actionSet.Add(policy.PutObjectAction) } // Return NoSuchBucketPolicy error, when policy is not set if len(actionSet) == 0 { return nil, gcsToObjectError(minio.BucketPolicyNotFound{}, bucket) } return &policy.Policy{ Version: 
policy.DefaultVersion, Statements: []policy.Statement{ policy.NewStatement( policy.Allow, policy.NewPrincipal("*"), actionSet, policy.NewResourceSet( policy.NewResource(bucket, ""), policy.NewResource(bucket, "*"), ), condition.NewFunctions(), ), }, }, nil } // DeleteBucketPolicy - Delete all policies on bucket func (l *gcsGateway) DeleteBucketPolicy(ctx context.Context, bucket string) error { // This only removes the storage.AllUsers policies if err := l.client.Bucket(bucket).ACL().Delete(ctx, storage.AllUsers); err != nil { return gcsToObjectError(err, bucket) } return nil } // IsCompressionSupported returns whether compression is applicable for this layer. func (l *gcsGateway) IsCompressionSupported() bool { return false } // IsReady returns whether the layer is ready to take requests. func (l *gcsGateway) IsReady(ctx context.Context) bool { return minio.IsBackendOnline(ctx, l.httpClient, "https://storage.googleapis.com") }
[ "\"GOOGLE_APPLICATION_CREDENTIALS\"" ]
[]
[ "GOOGLE_APPLICATION_CREDENTIALS" ]
[]
["GOOGLE_APPLICATION_CREDENTIALS"]
go
1
0
signer/appengine.go
package signer import ( "context" "fmt" "os" "google.golang.org/appengine" ) type appengineSigner struct{} func (s *appengineSigner) SignJwt(ctx context.Context, c string) (string, error) { certificates, err := appengine.PublicCertificates(ctx) if err != nil { return "", err } for _, certificate := range certificates { kid := certificate.KeyName key, signed, err := signJwtHelper(ctx, c, kid, s) if err != nil { return "", err } if key != certificate.KeyName { continue } return signed, nil } return "", fmt.Errorf("key not matched") } func (s *appengineSigner) ServiceAccount(ctx context.Context) string { email, err := appengine.ServiceAccount(ctx) if err != nil { return "" } return email } func (s *appengineSigner) SignBlob(ctx context.Context, b []byte) (string, []byte, error) { return appengine.SignBytes(ctx, b) } func newAppEngineSigner() (Signer, error) { return &appengineSigner{}, nil } func isSupportedAppEngineRuntime() bool { return appengine.IsStandard() && os.Getenv("GAE_RUNTIME") == "go111" }
[ "\"GAE_RUNTIME\"" ]
[]
[ "GAE_RUNTIME" ]
[]
["GAE_RUNTIME"]
go
1
0
infra/modules/providers/azure/cosmosdb/tests/integration/cosmos.go
package integration import ( "os" "testing" "github.com/Azure/azure-sdk-for-go/services/cosmos-db/mgmt/2015-04-08/documentdb" httpClient "github.com/gruntwork-io/terratest/modules/http-helper" "github.com/microsoft/cobalt/test-harness/infratests" "github.com/microsoft/cobalt/test-harness/terratest-extensions/modules/azure" "github.com/stretchr/testify/require" ) var subscription = os.Getenv("ARM_SUBSCRIPTION_ID") // validateOutputs - Asserts that expected output values are present. func validateOutputs(t *testing.T, id string, endpoint string, primaryMasterKey string, connectionStrings []interface{}) { require.NotEqual(t, "", id, "ID not returned.") require.NotEmpty(t, endpoint, "Endpoint not returned.") require.NotEmpty(t, primaryMasterKey, "Master Key missing.") require.Equal(t, 4, len(connectionStrings), "Unexpected number of connection strings.") } // healthCheck - Asserts that the deployment was successful. func healthCheck(t *testing.T, provisionState *string) { require.Equal(t, "Succeeded", *provisionState, "The deployment hasn't succeeded.") } // validateOfferType - Asserts that the fixed offer type "Standard" has not changed. func validateOfferType(t *testing.T, offerType documentdb.DatabaseAccountOfferType) { require.Equal(t, documentdb.Standard, offerType, "The offer type is incorrect.") } // validateFailOverPriority - Asserts that the fixed fail over priority '0' has not changed. func validateFailOverPriority(t *testing.T, failOverPolicy documentdb.FailoverPolicy) { require.Equal(t, int32(0), *failOverPolicy.FailoverPriority, "The fail over priority is incorrect.") } // getModuleOutputs - Extracts the output variables from property map. 
func getModuleOutputs(output infratests.TerraformOutput, outputName string) (id string, endpoint string, primaryMasterKey string, connectionStrings []interface{}) { properties := output[outputName].(map[string]interface{}) cosmosDBProperties := properties["cosmosdb"].(map[string]interface{}) id = cosmosDBProperties["id"].(string) endpoint = cosmosDBProperties["endpoint"].(string) primaryMasterKey = cosmosDBProperties["primary_master_key"].(string) connectionStrings = cosmosDBProperties["connection_strings"].([]interface{}) return } // validateServiceResponse - Attempt to perform a HTTP request to the live endpoint. func validateServiceResponse(t *testing.T, output infratests.TerraformOutput, outputName string) { _, endpoint, _, _ := getModuleOutputs(output, outputName) statusCode, _ := httpClient.HttpGet(t, endpoint) require.Equal(t, 401, statusCode, "Service did not respond with the expected Unauthorized status code.") } // InspectProvisionedCosmosDBAccount - Runs test assertions to validate that a provisioned CosmosDB Account // is operational. func InspectProvisionedCosmosDBAccount(resourceGroupOutputName, accountName, outputName string) func(t *testing.T, output infratests.TerraformOutput) { return func(t *testing.T, output infratests.TerraformOutput) { resourceGroupName := output[resourceGroupOutputName].(string) accountName := output[accountName].(string) result := azure.GetCosmosDBAccount(t, subscription, resourceGroupName, accountName) healthCheck(t, result.ProvisioningState) validateOfferType(t, result.DatabaseAccountOfferType) failOverPolicies := *result.FailoverPolicies require.Equal(t, 1, len(failOverPolicies)) validateFailOverPriority(t, failOverPolicies[0]) validateServiceResponse(t, output, outputName) } } // InspectCosmosDBModuleOutputs - Runs test assertions to validate that the module outputs are valid. 
func InspectCosmosDBModuleOutputs(outputName string) func(t *testing.T, output infratests.TerraformOutput) { return func(t *testing.T, output infratests.TerraformOutput) { id, endpoint, primaryMasterKey, connectionStrings := getModuleOutputs(output, outputName) validateOutputs(t, id, endpoint, primaryMasterKey, connectionStrings) } }
[ "\"ARM_SUBSCRIPTION_ID\"" ]
[]
[ "ARM_SUBSCRIPTION_ID" ]
[]
["ARM_SUBSCRIPTION_ID"]
go
1
0
uitest.py
#!/usr/bin/env python import os from systori import settings settings.INSTALLED_APPS = [a for a in settings.INSTALLED_APPS if a != "debug_toolbar"] os.environ.setdefault("DJANGO_SETTINGS_MODULE", "systori.settings.travis") import django django.setup() import unittest from django.core.management import call_command from django.db import connections, DEFAULT_DB_ALIAS from django.test.testcases import LiveServerThread, _StaticFilesHandler from selenium import webdriver from sauceclient import SauceClient from systori.apps.accounting.workflow import create_chart_of_accounts TRAVIS_JOB_NUMBER = os.environ.get("TRAVIS_JOB_NUMBER", 1) TRAVIS_BUILD_NUMBER = os.environ.get("TRAVIS_BUILD_NUMBER", 1) CHROME_VERSION = "43.0" SAUCE_BROWSERS = [ ("OS X 10.11", "safari", "9.0"), # ("OS X 10.10", "chrome", CHROME_VERSION), # ("Windows 7", "internet explorer", "11.0"), # ("Windows 7", "chrome", CHROME_VERSION), ] SELENIUM_WAIT_TIME = 15 # max seconds to wait for page to load before failing SAUCE_PORTS = [8003, 8031, 8765] # per: https://docs.saucelabs.com/reference/sauce-connect/#can-i-access-applications-on-localhost- def make_suite(driver, server, sauce=None): main_suite = unittest.defaultTestLoader.discover("uitests") main_suite.sauce = sauce main_suite.driver = driver for suite in main_suite: for sub_suite in suite: for test in sub_suite: test.driver = driver test.server = server return main_suite def sauce_update(suite, result): build_num = TRAVIS_BUILD_NUMBER if result.wasSuccessful(): suite.sauce.jobs.update_job( suite.driver.session_id, passed=True, build_num=build_num, public="share" ) else: suite.sauce.jobs.update_job( suite.driver.session_id, passed=False, build_num=build_num, public="share" ) def run_tests(runner, suite, cleanup, keep_open): name = "{platform} {browserName} {version}".format( **suite.driver.desired_capabilities ) print("Starting: {}".format(name)) result = runner.run(suite) if cleanup: cleanup(suite, result) if not keep_open: suite.driver.quit() if 
result.wasSuccessful(): print("Passed: {}".format(name)) else: print("Failed: {}".format(name)) def start_django(): server = LiveServerThread("localhost", SAUCE_PORTS, _StaticFilesHandler) server.daemon = True server.start() return server def setup_database(verbosity=3): creation = connections[DEFAULT_DB_ALIAS].creation test_database_name = creation._get_test_db_name() settings.DATABASES[creation.connection.alias]["NAME"] = test_database_name creation.connection.settings_dict["NAME"] = test_database_name if True: creation._create_test_db(verbosity=verbosity, autoclobber=False, keepdb=False) creation.connection.close() call_command( "migrate", verbosity=max(verbosity - 1, 0), interactive=False, database=creation.connection.alias, test_flush=True, ) def setup_test_data(): from systori.apps.company.models import Company, Access from systori.apps.user.models import User from systori.apps.project.models import Project from systori.apps.document.models import Letterhead, DocumentSettings company = Company.objects.create(schema="test", name="Test") company.activate() user = User.objects.create_user( "[email protected]", "pass", first_name="Standard", last_name="Worker", language="en", ) Access.objects.create(user=user, company=company, is_staff=True) user = User.objects.create_user( "[email protected]", "pass", first_name="Standard2", last_name="Worker2", language="en", ) Access.objects.create(user=user, company=company, is_staff=True) create_chart_of_accounts() Project.objects.create(name="Template Project", is_template=True) letterhead_pdf = os.path.join( settings.BASE_DIR, "apps/document/test_data/letterhead.pdf" ) letterhead = Letterhead.objects.create( name="Test Letterhead", letterhead_pdf=letterhead_pdf ) DocumentSettings.objects.create( language="en", evidence_letterhead=letterhead, proposal_letterhead=letterhead, invoice_letterhead=letterhead, ) def main(driver_names, keep_open, not_parallel): setup_database() setup_test_data() server = start_django() # async # 
while django is starting we setup the webdrivers... suites = [] if "chrome" in driver_names: chrome = webdriver.Chrome("chromedriver") chrome.implicitly_wait(SELENIUM_WAIT_TIME) suites.append((make_suite(chrome, server), None)) if "firefox" in driver_names: firefox = webdriver.Firefox() firefox.implicitly_wait(SELENIUM_WAIT_TIME) suites.append((make_suite(firefox, server), None)) if "saucelabs" in driver_names: username = os.environ.get("SAUCE_USERNAME", "systori_ci") access_key = os.environ.get( "SAUCE_ACCESS_KEY", "1c7a1f7b-9890-46ef-89d0-93e435df146a" ) sauce = SauceClient(username, access_key) sauce_url = "http://%s:%[email protected]:80/wd/hub" % ( username, access_key, ) for platform, browser, version in SAUCE_BROWSERS: saucelabs = webdriver.Remote( desired_capabilities={ "name": "systori ui tests", "platform": platform, "browserName": browser, "version": version, "tunnel-identifier": TRAVIS_JOB_NUMBER, "build": TRAVIS_BUILD_NUMBER, }, command_executor=sauce_url, ) saucelabs.implicitly_wait(SELENIUM_WAIT_TIME) suites.append((make_suite(saucelabs, server, sauce), sauce_update)) # if django is still not ready, then we wait... server.is_ready.wait() # if django couldn't start, quit() the webdrivers and raise error if server.error: for suite, cleanup in suites: suite.driver.quit() raise server.error from concurrent.futures import ThreadPoolExecutor with ThreadPoolExecutor(max_workers=4) as executor: for suite, cleanup in suites: runner = unittest.TextTestRunner() if not_parallel: run_tests(runner, suite, cleanup, keep_open) else: executor.submit(run_tests, runner, suite, cleanup, keep_open) # all done, stop django server.terminate() server.join() if __name__ == "__main__": import argparse parser = argparse.ArgumentParser(description="Test Systori UI using Selenium.") parser.add_argument( "--not-parallel", action="store_true", help="Do not run tests in parallel." 
) parser.add_argument( "--keep-open", action="store_true", help="Keep local browser open after running tests.", ) parser.add_argument( "drivers", nargs="+", choices=["saucelabs", "chrome", "firefox"], help="Run on Sauce Labs and/or on local browser.", ) args = parser.parse_args() main(args.drivers, args.keep_open, args.not_parallel)
[]
[]
[ "SAUCE_USERNAME", "TRAVIS_BUILD_NUMBER", "SAUCE_ACCESS_KEY", "TRAVIS_JOB_NUMBER" ]
[]
["SAUCE_USERNAME", "TRAVIS_BUILD_NUMBER", "SAUCE_ACCESS_KEY", "TRAVIS_JOB_NUMBER"]
python
4
0
services/services_test.go
package services_test import ( "testing" "time" "github.com/rbusquet/cosmic-go/repository" "github.com/rbusquet/cosmic-go/services" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/suite" ) type ServicesSuite struct { suite.Suite } func (suite *ServicesSuite) TestReturnsAllocation() { repo := repository.NewFakeRepository() services.AddBatch("b1", "COMPLICATED-LAMP", 100, time.Time{}, repo) result, err := services.Allocate("o1", "COMPLICATED-LAMP", 10, repo) assert.NoError(suite.T(), err) assert.Equal(suite.T(), "b1", result) } func (suite *ServicesSuite) TestErrorForInvalidSku() { repo := repository.NewFakeRepository() services.AddBatch("b1", "AREALSKU", 100, time.Time{}, repo) _, err := services.Allocate("o1", "NONEXISTENTSKU", 10, repo) assert.EqualError(suite.T(), err, "Invalid SKU NONEXISTENTSKU") } func (suite *ServicesSuite) TestSaves() { repo := repository.NewFakeRepository() services.AddBatch("b1", "SOMETHING-ELSE", 100, time.Time{}, repo) services.Allocate("o1", "SOMETHING-ELSE", 10, repo) assert.Equal(suite.T(), true, repo.Saved) } func (suite *ServicesSuite) TestPrefersWarehouseStockBatchesToShipments() { tomorrow := time.Now().AddDate(0, 0, 1) repo := repository.NewFakeRepository() services.AddBatch("in-stock-batch", "RETRO-CLOCK", 100, time.Time{}, repo) services.AddBatch("shipment-batch", "RETRO-CLOCK", 100, tomorrow, repo) batch, err := services.Allocate("oref", "RETRO-CLOCK", 10, repo) assert.NoError(suite.T(), err) assert.Equal(suite.T(), "in-stock-batch", batch) } func (suite *ServicesSuite) TestPrefersEarlierBatches() { today := time.Now() tomorrow := today.AddDate(0, 0, 1) later := today.AddDate(0, 1, 0) repo := repository.NewFakeRepository() services.AddBatch("normal-batch", "MINIMALIST-SPOON", 100, tomorrow, repo) services.AddBatch("speedy-batch", "MINIMALIST-SPOON", 100, today, repo) services.AddBatch("slow-batch", "MINIMALIST-SPOON", 100, later, repo) batch, err := services.Allocate("order1", "MINIMALIST-SPOON", 10, repo) 
assert.NoError(suite.T(), err) assert.Equal(suite.T(), "speedy-batch", batch) } func (suite *ServicesSuite) TestReturnsAllocatedBatchRef() { tomorrow := time.Now().AddDate(0, 0, 1) repo := repository.NewFakeRepository() services.AddBatch("in-stock-batch", "HIGHBROW-POSTER", 100, time.Time{}, repo) services.AddBatch("shipment-batch", "HIGHBROW-POSTER", 100, tomorrow, repo) allocation, err := services.Allocate("oref", "HIGHBROW-POSTER", 10, repo) assert.NoError(suite.T(), err) assert.Equal(suite.T(), "in-stock-batch", allocation) } func (suite *ServicesSuite) TestRaisesOutOfStockExceptionIfCannotAllocate() { repo := repository.NewFakeRepository() services.AddBatch("batch1", "SMALL-FORK", 10, time.Now(), repo) _, err := services.Allocate("order1", "SMALL-FORK", 10, repo) assert.NoError(suite.T(), err) _, err = services.Allocate("order2", "SMALL-FORK", 10, repo) assert.EqualError(suite.T(), err, "Out of stock for SKU SMALL-FORK") } func (suite *ServicesSuite) TestAddBatch() { repo := repository.NewFakeRepository() services.AddBatch("b1", "CRUNCHY-ARMCHAIR", 100, time.Now(), repo) assert.NotNil(suite.T(), repo.Get("b1")) } func TestServicesSuite(t *testing.T) { suite.Run(t, new(ServicesSuite)) }
[]
[]
[]
[]
[]
go
null
null
null
api/ident/service.go
package ident import ( "encoding/json" "fmt" "os" "github.com/provideplatform/provide-go/api" "github.com/provideplatform/provide-go/common" ) const defaultIdentHost = "ident.provide.services" const defaultIdentPath = "api/v1" const defaultIdentScheme = "https" // Service for the ident api type Service struct { api.Client } // InitDefaultIdentService convenience method to initialize a default `ident.Service` (i.e., production) instance func InitDefaultIdentService(token *string) *Service { return &Service{ api.Client{ Host: defaultIdentHost, Path: defaultIdentPath, Scheme: defaultIdentScheme, Token: token, }, } } // InitIdentService convenience method to initialize an `ident.Service` instance func InitIdentService(token *string) *Service { host := defaultIdentHost if os.Getenv("IDENT_API_HOST") != "" { host = os.Getenv("IDENT_API_HOST") } path := defaultIdentPath if os.Getenv("IDENT_API_PATH") != "" { path = os.Getenv("IDENT_API_PATH") } scheme := defaultIdentScheme if os.Getenv("IDENT_API_SCHEME") != "" { scheme = os.Getenv("IDENT_API_SCHEME") } return &Service{ api.Client{ Host: host, Path: path, Scheme: scheme, Token: token, }, } } // Authenticate a user by email address and password, returning a newly-authorized API token func Authenticate(email, passwd string) (*AuthenticationResponse, error) { prvd := InitIdentService(nil) status, resp, err := prvd.Post("authenticate", map[string]interface{}{ "email": email, "password": passwd, "scope": "offline_access", }) if err != nil { return nil, err } // FIXME... 
authresp := &AuthenticationResponse{} raw, _ := json.Marshal(resp) err = json.Unmarshal(raw, &authresp) if err != nil { return nil, fmt.Errorf("failed to authenticate user; status: %d; %s", status, err.Error()) } else if status != 201 { return nil, fmt.Errorf("failed to authenticate user; status: %d", status) } return authresp, nil } // CreateApplication on behalf of the given API token func CreateApplication(token string, params map[string]interface{}) (*Application, error) { status, resp, err := InitIdentService(common.StringOrNil(token)).Post("applications", params) if err != nil { return nil, err } if status != 201 { return nil, fmt.Errorf("failed to create application; status: %v", status) } // FIXME... app := &Application{} appraw, _ := json.Marshal(resp) err = json.Unmarshal(appraw, &app) if err != nil { return nil, fmt.Errorf("failed to create application; status: %v; %s", status, err.Error()) } return app, nil } // UpdateApplication using the given API token, application id and params func UpdateApplication(token, applicationID string, params map[string]interface{}) error { uri := fmt.Sprintf("applications/%s", applicationID) status, _, err := InitIdentService(common.StringOrNil(token)).Put(uri, params) if err != nil { return err } if status != 204 { return fmt.Errorf("failed to update application; status: %v", status) } return nil } // DeleteApplication soft-deletes the application using the given API token func DeleteApplication(token, applicationID string) error { err := UpdateApplication(token, applicationID, map[string]interface{}{ "hidden": true, }) if err != nil { return err } return nil } // ListApplications retrieves a paginated list of applications scoped to the given API token func ListApplications(token string, params map[string]interface{}) ([]*Application, error) { status, resp, err := InitIdentService(common.StringOrNil(token)).Get("applications", params) if err != nil { return nil, err } if status != 200 { return nil, fmt.Errorf("failed to 
list applications; status: %v", status) } apps := make([]*Application, 0) for _, item := range resp.([]interface{}) { app := &Application{} appraw, _ := json.Marshal(item) json.Unmarshal(appraw, &app) apps = append(apps, app) } return apps, nil } // GetApplicationDetails retrives application details for the given API token and application id func GetApplicationDetails(token, applicationID string, params map[string]interface{}) (*Application, error) { uri := fmt.Sprintf("applications/%s", applicationID) status, resp, err := InitIdentService(common.StringOrNil(token)).Get(uri, params) if err != nil { return nil, err } if status != 200 { return nil, fmt.Errorf("failed to fetch application details; status: %v", status) } // FIXME... app := &Application{} appraw, _ := json.Marshal(resp) err = json.Unmarshal(appraw, &app) if err != nil { return nil, fmt.Errorf("failed to fetch application details; status: %v; %s", status, err.Error()) } return app, nil } // ListApplicationTokens retrieves a paginated list of application API tokens func ListApplicationTokens(token, applicationID string, params map[string]interface{}) ([]*Token, error) { uri := fmt.Sprintf("applications/%s/tokens", applicationID) status, resp, err := InitIdentService(common.StringOrNil(token)).Get(uri, params) if err != nil { return nil, err } if status != 200 { return nil, fmt.Errorf("failed to list application tokens; status: %v", status) } tkns := make([]*Token, 0) for _, item := range resp.([]interface{}) { tkn := &Token{} tknraw, _ := json.Marshal(item) json.Unmarshal(tknraw, &tkn) tkns = append(tkns, tkn) } return tkns, nil } // ListApplicationInvitations retrieves a paginated list of invitations scoped to the given API token func ListApplicationInvitations(token, applicationID string, params map[string]interface{}) ([]*User, error) { uri := fmt.Sprintf("applications/%s/invitations", applicationID) status, resp, err := InitIdentService(common.StringOrNil(token)).Get(uri, params) if err != nil { 
return nil, err } if status != 200 { return nil, fmt.Errorf("failed to list application invitations; status: %v", status) } users := make([]*User, 0) for _, item := range resp.([]interface{}) { usr := &User{} usrraw, _ := json.Marshal(item) json.Unmarshal(usrraw, &usr) users = append(users, usr) } return users, nil } // ListApplicationOrganizations retrieves a paginated list of organizations scoped to the given API token func ListApplicationOrganizations(token, applicationID string, params map[string]interface{}) ([]*Organization, error) { uri := fmt.Sprintf("applications/%s/organizations", applicationID) status, resp, err := InitIdentService(common.StringOrNil(token)).Get(uri, params) if err != nil { return nil, err } if status != 200 { return nil, fmt.Errorf("failed to list application organizations; status: %v", status) } orgs := make([]*Organization, 0) for _, item := range resp.([]interface{}) { org := &Organization{} orgraw, _ := json.Marshal(item) json.Unmarshal(orgraw, &org) orgs = append(orgs, org) } return orgs, nil } // CreateApplicationOrganization associates an organization with an application func CreateApplicationOrganization(token, applicationID string, params map[string]interface{}) error { uri := fmt.Sprintf("applications/%s/organizations", applicationID) status, _, err := InitIdentService(common.StringOrNil(token)).Post(uri, params) if err != nil { return err } if status != 204 { return fmt.Errorf("failed to associate application organization; status: %v", status) } return nil } // DeleteApplicationOrganization disassociates an organization with an application func DeleteApplicationOrganization(token, applicationID, organizationID string) error { uri := fmt.Sprintf("applications/%s/organizations/%s", applicationID, organizationID) status, _, err := InitIdentService(common.StringOrNil(token)).Delete(uri) if err != nil { return err } if status != 204 { return fmt.Errorf("failed to disassociate application organization; status: %v", status) } return 
nil } // ListApplicationUsers retrieves a paginated list of users scoped to the given API token func ListApplicationUsers(token, applicationID string, params map[string]interface{}) ([]*User, error) { uri := fmt.Sprintf("applications/%s/users", applicationID) status, resp, err := InitIdentService(common.StringOrNil(token)).Get(uri, params) if err != nil { return nil, err } if status != 200 { return nil, fmt.Errorf("failed to list application users; status: %v", status) } users := make([]*User, 0) for _, item := range resp.([]interface{}) { usr := &User{} usrraw, _ := json.Marshal(item) json.Unmarshal(usrraw, &usr) users = append(users, usr) } return users, nil } // CreateApplicationUser associates a user with an application func CreateApplicationUser(token, applicationID string, params map[string]interface{}) error { uri := fmt.Sprintf("applications/%s/users", applicationID) status, _, err := InitIdentService(common.StringOrNil(token)).Post(uri, params) if err != nil { return err } if status != 204 { return fmt.Errorf("failed to associate application user; status: %v", status) } return nil } // DeleteApplicationUser disassociates a user with an application func DeleteApplicationUser(token, applicationID, userID string) error { uri := fmt.Sprintf("applications/%s/users/%s", applicationID, userID) status, _, err := InitIdentService(common.StringOrNil(token)).Delete(uri) if err != nil { return err } if status != 204 { return fmt.Errorf("failed to disassociate application user; status: %v", status) } return nil } // CreateApplicationToken creates a new API token for the given application ID. func CreateApplicationToken(token, applicationID string, params map[string]interface{}) (*Token, error) { params["application_id"] = applicationID status, resp, err := InitIdentService(common.StringOrNil(token)).Post("tokens", params) if err != nil { return nil, err } // FIXME... 
tkn := &Token{} tknraw, _ := json.Marshal(resp) err = json.Unmarshal(tknraw, &tkn) if err != nil { return nil, fmt.Errorf("failed to authorize application token; status: %v; %s", status, err.Error()) } return tkn, nil } // ListOrganizations retrieves a paginated list of organizations scoped to the given API token func ListOrganizations(token string, params map[string]interface{}) ([]*Organization, error) { status, resp, err := InitIdentService(common.StringOrNil(token)).Get("organizations", params) if err != nil { return nil, err } if status != 200 { return nil, fmt.Errorf("failed to list organizations; status: %v", status) } orgs := make([]*Organization, 0) for _, item := range resp.([]interface{}) { org := &Organization{} orgraw, _ := json.Marshal(item) json.Unmarshal(orgraw, &org) orgs = append(orgs, org) } return orgs, nil } // CreateToken creates a new API token. func CreateToken(token string, params map[string]interface{}) (*Token, error) { status, resp, err := InitIdentService(common.StringOrNil(token)).Post("tokens", params) if err != nil { return nil, err } if status != 201 { return nil, fmt.Errorf("failed to authorize token; status: %v", status) } // FIXME... 
tkn := &Token{} tknraw, _ := json.Marshal(resp) err = json.Unmarshal(tknraw, &tkn) if err != nil { return nil, fmt.Errorf("failed to authorize tokens; status: %v; %s", status, err.Error()) } return tkn, nil } // ListTokens retrieves a paginated list of API tokens scoped to the given API token func ListTokens(token string, params map[string]interface{}) ([]*Token, error) { status, resp, err := InitIdentService(common.StringOrNil(token)).Get("tokens", params) if err != nil { return nil, err } if status != 200 { return nil, fmt.Errorf("failed to list application tokens; status: %v", status) } tkns := make([]*Token, 0) for _, item := range resp.([]interface{}) { tkn := &Token{} tknraw, _ := json.Marshal(item) json.Unmarshal(tknraw, &tkn) tkns = append(tkns, tkn) } return tkns, nil } // GetTokenDetails retrieves details for the given API token id func GetTokenDetails(token, tokenID string, params map[string]interface{}) (*Token, error) { uri := fmt.Sprintf("tokens/%s", tokenID) status, resp, err := InitIdentService(common.StringOrNil(token)).Get(uri, params) if err != nil { return nil, err } if status != 200 { return nil, fmt.Errorf("failed to fetch token details; status: %v", status) } // FIXME... 
tkn := &Token{} tknraw, _ := json.Marshal(resp) err = json.Unmarshal(tknraw, &tkn) if err != nil { return nil, fmt.Errorf("failed to fetch token details; status: %v; %s", status, err.Error()) } return tkn, nil } // DeleteToken removes a previously authorized API token, effectively deauthorizing future calls using the token func DeleteToken(token, tokenID string) error { uri := fmt.Sprintf("tokens/%s", tokenID) status, _, err := InitIdentService(common.StringOrNil(token)).Delete(uri) if err != nil { return err } if status != 204 { return fmt.Errorf("failed to revoke token; status: %v", status) } return nil } // CreateOrganization creates a new organization func CreateOrganization(token string, params map[string]interface{}) (*Organization, error) { status, resp, err := InitIdentService(common.StringOrNil(token)).Post("organizations", params) if err != nil { return nil, err } if status != 201 { return nil, fmt.Errorf("failed to create organization; status: %v", status) } // FIXME... org := &Organization{} orgraw, _ := json.Marshal(resp) err = json.Unmarshal(orgraw, &org) if err != nil { return nil, fmt.Errorf("failed to create organization; status: %v; %s", status, err.Error()) } return org, nil } // GetOrganizationDetails retrieves details for the given organization func GetOrganizationDetails(token, organizationID string, params map[string]interface{}) (*Organization, error) { uri := fmt.Sprintf("organizations/%s", organizationID) status, resp, err := InitIdentService(common.StringOrNil(token)).Get(uri, params) if err != nil { return nil, err } if status != 200 { return nil, fmt.Errorf("failed to fetch organization; status: %v", status) } // FIXME... 
org := &Organization{} orgraw, _ := json.Marshal(resp) err = json.Unmarshal(orgraw, &org) if err != nil { return nil, fmt.Errorf("failed to fetch organization details; status: %v; %s", status, err.Error()) } return org, nil } // UpdateOrganization updates an organization func UpdateOrganization(token, organizationID string, params map[string]interface{}) error { uri := fmt.Sprintf("organizations/%s", organizationID) status, _, err := InitIdentService(common.StringOrNil(token)).Put(uri, params) if err != nil { return err } if status != 204 { return fmt.Errorf("failed to update associated organization user; status: %v", status) } return nil } // CreateInvitation creates a user invitation func CreateInvitation(token string, params map[string]interface{}) error { status, _, err := InitIdentService(common.StringOrNil(token)).Post("invitations", params) if err != nil { return err } if status != 204 { return fmt.Errorf("failed to create invitation; status: %v", status) } return nil } // CreateUser creates a new user for which API tokens and managed signing identities can be authorized func CreateUser(token string, params map[string]interface{}) (*User, error) { status, resp, err := InitIdentService(common.StringOrNil(token)).Post("users", params) if err != nil { return nil, err } // FIXME... 
usr := &User{} usrraw, _ := json.Marshal(resp) err = json.Unmarshal(usrraw, &usr) if err != nil { return nil, fmt.Errorf("failed to create user; status: %v; %s", status, err.Error()) } return usr, nil } // ListOrganizationUsers retrieves a paginated list of users scoped to an organization func ListOrganizationUsers(token, orgID string, params map[string]interface{}) ([]*User, error) { uri := fmt.Sprintf("organizations/%s/users", orgID) status, resp, err := InitIdentService(common.StringOrNil(token)).Get(uri, params) if err != nil { return nil, err } if status != 200 { return nil, fmt.Errorf("failed to list users; status: %v", status) } users := make([]*User, 0) for _, item := range resp.([]interface{}) { usr := &User{} usrraw, _ := json.Marshal(item) json.Unmarshal(usrraw, &usr) users = append(users, usr) } return users, nil } // CreateOrganizationUser associates a user with an organization func CreateOrganizationUser(token, orgID string, params map[string]interface{}) error { uri := fmt.Sprintf("organizations/%s/users", orgID) status, _, err := InitIdentService(common.StringOrNil(token)).Post(uri, params) if err != nil { return err } if status != 204 { return fmt.Errorf("failed to associate organization user; status: %v", status) } return nil } // UpdateOrganizationUser updates an associated organization user= func UpdateOrganizationUser(token, orgID, userID string, params map[string]interface{}) error { uri := fmt.Sprintf("organizations/%s/users/%s", orgID, userID) status, _, err := InitIdentService(common.StringOrNil(token)).Put(uri, params) if err != nil { return err } if status != 204 { return fmt.Errorf("failed to update associated organization user; status: %v", status) } return nil } // DeleteOrganizationUser disassociates a user with an organization func DeleteOrganizationUser(token, orgID, userID string) error { uri := fmt.Sprintf("organizations/%s/users/%s", orgID, userID) status, _, err := InitIdentService(common.StringOrNil(token)).Delete(uri) if err 
!= nil { return err } if status != 204 { return fmt.Errorf("failed to disassociate organization user; status: %v", status) } return nil } // ListOrganizationInvitations retrieves a paginated list of organization invitations scoped to the given API token func ListOrganizationInvitations(token, organizationID string, params map[string]interface{}) ([]*User, error) { uri := fmt.Sprintf("organizations/%s/invitations", organizationID) status, resp, err := InitIdentService(common.StringOrNil(token)).Get(uri, params) if err != nil { return nil, err } if status != 200 { return nil, fmt.Errorf("failed to list organization invitations; status: %v", status) } users := make([]*User, 0) for _, item := range resp.([]interface{}) { usr := &User{} usrraw, _ := json.Marshal(item) json.Unmarshal(usrraw, &usr) users = append(users, usr) } return users, nil } // ListUsers retrieves a paginated list of users scoped to the given API token func ListUsers(token string, params map[string]interface{}) ([]*User, error) { status, resp, err := InitIdentService(common.StringOrNil(token)).Get("users", params) if err != nil { return nil, err } if status != 200 { return nil, fmt.Errorf("failed to list users; status: %v", status) } users := make([]*User, 0) for _, item := range resp.([]interface{}) { usr := &User{} usrraw, _ := json.Marshal(item) json.Unmarshal(usrraw, &usr) users = append(users, usr) } return users, nil } // GetUserDetails retrieves details for the given user id func GetUserDetails(token, userID string, params map[string]interface{}) (*User, error) { uri := fmt.Sprintf("users/%s", userID) status, resp, err := InitIdentService(common.StringOrNil(token)).Get(uri, params) if err != nil { return nil, err } // FIXME... 
usr := &User{} usrraw, _ := json.Marshal(resp) err = json.Unmarshal(usrraw, &usr) if err != nil { return nil, fmt.Errorf("failed to fetch user details; status: %v; %s", status, err.Error()) } return usr, nil } // UpdateUser updates an existing user func UpdateUser(token, userID string, params map[string]interface{}) error { uri := fmt.Sprintf("users/%s", userID) status, _, err := InitIdentService(common.StringOrNil(token)).Put(uri, params) if err != nil { return err } if status != 204 { return fmt.Errorf("failed to update user; status: %v", status) } return nil } // RequestPasswordReset initiates a password reset request func RequestPasswordReset(token, applicationID *string, email string) error { params := map[string]interface{}{ "email": email, } if applicationID != nil { params["application_id"] = applicationID } status, _, err := InitIdentService(token).Post("users/reset_password", params) if err != nil { return fmt.Errorf("failed to request password reset; status: %v; %s", status, err.Error()) } if status != 204 { return fmt.Errorf("failed to request password reset for user: %s; status: %v", email, status) } return nil } // ResetPassword completes a previously-requested password reset operation for a user func ResetPassword(token *string, resetPasswordToken, passwd string) error { uri := fmt.Sprintf("users/reset_password/%s", resetPasswordToken) status, _, err := InitIdentService(token).Post(uri, map[string]interface{}{ "password": passwd, }) if err != nil { return fmt.Errorf("failed to reset password; status: %v; %s", status, err.Error()) } if status != 204 { return fmt.Errorf("failed to reset password; status: %v", status) } return nil } // Status returns the status of the endpoint func Status() error { host := defaultIdentHost if os.Getenv("IDENT_API_HOST") != "" { host = os.Getenv("IDENT_API_HOST") } scheme := defaultIdentScheme if os.Getenv("IDENT_API_SCHEME") != "" { scheme = os.Getenv("IDENT_API_SCHEME") } service := &Service{ api.Client{ Host: host, 
Path: "", Scheme: scheme, }, } status, _, err := service.Get("status", map[string]interface{}{}) if err != nil { return fmt.Errorf("failed to fetch status; %s", err.Error()) } if status != 200 { return fmt.Errorf("status endpoint returned %d status code", status) } return nil } // GetJWKs returns the set of keys containing the public keys used to verify JWTs func GetJWKs() ([]*JSONWebKey, error) { host := defaultIdentHost if os.Getenv("IDENT_API_HOST") != "" { host = os.Getenv("IDENT_API_HOST") } scheme := defaultIdentScheme if os.Getenv("IDENT_API_SCHEME") != "" { scheme = os.Getenv("IDENT_API_SCHEME") } service := &Service{ api.Client{ Host: host, Path: "", Scheme: scheme, }, } status, resp, err := service.Get(".well-known/keys", map[string]interface{}{}) if err != nil { return nil, fmt.Errorf("failed to fetch well-known JWKs; %s", err.Error()) } if status != 200 { return nil, fmt.Errorf("well-known JWKs endpoint returned %d status code", status) } keys := make([]*JSONWebKey, 0) for _, item := range resp.([]interface{}) { key := &JSONWebKey{} keyraw, _ := json.Marshal(item) json.Unmarshal(keyraw, &key) keys = append(keys, key) } return keys, nil }
[ "\"IDENT_API_HOST\"", "\"IDENT_API_HOST\"", "\"IDENT_API_PATH\"", "\"IDENT_API_PATH\"", "\"IDENT_API_SCHEME\"", "\"IDENT_API_SCHEME\"", "\"IDENT_API_HOST\"", "\"IDENT_API_HOST\"", "\"IDENT_API_SCHEME\"", "\"IDENT_API_SCHEME\"", "\"IDENT_API_HOST\"", "\"IDENT_API_HOST\"", "\"IDENT_API_SCHEME\"", "\"IDENT_API_SCHEME\"" ]
[]
[ "IDENT_API_SCHEME", "IDENT_API_PATH", "IDENT_API_HOST" ]
[]
["IDENT_API_SCHEME", "IDENT_API_PATH", "IDENT_API_HOST"]
go
3
0
search.go
package main import ( "fmt" "io/ioutil" "log" "net/http" "net/url" "os" "sort" "github.com/joho/godotenv" ) const BASE_URL = "https://www.googleapis.com/customsearch/v1?" func search(queryItem string, startIndex int) ([]byte, error) { // read CSE_ID and CSE_KEY from .env file with godotenv package godotenv.Load() CSEID := os.Getenv("CSE_ID") CSEKEY := os.Getenv("CSE_KEY") PROXY_HOST := os.Getenv("PROXY_HOST") params := map[string]interface{}{ "cx": CSEID, "q": url.QueryEscape(queryItem), "key": CSEKEY, "num": 10, "start": startIndex, } // creating proxy string proxyURL, err := url.Parse(PROXY_HOST) if err != nil { log.Print(err) return nil, err } transport := &http.Transport{Proxy: http.ProxyURL(proxyURL)} client := &http.Client{Transport: transport} rawUrl, err := url.Parse(BASE_URL + paramsToQuery(params)) if err != nil { // log err and return log.Print(err) return nil, err } // generating the HTTP GET request through http proxy request, err := http.NewRequest("GET", rawUrl.String(), nil) if err != nil { log.Print(err) return nil, err } response, err := client.Do(request) if err != nil { log.Print(err) return nil, err } defer response.Body.Close() data, err := ioutil.ReadAll(response.Body) if err != nil { log.Print(err) return nil, err } return data, nil } func paramsToQuery(params map[string]interface{}) string { var query string keys := make([]string, 0, len(params)) for k := range params { keys = append(keys, k) } sort.Strings(keys) for _, k := range keys { query += fmt.Sprintf("%s=%v&", k, params[k]) } return query[:len(query)-1] }
[ "\"CSE_ID\"", "\"CSE_KEY\"", "\"PROXY_HOST\"" ]
[]
[ "PROXY_HOST", "CSE_KEY", "CSE_ID" ]
[]
["PROXY_HOST", "CSE_KEY", "CSE_ID"]
go
3
0
dashboard/dashboard/pinpoint/models/job.py
# Copyright 2016 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. from __future__ import print_function from __future__ import division from __future__ import absolute_import import datetime import logging import os import sys import traceback import uuid from google.appengine.api import datastore_errors from google.appengine.api import taskqueue from google.appengine.ext import deferred from google.appengine.ext import ndb from google.appengine.runtime import apiproxy_errors from dashboard import update_bug_with_results from dashboard.common import utils from dashboard.models import histogram from dashboard.pinpoint.models import errors from dashboard.pinpoint.models import job_state from dashboard.pinpoint.models import results2 from dashboard.pinpoint.models import scheduler from dashboard.pinpoint.models import timing_record from dashboard.services import gerrit_service from dashboard.services import issue_tracker_service from tracing.value.diagnostics import reserved_infos # We want this to be fast to minimize overhead while waiting for tasks to # finish, but don't want to consume too many resources. _TASK_INTERVAL = 60 _CRYING_CAT_FACE = u'\U0001f63f' _INFINITY = u'\u221e' _RIGHT_ARROW = u'\u2192' _ROUND_PUSHPIN = u'\U0001f4cd' _MAX_RECOVERABLE_RETRIES = 3 OPTION_STATE = 'STATE' OPTION_TAGS = 'TAGS' OPTION_ESTIMATE = 'ESTIMATE' COMPARISON_MODES = job_state.COMPARISON_MODES RETRY_OPTIONS = taskqueue.TaskRetryOptions(task_retry_limit=8, min_backoff_seconds=2) CREATED_COMMENT_FORMAT = u'''{title} {url} The job has been scheduled on the "{configuration}" queue which currently has {pending} pending jobs. ''' def JobFromId(job_id): """Get a Job object from its ID. Its ID is just its key as a hex string. Users of Job should not have to import ndb. This function maintains an abstraction layer that separates users from the Datastore details. 
""" job_key = ndb.Key('Job', int(job_id, 16)) return job_key.get() class BenchmarkArguments(ndb.Model): """Structured version of the ad-hoc 'arguments' JSON for a Job. This class formalises the structure of the arguments passed into, and supported by the Job model. This is intended to be used as a structured property of Job, not a standalone entity. """ benchmark = ndb.StringProperty(indexed=True) story = ndb.StringProperty(indexed=True) story_tags = ndb.StringProperty(indexed=True) chart = ndb.StringProperty(indexed=True) statistic = ndb.StringProperty(indexed=True) @classmethod def FromArgs(cls, args): return cls( benchmark=args.get('benchmark'), story=args.get('story'), story_tags=args.get('story_tags'), chart=args.get('chart'), statistic=args.get('statistic'), ) class Job(ndb.Model): """A Pinpoint job.""" state = ndb.PickleProperty(required=True, compressed=True) ##### # Job arguments passed in through the API. ##### # Request parameters. arguments = ndb.JsonProperty(required=True) # TODO: The bug id is only used for posting bug comments when a job starts and # completes. This probably should not be the responsibility of Pinpoint. bug_id = ndb.IntegerProperty() comparison_mode = ndb.StringProperty() # The Gerrit server url and change id of the code review to update upon # completion. gerrit_server = ndb.StringProperty() gerrit_change_id = ndb.StringProperty() # User-provided name of the job. name = ndb.StringProperty() tags = ndb.JsonProperty() # Email of the job creator. user = ndb.StringProperty() ##### # Job state generated by running the job. ##### created = ndb.DateTimeProperty(required=True, auto_now_add=True) # This differs from "created" since there may be a lag between the time it # was queued and when the scheduler actually starts the job. started_time = ndb.DateTimeProperty(required=False) # Don't use `auto_now` for `updated`. When we do data migration, we need # to be able to modify the Job without changing the Job's completion time. 
updated = ndb.DateTimeProperty(required=True, auto_now_add=True) started = ndb.BooleanProperty(default=True) completed = ndb.ComputedProperty(lambda self: self.started and not self.task) failed = ndb.ComputedProperty(lambda self: bool(self.exception_details_dict)) running = ndb.ComputedProperty(lambda self: self.started and not self. cancelled and self.task and len(self.task) > 0) cancelled = ndb.BooleanProperty(default=False) cancel_reason = ndb.TextProperty() # The name of the Task Queue task this job is running on. If it's present, the # job is running. The task is also None for Task Queue retries. task = ndb.StringProperty() # The contents of any Exception that was thrown to the top level. # If it's present, the job failed. exception = ndb.TextProperty() exception_details = ndb.JsonProperty() difference_count = ndb.IntegerProperty() retry_count = ndb.IntegerProperty(default=0) # We expose the configuration as a first-class property of the Job. configuration = ndb.ComputedProperty( lambda self: self.arguments.get('configuration')) # Pull out the benchmark, chart, and statistic as a structured property at the # top-level, so that we can analyse these in a structured manner. benchmark_arguments = ndb.StructuredProperty(BenchmarkArguments) # TODO(simonhatch): After migrating all Pinpoint entities, this can be # removed. # crbug.com/971370 @classmethod def _post_get_hook(cls, key, future): # pylint: disable=unused-argument e = future.get_result() if not e: return if not getattr(e, 'exception_details'): e.exception_details = e.exception_details_dict # TODO(simonhatch): After migrating all Pinpoint entities, this can be # removed. 
# crbug.com/971370 @property def exception_details_dict(self): if hasattr(self, 'exception_details'): if self.exception_details: return self.exception_details if hasattr(self, 'exception'): exc = self.exception if exc: return {'message': exc.splitlines()[-1], 'traceback': exc} return None @classmethod def New(cls, quests, changes, arguments=None, bug_id=None, comparison_mode=None, comparison_magnitude=None, gerrit_server=None, gerrit_change_id=None, name=None, pin=None, tags=None, user=None): """Creates a new Job, adds Changes to it, and puts it in the Datstore. Args: quests: An iterable of Quests for the Job to run. changes: An iterable of the initial Changes to run on. arguments: A dict with the original arguments used to start the Job. bug_id: A monorail issue id number to post Job updates to. comparison_mode: Either 'functional' or 'performance', which the Job uses to figure out whether to perform a functional or performance bisect. If None, the Job will not automatically add any Attempts or Changes. comparison_magnitude: The estimated size of the regression or improvement to look for. Smaller magnitudes require more repeats. gerrit_server: Server of the Gerrit code review to update with job results. gerrit_change_id: Change id of the Gerrit code review to update with job results. name: The user-provided name of the Job. pin: A Change (Commits + Patch) to apply to every Change in this Job. tags: A dict of key-value pairs used to filter the Jobs listings. user: The email of the Job creator. Returns: A Job object. """ state = job_state.JobState( quests, comparison_mode=comparison_mode, comparison_magnitude=comparison_magnitude, pin=pin) args = arguments or {} job = cls(state=state, arguments=args, bug_id=bug_id, comparison_mode=comparison_mode, gerrit_server=gerrit_server, gerrit_change_id=gerrit_change_id, name=name, tags=tags, user=user, started=False, cancelled=False) for c in changes: job.AddChange(c) # Pull out the benchmark arguments to the top-level. 
job.benchmark_arguments = BenchmarkArguments.FromArgs(args) job.put() # At this point we already have an ID, so we should go through each of the # quests associated with the state, and provide the Job ID through a common # API. job.state.PropagateJob(job) job.put() return job def PostCreationUpdate(self): title = _ROUND_PUSHPIN + ' Pinpoint job created and queued.' pending = 0 if self.configuration: try: pending = scheduler.QueueStats(self.configuration).get('queued_jobs', 0) except (scheduler.QueueNotFound, ndb.BadRequestError) as e: logging.warning('Error encountered fetching queue named "%s": %s ', self.configuration, e) comment = CREATED_COMMENT_FORMAT.format( title=title, url=self.url, configuration=self.configuration if self.configuration else '(None)', pending=pending) deferred.defer( _PostBugCommentDeferred, self.bug_id, comment, send_email=True, _retry_options=RETRY_OPTIONS) @property def job_id(self): return '%x' % self.key.id() @property def status(self): if self.failed: return 'Failed' if self.cancelled: return 'Cancelled' if self.completed: return 'Completed' if self.running: return 'Running' # By default, we assume that the Job is queued. return 'Queued' @property def url(self): host = os.environ['HTTP_HOST'] # TODO(crbug.com/939723): Remove this workaround when not needed. if host == 'pinpoint.chromeperf.appspot.com': host = 'pinpoint-dot-chromeperf.appspot.com' return 'https://%s/job/%s' % (host, self.job_id) @property def results_url(self): if not self.task: url = results2.GetCachedResults2(self) if url: return url # Point to the default status page if no results are available. 
return '/results2/%s' % self.job_id @property def auto_name(self): if self.name: return self.name if self.comparison_mode == job_state.FUNCTIONAL: name = 'Functional bisect' elif self.comparison_mode == job_state.PERFORMANCE: name = 'Performance bisect' else: name = 'Try job' if self.configuration: name += ' on ' + self.configuration if 'benchmark' in self.arguments: name += '/' + self.arguments['benchmark'] return name def AddChange(self, change): self.state.AddChange(change) def Start(self): """Starts the Job and updates it in the Datastore. This method is designed to return fast, so that Job creation is responsive to the user. It schedules the Job on the task queue without running anything. It also posts a bug comment, and updates the Datastore. """ self._Schedule() self.started = True self.started_time = datetime.datetime.now() self.put() title = _ROUND_PUSHPIN + ' Pinpoint job started.' comment = '\n'.join((title, self.url)) deferred.defer( _PostBugCommentDeferred, self.bug_id, comment, send_email=True, _retry_options=RETRY_OPTIONS) def _IsTryJob(self): return not self.comparison_mode or self.comparison_mode == job_state.TRY def _Complete(self): logging.debug('Job [%s]: Completed', self.job_id) if not self._IsTryJob(): self.difference_count = len(self.state.Differences()) try: results2.ScheduleResults2Generation(self) except taskqueue.Error as e: logging.debug('Failed ScheduleResults2Generation: %s', str(e)) self._FormatAndPostBugCommentOnComplete() self._UpdateGerritIfNeeded() scheduler.Complete(self) def _FormatAndPostBugCommentOnComplete(self): if self._IsTryJob(): # There is no comparison metric. title = "<b>%s Job complete. See results below.</b>" % _ROUND_PUSHPIN deferred.defer( _PostBugCommentDeferred, self.bug_id, '\n'.join((title, self.url)), _retry_options=RETRY_OPTIONS) return # There is a comparison metric. 
differences = self.state.Differences() if not differences: title = "<b>%s Couldn't reproduce a difference.</b>" % _ROUND_PUSHPIN deferred.defer( _PostBugCommentDeferred, self.bug_id, '\n'.join((title, self.url)), _retry_options=RETRY_OPTIONS) return difference_details = [] authors_with_deltas = {} commit_infos = [] for change_a, change_b in differences: if change_b.patch: commit_info = change_b.patch.AsDict() else: commit_info = change_b.last_commit.AsDict() values_a = self.state.ResultValues(change_a) values_b = self.state.ResultValues(change_b) difference = _FormatDifferenceForBug(commit_info, values_a, values_b, self.state.metric) difference_details.append(difference) commit_infos.append(commit_info) if values_a and values_b: authors_with_deltas[commit_info['author']] = job_state.Mean( values_b) - job_state.Mean(values_a) deferred.defer( _UpdatePostAndMergeDeferred, difference_details, commit_infos, authors_with_deltas, self.bug_id, self.tags, self.url, _retry_options=RETRY_OPTIONS) def _UpdateGerritIfNeeded(self): if self.gerrit_server and self.gerrit_change_id: deferred.defer( _UpdateGerritDeferred, self.gerrit_server, self.gerrit_change_id, '%s Job complete.\n\nSee results at: %s' % (_ROUND_PUSHPIN, self.url), _retry_options=RETRY_OPTIONS) def Fail(self, exception=None): tb = traceback.format_exc() or '' title = _CRYING_CAT_FACE + ' Pinpoint job stopped with an error.' exc_info = sys.exc_info() exc_message = '' if exception: exc_message = exception elif exc_info[1]: exc_message = sys.exc_info()[1].message self.exception_details = { 'message': exc_message, 'traceback': tb, } self.task = None comment = '\n'.join((title, self.url, '', exc_message)) deferred.defer( _PostBugCommentDeferred, self.bug_id, comment, _retry_options=RETRY_OPTIONS) scheduler.Complete(self) def _Schedule(self, countdown=_TASK_INTERVAL): # Set a task name to deduplicate retries. This adds some latency, but we're # not latency-sensitive. 
If Job.Run() works asynchronously in the future, # we don't need to worry about duplicate tasks. # https://github.com/catapult-project/catapult/issues/3900 task_name = str(uuid.uuid4()) try: task = taskqueue.add( queue_name='job-queue', url='/api/run/' + self.job_id, name=task_name, countdown=countdown) except (apiproxy_errors.DeadlineExceededError, taskqueue.TransientError): raise errors.RecoverableError() self.task = task.name def _MaybeScheduleRetry(self): if not hasattr(self, 'retry_count') or self.retry_count is None: self.retry_count = 0 if self.retry_count >= _MAX_RECOVERABLE_RETRIES: return False self.retry_count += 1 # Back off exponentially self._Schedule(countdown=_TASK_INTERVAL * (2 ** self.retry_count)) return True def Run(self): """Runs this Job. Loops through all Attempts and checks the status of each one, kicking off tasks as needed. Does not block to wait for all tasks to finish. Also compares adjacent Changes' results and adds any additional Attempts or Changes as needed. If there are any incomplete tasks, schedules another Run() call on the task queue. """ self.exception_details = None # In case the Job succeeds on retry. self.task = None # In case an exception is thrown. try: if not self._IsTryJob(): self.state.Explore() work_left = self.state.ScheduleWork() # Schedule moar task. if work_left: self._Schedule() else: self._Complete() self.retry_count = 0 except errors.RecoverableError: try: if not self._MaybeScheduleRetry(): self.Fail(errors.RETRY_LIMIT) except errors.RecoverableError: self.Fail(errors.RETRY_FAILED) except BaseException: self.Fail() raise finally: # Don't use `auto_now` for `updated`. When we do data migration, we need # to be able to modify the Job without changing the Job's completion time. self.updated = datetime.datetime.now() if self.completed: timing_record.RecordJobTiming(self) try: self.put() except (datastore_errors.Timeout, datastore_errors.TransactionFailedError): # Retry once. 
self.put() except datastore_errors.BadRequestError: if self.task: queue = taskqueue.Queue('job-queue') queue.delete_tasks(taskqueue.Task(name=self.task)) self.task = None # The _JobState is too large to fit in an ndb property. # Load the Job from before we updated it, and fail it. job = self.key.get(use_cache=False) job.task = None job.Fail() job.updated = datetime.datetime.now() job.put() raise def AsDict(self, options=None): d = { 'job_id': self.job_id, 'configuration': self.configuration, 'results_url': self.results_url, 'arguments': self.arguments, 'bug_id': self.bug_id, 'comparison_mode': self.comparison_mode, 'name': self.auto_name, 'user': self.user, 'created': self.created.isoformat(), 'updated': self.updated.isoformat(), 'difference_count': self.difference_count, 'exception': self.exception_details_dict, 'status': self.status, 'cancel_reason': self.cancel_reason, } if not options: return d if OPTION_STATE in options: d.update(self.state.AsDict()) if OPTION_ESTIMATE in options and not self.started: d.update(self._GetRunTimeEstimate()) if OPTION_TAGS in options: d['tags'] = {'tags': self.tags} return d def _GetRunTimeEstimate(self): result = timing_record.GetSimilarHistoricalTimings(self) if not result: return {} timings = [t.total_seconds() for t in result.timings] return { 'estimate': {'timings': timings, 'tags': result.tags}, 'queue_stats': scheduler.QueueStats(self.configuration) } def Cancel(self, user, reason): # We cannot cancel an already cancelled job. if self.cancelled: logging.warning( 'Attempted to cancel a cancelled job "%s"; user = %s, reason = %s', self.job_id, user, reason) raise errors.CancelError('Job already cancelled.') if not scheduler.Cancel(self): raise errors.CancelError('Scheduler failed to cancel job.') self.cancelled = True self.cancel_reason = '{}: {}'.format(user, reason) # Remove any "task" identifiers. self.task = None self.put() title = _ROUND_PUSHPIN + ' Pinpoint job cancelled.' 
comment = u'{}\n{}\n\nCancelled by {}, reason given: {}'.format( title, self.url, user, reason) deferred.defer(_PostBugCommentDeferred, self.bug_id, comment, send_email=True, _retry_options=RETRY_OPTIONS) def _GetBugStatus(issue_tracker, bug_id): if not bug_id: return None issue_data = issue_tracker.GetIssue(bug_id) if not issue_data: return None return issue_data.get('status') def _ComputePostMergeDetails(issue_tracker, commit_cache_key, cc_list): merge_details = {} if commit_cache_key: merge_details = update_bug_with_results.GetMergeIssueDetails( issue_tracker, commit_cache_key) if merge_details['id']: cc_list = [] return merge_details, cc_list def _PostBugCommentDeferred(bug_id, *args, **kwargs): if not bug_id: return issue_tracker = issue_tracker_service.IssueTrackerService( utils.ServiceAccountHttp()) issue_tracker.AddBugComment(bug_id, *args, **kwargs) def _GenerateCommitCacheKey(commit_infos): commit_cache_key = None if len(commit_infos) == 1: commit_cache_key = update_bug_with_results._GetCommitHashCacheKey( commit_infos[0]['git_hash']) return commit_cache_key def _ComputePostOwnerSheriffCCList(commit_infos, authors_with_deltas): owner = None sheriff = None cc_list = set() if authors_with_deltas: owner, _ = max(authors_with_deltas.items(), key=lambda i: abs(i[1])) for cur_commit in commit_infos: if not owner: owner = cur_commit['author'] sheriff = utils.GetSheriffForAutorollCommit(owner, cur_commit['message']) cc_list.add(cur_commit['author']) if sheriff: owner = sheriff return owner, sheriff, cc_list def _UpdatePostAndMergeDeferred( difference_details, commit_infos, authors_deltas, bug_id, tags, url): if not bug_id: return commit_cache_key = _GenerateCommitCacheKey(commit_infos) # Bring it all together. 
owner, sheriff, cc_list = _ComputePostOwnerSheriffCCList(commit_infos, authors_deltas) comment = _FormatComment(difference_details, commit_infos, sheriff, tags, url) issue_tracker = issue_tracker_service.IssueTrackerService( utils.ServiceAccountHttp()) merge_details, cc_list = _ComputePostMergeDetails( issue_tracker, commit_cache_key, cc_list) current_bug_status = _GetBugStatus(issue_tracker, bug_id) if not current_bug_status: return status = None bug_owner = None if current_bug_status in ['Untriaged', 'Unconfirmed', 'Available']: # Set the bug status and owner if this bug is opened and unowned. status = 'Assigned' bug_owner = owner issue_tracker.AddBugComment(bug_id, comment, status=status, cc_list=sorted(cc_list), owner=bug_owner, merge_issue=merge_details.get('id')) update_bug_with_results.UpdateMergeIssue( commit_cache_key, merge_details, bug_id) def _UpdateGerritDeferred(*args, **kwargs): gerrit_service.PostChangeComment(*args, **kwargs) def _FormatDifferenceForBug(commit_info, values_a, values_b, metric): subject = '<b>%s</b> by %s' % (commit_info['subject'], commit_info['author']) if values_a: mean_a = job_state.Mean(values_a) formatted_a = '%.4g' % mean_a else: mean_a = None formatted_a = 'No values' if values_b: mean_b = job_state.Mean(values_b) formatted_b = '%.4g' % mean_b else: mean_b = None formatted_b = 'No values' if metric: metric = '%s: ' % metric else: metric = '' difference = '%s%s %s %s' % (metric, formatted_a, _RIGHT_ARROW, formatted_b) if values_a and values_b: difference += ' (%+.4g)' % (mean_b - mean_a) if mean_a: difference += ' (%+.4g%%)' % ((mean_b - mean_a) / mean_a * 100) else: difference += ' (+%s%%)' % _INFINITY return '\n'.join((subject, commit_info['url'], difference)) def _FormatComment(difference_details, commit_infos, sheriff, tags, url): if len(difference_details) == 1: status = 'Found a significant difference after 1 commit.' else: status = ('Found significant differences after each of %d commits.' 
% len(difference_details)) title = '<b>%s %s</b>' % (_ROUND_PUSHPIN, status) header = '\n'.join((title, url)) # Body. body = '\n\n'.join(difference_details) if sheriff: body += '\n\nAssigning to sheriff %s because "%s" is a roll.' % ( sheriff, commit_infos[-1]['subject']) # Footer. footer = ('Understanding performance regressions:\n' ' http://g.co/ChromePerformanceRegressions') if difference_details: footer += _FormatDocumentationUrls(tags) # Bring it all together. comment = '\n\n'.join((header, body, footer)) return comment def _FormatDocumentationUrls(tags): if not tags: return '' # TODO(simonhatch): Tags isn't the best way to get at this, but wait until # we move this back into the dashboard so we have a better way of getting # at the test path. # crbug.com/876899 test_path = tags.get('test_path') if not test_path: return '' test_suite = utils.TestKey('/'.join(test_path.split('/')[:3])) docs = histogram.SparseDiagnostic.GetMostRecentDataByNamesSync( test_suite, [reserved_infos.DOCUMENTATION_URLS.name]) if not docs: return '' docs = docs[reserved_infos.DOCUMENTATION_URLS.name].get('values') footer = '\n\n%s:\n %s' % (docs[0][0], docs[0][1]) return footer
[]
[]
[ "HTTP_HOST" ]
[]
["HTTP_HOST"]
python
1
0
src/tero/clouds/monitor.py
# Copyright (c) 2021, Djaodjin Inc. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, # THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR # PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR # CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, # EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; # OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, # WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR # OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF # ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
import argparse, configparser, datetime, json, logging, os, re, time from collections import OrderedDict import boto3 import botocore.exceptions import pytz, six #pylint:disable=import-error from six.moves.urllib.parse import urlparse from .awscloud import APP_NAME, EC2_RUNNING, get_regions from ..dparselog import parse_logname LOGGER = logging.getLogger(__name__) def as_datetime(dtime_at=None): if isinstance(dtime_at, six.string_types): look = re.match( r'(?P<year>\d{4})-(?P<month>\d{1,2})-(?P<day>\d{1,2})$', dtime_at) if look: kwargs = {key: int(val) for key, val in look.groupdict().items()} dtime_at = datetime.datetime(**kwargs) else: dtime_at = None if dtime_at and dtime_at.tzinfo is None: dtime_at = dtime_at.replace(tzinfo=pytz.utc) return dtime_at def datetime_or_now(dtime_at=None): if isinstance(dtime_at, six.string_types): look = re.match( r'(?P<year>\d{4})-(?P<month>\d{1,2})-(?P<day>\d{1,2})$', dtime_at) if look: kwargs = {key: int(val) for key, val in look.groupdict().items()} dtime_at = datetime.datetime(**kwargs) if not dtime_at: dtime_at = datetime.datetime.utcnow().replace(tzinfo=pytz.utc) if dtime_at.tzinfo is None: dtime_at = dtime_at.replace(tzinfo=pytz.utc) return dtime_at def list_instances(regions=None, ec2_client=None): if not regions: regions = get_regions(ec2_client) runnning_instance_ids = [] for region_name in regions: LOGGER.info('look for instances in region %s...', region_name) ec2_client = boto3.client('ec2', region_name=region_name) resp = ec2_client.describe_instances( Filters=[{'Name': 'instance-state-name', 'Values': [EC2_RUNNING]}]) for reserv in resp['Reservations']: for instance in reserv['Instances']: runnning_instance_ids += [instance['InstanceId']] return runnning_instance_ids def list_logs(log_location, domains, lognames=['access', 'error'], start_at=None, ends_at=None, s3_client=None): """ log_location is s3://bucketname/prefix domains contains the logs we expect to find. 
""" search = {domain: {logname: [] for logname in lognames} for domain in domains} if not s3_client: s3_client = boto3.client('s3') _, bucket_name, prefix = urlparse(log_location)[:3] if prefix.startswith('/'): prefix = prefix[1:] LOGGER.info("list logs at s3://%s/%s" % (bucket_name, prefix)) resp = s3_client.list_objects_v2( Bucket=bucket_name, Prefix=prefix) continuation = ( resp['NextContinuationToken'] if resp['IsTruncated'] else None) process_log_meta(resp.get('Contents', []), search, start_at=start_at, ends_at=ends_at) while continuation: resp = s3_client.list_objects_v2( Bucket=bucket_name, Prefix=prefix, ContinuationToken=continuation) continuation = ( resp['NextContinuationToken'] if resp['IsTruncated'] else None) process_log_meta(resp.get('Contents', []), search, start_at=start_at, ends_at=ends_at) return search def process_db_meta(logmetas, search, start_at=None, ends_at=None): """ This function will populate the search dictionnary with the instance location of each log in the search template. 
example search template: { 'cowork.djaoapp.com': { 'db': [] } } """ if start_at: start_at = as_datetime(start_at) if ends_at: ends_at = as_datetime(ends_at) name = 'db' for logmeta in logmetas: at_date = None # db backup files have the following name pattern: # db_name.sql.gz look = re.match(r'(?P<db_name>\S+)\.sql-(?P<instance_id>[^-]+)\.gz', os.path.basename(logmeta['Key'])) if look: domain = look.group('db_name') instance_id = look.group('instance_id') at_date = datetime_or_now(logmeta['LastModified']) if at_date: if start_at: if start_at <= at_date: if ends_at: if at_date < ends_at: try: search[domain][name] += [ (at_date, instance_id)] LOGGER.info("add %s, %s, %s, %s" % ( domain, name, instance_id, at_date.isoformat())) except KeyError: LOGGER.info( "skip %s, %s, %s, %s (on domain or dbname)" % ( domain, name, instance_id, at_date.isoformat())) else: LOGGER.info( "skip %s, '%s' <= '%s' < '%s' (on date)" % ( logmeta['Key'], start_at.isoformat(), at_date.isoformat(), ends_at.isoformat())) else: try: search[domain][name] += [ (at_date, instance_id)] LOGGER.info("add %s, %s, %s, %s" % ( domain, name, instance_id, at_date.isoformat())) except KeyError: LOGGER.info( "skip %s, %s, %s, %s (on domain or dbname)" % ( domain, name, instance_id, at_date.isoformat())) else: LOGGER.info("skip %s, '%s' <= '%s' (on date)" % ( logmeta['Key'], start_at.isoformat(), at_date.isoformat())) elif ends_at: if at_date < ends_at: try: search[domain][name] += [(at_date, instance_id)] LOGGER.info("add %s, %s, %s, %s" % ( domain, name, instance_id, at_date.isoformat())) except KeyError: LOGGER.info( "skip %s, %s, %s, %s (on domain or dbname)" % ( domain, name, instance_id, at_date.isoformat())) else: LOGGER.info("skip %s, '%s' < '%s' (on date)" % ( logmeta['Key'], at_date.isoformat(), ends_at.isoformat())) else: try: search[domain][name] += [(at_date, instance_id)] LOGGER.info("add %s, %s, %s, %s" % ( domain, name, instance_id, at_date.isoformat())) except KeyError: LOGGER.info( "skip %s, 
%s, %s, %s (on domain or dbname)" % ( domain, name, instance_id, at_date.isoformat())) else: LOGGER.info("err %s" % logmeta['Key']) def process_log_meta(logmetas, search, start_at=None, ends_at=None): """ This function will populate the search dictionnary with the instance location of each log in the search template. example search template: { 'cowork.djaoapp.com': { 'access': [] 'error': [] } } """ if start_at: start_at = as_datetime(start_at) if ends_at: ends_at = as_datetime(ends_at) for logmeta in logmetas: # Log files have the following name pattern: # domain-name.log-instanceid-yyyymmdd.gz domain, name, instance_id, at_date = parse_logname( os.path.basename(logmeta['Key'])) if at_date: if start_at: if start_at <= at_date: if ends_at: if at_date < ends_at: try: search[domain][name] += [ (at_date, instance_id)] LOGGER.info("add %s, %s, %s, %s" % ( domain, name, instance_id, at_date.isoformat())) except KeyError: LOGGER.info( "skip %s, %s, %s, %s (on domain or logname)" % ( domain, name, instance_id, at_date.isoformat())) else: LOGGER.info( "skip %s, '%s' <= '%s' < '%s' (on date)" % ( logmeta['Key'], start_at.isoformat(), at_date.isoformat(), ends_at.isoformat())) else: try: search[domain][name] += [ (at_date, instance_id)] LOGGER.info("add %s, %s, %s, %s" % ( domain, name, instance_id, at_date.isoformat())) except KeyError: LOGGER.info( "skip %s, %s, %s, %s (on domain or logname)" % ( domain, name, instance_id, at_date.isoformat())) else: LOGGER.info("skip %s, '%s' <= '%s' (on date)" % ( logmeta['Key'], start_at.isoformat(), at_date.isoformat())) elif ends_at: if at_date < ends_at: try: search[domain][name] += [(at_date, instance_id)] LOGGER.info("add %s, %s, %s, %s" % ( domain, name, instance_id, at_date.isoformat())) except KeyError: LOGGER.info( "skip %s, %s, %s, %s (on domain or logname)" % ( domain, name, instance_id, at_date.isoformat())) else: LOGGER.info("skip %s, '%s' < '%s' (on date)" % ( logmeta['Key'], at_date.isoformat(), ends_at.isoformat())) 
else: try: search[domain][name] += [(at_date, instance_id)] LOGGER.info("add %s, %s, %s, %s" % ( domain, name, instance_id, at_date.isoformat())) except KeyError: LOGGER.info( "skip %s, %s, %s, %s (on domain or logname)" % ( domain, name, instance_id, at_date.isoformat())) else: LOGGER.info("err %s" % logmeta['Key']) def search_db_storage(log_location, domains, start_at=None, ends_at=None, s3_client=None): """ We expect to find a backup for the period [``start_at``, ``ends_at``[ in the bucket ``log_location`` for all ``domains`` listed. ``domains`` is a dictionary formatted as such: { domain: [db_name, ...] } """ if not log_location.endswith('/'): log_location += '/' if not s3_client: s3_client = boto3.client('s3') _, bucket_name, prefix = urlparse(log_location)[:3] if prefix.startswith('/'): prefix = prefix[1:] db_to_domains = {} for domain, db_names in six.iteritems(domains): for db_name in db_names: if db_name in db_to_domains: db_to_domains[db_name] += [domain] else: db_to_domains[db_name] = [domain] backup = 'db' search = {db_name: {backup: []} for db_name in db_to_domains} resp = s3_client.list_objects_v2( Bucket=bucket_name, Prefix=prefix) continuation = ( resp['NextContinuationToken'] if resp['IsTruncated'] else None) process_db_meta(resp.get('Contents', []), search, start_at=start_at, ends_at=ends_at) while continuation: resp = s3_client.list_objects_v2( Bucket=bucket_name, Prefix=prefix, ContinuationToken=continuation) continuation = ( resp['NextContinuationToken'] if resp['IsTruncated'] else None) process_db_meta(resp.get('Contents', []), search, start_at=start_at, ends_at=ends_at) db_results = {} for domain, db_names in six.iteritems(domains): db_results[domain] = {backup: []} for db_name in db_names: db_results[domain][backup] += search[db_name][backup] return db_results def search_site_log_storage(log_location, domains, start_at=None, ends_at=None, s3_client=None): """ We expect to find logs for the period [``start_at``, ``ends_at``[ in the bucket 
``log_location`` for all ``domains`` listed. ``domains`` is a dictionary formatted as such: { domain: [app_name, ...] } """ if not log_location.endswith('/'): log_location += '/' # nginx assets proxys log_results = list_logs(log_location + 'var/log/nginx', domains, start_at=start_at, ends_at=ends_at, s3_client=s3_client) # app containers for domain, app_names in six.iteritems(domains): for app_name in app_names: app_results = list_logs( log_location + '%(app_name)s' % { 'app_name': app_name}, [app_name], lognames=['app'], start_at=start_at, ends_at=ends_at, s3_client=s3_client) log_results[domain].update(app_results[app_name]) return log_results def search_proxy_log_storage(log_location, domains, start_at=None, ends_at=None, s3_client=None): """ We expect to find logs for the period [``start_at``, ``ends_at``[ in the bucket ``log_location`` for all ``domains`` listed. ``domains`` is a dictionary formatted as such: { domain: [app_name, ...] } """ if not log_location.endswith('/'): log_location += '/' # djaoapp session proxys log_results = list_logs(log_location + 'var/log/gunicorn', [APP_NAME], lognames=['access', 'error', 'app'], start_at=start_at, ends_at=ends_at, s3_client=s3_client) return log_results def main(input_args): """ Main entry point to run creation of AWS resources """ logging.basicConfig(level=logging.INFO) parser = argparse.ArgumentParser() parser.add_argument( '--dry-run', action='store_true', default=False, help='Do not create resources') parser.add_argument( '--log-location', action='store', default="s3://%s-logs/" % APP_NAME, help='location where logs are stored') parser.add_argument( '--domain', action='append', default=[], help='domain to check logs exists for') parser.add_argument( '--config', action='store', default=os.path.join(os.getenv('HOME'), '.aws', APP_NAME), help='configuration file') args = parser.parse_args(input_args[1:]) config = configparser.ConfigParser() config.read(args.config) LOGGER.info("read configuration from %s", 
args.config) for section in config.sections(): LOGGER.debug("[%s]", section) for key, val in config.items(section): if key.endswith('password'): LOGGER.debug("%s = [REDACTED]", key) else: LOGGER.debug("%s = %s", key, val) log_location = args.log_location domains = args.domain instances = list_instances()
[]
[]
[ "HOME" ]
[]
["HOME"]
python
1
0
agent/arcus-hal/hub-v2/src/main/java/com/iris/agent/hal/WatchdogControl.java
/* * Copyright 2019 Arcus Project * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.iris.agent.hal; import java.io.FileOutputStream; import java.io.IOException; import java.util.concurrent.TimeUnit; import org.apache.commons.io.IOUtils; import org.eclipse.jdt.annotation.Nullable; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import com.iris.agent.attributes.HubAttributesService; import com.iris.agent.os.watchdog.WatchdogNative; import com.iris.agent.util.ThreadUtils; import com.iris.messages.capability.HubAdvancedCapability; class WatchdogControl { private static final Logger log = LoggerFactory.getLogger(WatchdogControl.class); private static final long WATCHDOG_EXPIRES_SECONDS = (System.getenv("IRIS_AGENT_WATCHDOG_TIMEOUT") != null) ? 
Math.max(95,Long.parseLong(System.getenv("IRIS_AGENT_WATCHDOG_TIMEOUT"))) : 300; private static final long WATCHDOG_CHECK_SECONDS = 10; private @Nullable static Watchdog hwWd; private @Nullable static Watchdog swWd; private WatchdogControl() { } public static final WatchdogControl create() { return new WatchdogControl(); } public synchronized void start(long maxWatchdog) { // Create hardware watchdog, if supported try { shutdown(false); hwWd = new HardwareWatchdog(maxWatchdog); } catch (IOException ex) { log.warn("could not open hardware watchdog, not available for this hardware", ex); hwWd = null; } // Setup software watchdog swWd = new SoftwareWatchdog(); } public synchronized void shutdown(boolean clean) { if (swWd != null) { swWd.shutdown(clean); } swWd = null; // Clean up hardware watchdog as well if (hwWd != null) { hwWd.shutdown(clean); } hwWd = null; } public void poke() { // Use software watchdog for general watchdog if (swWd != null) { swWd.poke(); } } private interface Watchdog { void poke(); void shutdown(boolean clean); } private static final class HardwareWatchdog implements Watchdog { private final FileOutputStream os; HardwareWatchdog(long maxWatchdog) throws IOException { this.os = new FileOutputStream("/dev/watchdog0"); this.os.write('A'); this.os.flush(); if (WatchdogNative.isAvailable()) { try { int dto = WatchdogNative.getWatchdogTimeout(this.os); // If there is a hardware limit on the watchdog period, reset current value WatchdogNative.setWatchdogTimeout(this.os, (int)maxWatchdog); int nto = WatchdogNative.getWatchdogTimeout(this.os); Object info = WatchdogNative.getWatchdogInfo(this.os); String id = WatchdogNative.getWatchdogInfoIdentity(info); log.warn("hardware watchdog implementation: {}", id); log.warn("hardware watchdog timeouts: default={}, updated={}", dto, nto); log.warn("hardware watchdog flags: {}", WatchdogNative.getWatchdogInfoFlags(info)); log.warn("hardware watchdog temp: {}", WatchdogNative.getWatchdogTemp(this.os)); } catch 
(Throwable th) { log.warn("could not setup watchdog settings, continuing with defaults:", th); } } } @Override public void poke() { try { os.write('A'); os.flush(); } catch (IOException ex) { log.warn("could not poke watchdog, reboot may occur:", ex); } } @Override public void shutdown(boolean clean) { // NOTE: The V2 hub does not actually shut the HW watchdog off here (which // could be done by writing a 'V' into the watchdog file. We leave // the watchdog so that the hub will reboot if the agent isn't restarted // for any reason. poke(); IOUtils.closeQuietly(os); } } private static final class SoftwareWatchdog implements Watchdog, Runnable { private final Thread thr; private boolean running; private long lastPokeTime; SoftwareWatchdog() { this.thr = new Thread(this); this.thr.setName("wtdg"); this.thr.setDaemon(true); this.running = true; this.lastPokeTime = System.nanoTime(); this.thr.start(); log.warn("software watchdog timeout: {}", WATCHDOG_EXPIRES_SECONDS); } @Override public void poke() { if (running) { this.lastPokeTime = System.nanoTime(); } } @Override public void shutdown(boolean clean) { poke(); running = false; } @Override public void run() { try { while (running) { ThreadUtils.sleep(WATCHDOG_CHECK_SECONDS, TimeUnit.SECONDS); long now = System.nanoTime(); long elapsedSeconds = TimeUnit.SECONDS.convert(now-lastPokeTime, TimeUnit.NANOSECONDS); if (elapsedSeconds >= WATCHDOG_EXPIRES_SECONDS) { log.error("SOFTWARE WATCHDOG TIMEOUT EXPIRED, ATTEMPTING TO FORCE REBOOT OF SYSTEM"); running = false; HubAttributesService.setLastRestartReason(HubAdvancedCapability.LASTRESTARTREASON_WATCHDOG); IrisHal.rebootAndSelfCheck(); } // Kick hardware watchdog which acts as a deadman if (hwWd != null) { hwWd.poke(); } } log.warn("software watchdog exiting normally");; } catch (Throwable th) { log.error("SOFTWARE WATCHDOG EXITED ABNORMALLY:", th); } } } }
[ "\"IRIS_AGENT_WATCHDOG_TIMEOUT\"", "\"IRIS_AGENT_WATCHDOG_TIMEOUT\"" ]
[]
[ "IRIS_AGENT_WATCHDOG_TIMEOUT" ]
[]
["IRIS_AGENT_WATCHDOG_TIMEOUT"]
java
1
0
tests/shakemap/coremods/select_test.py
#!/usr/bin/env python import os import os.path import pytest from shakemap.utils.config import get_config_paths from shakemap.coremods.select import SelectModule from common import clear_files, set_files ######################################################################## # Test select ######################################################################## def test_select(): installpath, datapath = get_config_paths() # Process a non-existent event (should fail) smod = SelectModule("not_an_event") with pytest.raises(NotADirectoryError): smod.execute() # Normal event (should succeed) event_path = os.path.join(datapath, "nc72282711", "current") set_files(event_path, {"event.xml": "event.xml"}) conf_file = os.path.join(datapath, "nc72282711", "current", "model_select.conf") smod = SelectModule("nc72282711") smod.execute() failed = False if not os.path.isfile(conf_file): failed = True clear_files(event_path) if failed: assert False # Subduction event (not over slab) conf_file = os.path.join(datapath, "usp0004bxs", "current", "model_select.conf") if os.path.isfile(conf_file): os.remove(conf_file) try: smod = SelectModule("usp0004bxs") smod.execute() finally: if not os.path.isfile(conf_file): print("select failed!") assert False else: os.remove(conf_file) # Northridge, with moment tensor file conf_file = os.path.join(datapath, "northridge2", "current", "model_select.conf") if os.path.isfile(conf_file): os.remove(conf_file) try: smod = SelectModule("northridge2") smod.execute() finally: if not os.path.isfile(conf_file): print("select failed!") assert False else: os.remove(conf_file) if __name__ == "__main__": os.environ["CALLED_FROM_PYTEST"] = "True" test_select()
[]
[]
[ "CALLED_FROM_PYTEST" ]
[]
["CALLED_FROM_PYTEST"]
python
1
0
_example/main.go
package main import ( "fmt" "os" "os/signal" "syscall" "time" "github.com/TopiSenpai/paginator" "github.com/bwmarrin/discordgo" ) var ( Token = os.Getenv("TOKEN") ) func main() { dg, err := discordgo.New("Bot " + Token) if err != nil { fmt.Println("error creating Discord session,", err) return } manager := paginator.NewManager() // Register the messageCreate func as a callback for MessageCreate events. dg.AddHandler(manager.OnInteractionCreate) dg.AddHandler(func(s *discordgo.Session, m *discordgo.MessageCreate) { if m.Author.ID == s.State.User.ID || m.Content != "!test" { return } pages := []string{ "page1", "page2", "page3", } if err = manager.CreateMessage(s, m.ChannelID, &paginator.Paginator{ PageFunc: func(page int, embed *discordgo.MessageEmbed) { embed.Description = pages[page] }, MaxPages: len(pages), Expiry: time.Now(), ExpiryLastUsage: true, }); err != nil { fmt.Println(err) } }) if err = dg.Open(); err != nil { fmt.Println("error opening connection: ", err) return } s := make(chan os.Signal, 1) signal.Notify(s, syscall.SIGINT, syscall.SIGTERM, os.Interrupt) <-s }
[ "\"TOKEN\"" ]
[]
[ "TOKEN" ]
[]
["TOKEN"]
go
1
0
backend/application/settings.py
""" Django settings for application project. Generated by 'django-admin startproject' using Django 3.2.3. For more information on this file, see https://docs.djangoproject.com/en/3.2/topics/settings/ For the full list of settings and their values, see https://docs.djangoproject.com/en/3.2/ref/settings/ """ import os from pathlib import Path # Build paths inside the project like this: BASE_DIR / 'subdir'. BASE_DIR = Path(__file__).resolve().parent.parent # ================================================= # # ******************** 动态配置 ******************** # # ================================================= # from conf.env import * # Quick-start development settings - unsuitable for production # See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/ # SECURITY WARNING: keep the secret key used in production secret! SECRET_KEY = 'django-insecure--z8%exyzt7e_%i@1+#1mm=%lb5=^fx_57=1@a+_y7bg5-w%)sm' # SECURITY WARNING: don't run with debug turned on in production! DEBUG = locals().get('DEBUG', True) ALLOWED_HOSTS = locals().get('ALLOWED_HOSTS', ['*']) # Application definition INSTALLED_APPS = [ 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', 'django_comment_migrate', 'rest_framework', 'django_filters', 'corsheaders', # 注册跨域app 'dvadmin.system', 'drf_yasg', 'captcha', ] MIDDLEWARE = [ 'django.middleware.security.SecurityMiddleware', 'whitenoise.middleware.WhiteNoiseMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'corsheaders.middleware.CorsMiddleware', # 跨域中间件 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', 'dvadmin.utils.middleware.ApiLoggingMiddleware', ] ROOT_URLCONF = 'application.urls' TEMPLATES = [ { 'BACKEND': 
'django.template.backends.django.DjangoTemplates', 'DIRS': [os.path.join(BASE_DIR, 'templates')], 'APP_DIRS': True, 'OPTIONS': { 'context_processors': [ 'django.template.context_processors.debug', 'django.template.context_processors.request', 'django.contrib.auth.context_processors.auth', 'django.contrib.messages.context_processors.messages', ], }, }, ] WSGI_APPLICATION = 'application.wsgi.application' # Database # https://docs.djangoproject.com/en/3.2/ref/settings/#databases DATABASES = { 'default': { 'ENGINE': DATABASE_ENGINE, 'NAME': DATABASE_NAME, 'USER': DATABASE_USER, 'PASSWORD': DATABASE_PASSWORD, 'HOST': os.getenv('DATABASE_HOST') or DATABASE_HOST, 'PORT': DATABASE_PORT, } } AUTH_USER_MODEL = 'system.Users' USERNAME_FIELD = 'username' # Password validation # https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators AUTH_PASSWORD_VALIDATORS = [ { 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator', }, { 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', }, { 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator', }, { 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator', }, ] # Internationalization # https://docs.djangoproject.com/en/3.2/topics/i18n/ LANGUAGE_CODE = 'zh-hans' TIME_ZONE = 'Asia/Shanghai' USE_I18N = True USE_L10N = True USE_TZ = False # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/3.2/howto/static-files/ STATIC_URL = '/static/' # # 设置django的静态文件目录 STATICFILES_DIRS = [ os.path.join(BASE_DIR, "static"), ] MEDIA_ROOT = 'media' # 项目下的目录 MEDIA_URL = "/media/" # 跟STATIC_URL类似,指定用户可以通过这个url找到文件 # 收集静态文件,必须将 MEDIA_ROOT,STATICFILES_DIRS先注释 # python manage.py collectstatic # STATIC_ROOT=os.path.join(BASE_DIR,'static') # ================================================= # # ******************* 跨域的配置 ******************* # # ================================================= # # 全部允许配置 CORS_ORIGIN_ALLOW_ALL = True # 
允许cookie CORS_ALLOW_CREDENTIALS = True # 指明在跨域访问中,后端是否支持对cookie的操作 # ================================================= # # ********************* 日志配置 ******************* # # ================================================= # # log 配置部分BEGIN # SERVER_LOGS_FILE = os.path.join(BASE_DIR, 'logs', 'server.log') ERROR_LOGS_FILE = os.path.join(BASE_DIR, 'logs', 'error.log') if not os.path.exists(os.path.join(BASE_DIR, 'logs')): os.makedirs(os.path.join(BASE_DIR, 'logs')) # 格式:[2020-04-22 23:33:01][micoservice.apps.ready():16] [INFO] 这是一条日志: # 格式:[日期][模块.函数名称():行号] [级别] 信息 STANDARD_LOG_FORMAT = '[%(asctime)s][%(name)s.%(funcName)s():%(lineno)d] [%(levelname)s] %(message)s' CONSOLE_LOG_FORMAT = '[%(asctime)s][%(name)s.%(funcName)s():%(lineno)d] [%(levelname)s] %(message)s' LOGGING = { 'version': 1, 'disable_existing_loggers': False, 'formatters': { 'standard': { 'format': STANDARD_LOG_FORMAT }, 'console': { 'format': CONSOLE_LOG_FORMAT, 'datefmt': '%Y-%m-%d %H:%M:%S', }, 'file': { 'format': CONSOLE_LOG_FORMAT, 'datefmt': '%Y-%m-%d %H:%M:%S', }, }, 'handlers': { 'file': { 'level': 'INFO', 'class': 'logging.handlers.RotatingFileHandler', 'filename': SERVER_LOGS_FILE, 'maxBytes': 1024 * 1024 * 100, # 100 MB 'backupCount': 5, # 最多备份5个 'formatter': 'standard', 'encoding': 'utf-8', }, 'error': { 'level': 'ERROR', 'class': 'logging.handlers.RotatingFileHandler', 'filename': ERROR_LOGS_FILE, 'maxBytes': 1024 * 1024 * 100, # 100 MB 'backupCount': 3, # 最多备份3个 'formatter': 'standard', 'encoding': 'utf-8', }, 'console': { 'level': 'INFO', 'class': 'logging.StreamHandler', 'formatter': 'console', } }, 'loggers': { # default日志 '': { 'handlers': ['console', 'error', 'file'], 'level': 'INFO', }, 'django': { 'handlers': ['console', 'error', 'file'], 'level': 'INFO', }, 'scripts': { 'handlers': ['console', 'error', 'file'], 'level': 'INFO', }, # 数据库相关日志 'django.db.backends': { 'handlers': [], 'propagate': True, 'level': 'INFO', }, } } # ================================================= # # 
*************** REST_FRAMEWORK配置 *************** # # ================================================= # REST_FRAMEWORK = { 'DATETIME_FORMAT': "%Y-%m-%d %H:%M:%S", # 日期时间格式配置 'DATE_FORMAT': "%Y-%m-%d", 'DEFAULT_FILTER_BACKENDS': ( # 'django_filters.rest_framework.DjangoFilterBackend', 'dvadmin.utils.filters.CustomDjangoFilterBackend', 'rest_framework.filters.SearchFilter', 'rest_framework.filters.OrderingFilter', ), 'DEFAULT_PAGINATION_CLASS': 'dvadmin.utils.pagination.CustomPagination', # 自定义分页 'DEFAULT_AUTHENTICATION_CLASSES': ( 'rest_framework_simplejwt.authentication.JWTAuthentication', 'rest_framework.authentication.SessionAuthentication', ), 'EXCEPTION_HANDLER': 'dvadmin.utils.exception.CustomExceptionHandler', # 自定义的异常处理 } # ================================================= # # ******************** 登录方式配置 ******************** # # ================================================= # AUTHENTICATION_BACKENDS = [ 'dvadmin.utils.backends.CustomBackend' ] # ================================================= # # ****************** simplejwt配置 ***************** # # ================================================= # from datetime import timedelta SIMPLE_JWT = { # token有效时长 'ACCESS_TOKEN_LIFETIME': timedelta(days=1), # token刷新后的有效时间 'REFRESH_TOKEN_LIFETIME': timedelta(days=1), # 设置前缀 'AUTH_HEADER_TYPES': ('JWT',), 'ROTATE_REFRESH_TOKENS': True } # ====================================# # ****************swagger************# # ====================================# SWAGGER_SETTINGS = { # 基础样式 'SECURITY_DEFINITIONS': { "basic": { 'type': 'basic' } }, # 如果需要登录才能够查看接口文档, 登录的链接使用restframework自带的. 
'LOGIN_URL': 'apiLogin/', # 'LOGIN_URL': 'rest_framework:login', 'LOGOUT_URL': 'rest_framework:logout', # 'DOC_EXPANSION': None, # 'SHOW_REQUEST_HEADERS':True, # 'USE_SESSION_AUTH': True, # 'DOC_EXPANSION': 'list', # 接口文档中方法列表以首字母升序排列 'APIS_SORTER': 'alpha', # 如果支持json提交, 则接口文档中包含json输入框 'JSON_EDITOR': True, # 方法列表字母排序 'OPERATIONS_SORTER': 'alpha', 'VALIDATOR_URL': None, 'AUTO_SCHEMA_TYPE': 2, # 分组根据url层级分,0、1 或 2 层 'DEFAULT_AUTO_SCHEMA_CLASS': 'dvadmin.utils.swagger.CustomSwaggerAutoSchema', } # ================================================= # # **************** 验证码配置 ******************* # # ================================================= # CAPTCHA_STATE = True CAPTCHA_IMAGE_SIZE = (160, 60) # 设置 captcha 图片大小 CAPTCHA_LENGTH = 4 # 字符个数 CAPTCHA_TIMEOUT = 1 # 超时(minutes) CAPTCHA_OUTPUT_FORMAT = '%(image)s %(text_field)s %(hidden_field)s ' CAPTCHA_FONT_SIZE = 40 # 字体大小 CAPTCHA_FOREGROUND_COLOR = '#0033FF' # 前景色 CAPTCHA_BACKGROUND_COLOR = '#F5F7F4' # 背景色 CAPTCHA_NOISE_FUNCTIONS = ( 'captcha.helpers.noise_arcs', # 线 'captcha.helpers.noise_dots', # 点 ) # CAPTCHA_CHALLENGE_FUNCT = 'captcha.helpers.random_char_challenge' #字母验证码 CAPTCHA_CHALLENGE_FUNCT = 'captcha.helpers.math_challenge' # 加减乘除验证码 # ================================================= # # ******************** 其他配置 ******************** # # ================================================= # # 插件yaml地址 PLUGINS_WEB_YAML_PATH = os.path.join(BASE_DIR, os.path.pardir, "web", "src", "views", "dvadmin_plugins", "config.json") PLUGINS_BACKEND_YAML_PATH = os.path.join(BASE_DIR, "plugins", "config.json") DEFAULT_AUTO_FIELD = 'django.db.models.AutoField' API_LOG_ENABLE = True # API_LOG_METHODS = 'ALL' # ['POST', 'DELETE'] API_LOG_METHODS = ['POST', 'UPDATE', 'DELETE', 'PUT'] # ['POST', 'DELETE'] API_MODEL_MAP = { "/token/": "登录模块", "/api/login/": "登录模块", "/api/plugins_market/plugins/": "插件市场", } # 表前缀 TABLE_PREFIX = "dvadmin_" DJANGO_CELERY_BEAT_TZ_AWARE = False CELERY_TIMEZONE = 'Asia/Shanghai' # celery 时区问题 # 静态页面压缩 
STATICFILES_STORAGE = 'whitenoise.storage.CompressedStaticFilesStorage' # 初始化需要执行的列表,用来初始化后执行 INITIALIZE_LIST = [] INITIALIZE_RESET_LIST = [] # 导入租户数据 SHARED_APPS = [] from plugins import *
[]
[]
[ "DATABASE_HOST" ]
[]
["DATABASE_HOST"]
python
1
0
server/config/config_test.go
package config import ( "fmt" "os" "reflect" "ritchie-server/server" "testing" ) func TestConfiguration_ReadCliVersionConfigs(t *testing.T) { type fields struct { Configs map[string]*server.ConfigFile SecurityConstraints server.SecurityConstraints } type args struct { org string } tests := []struct { name string fields fields in args out server.CliVersionConfig outErr bool }{ { name: "read cli version configuration", fields: fields{ Configs: map[string]*server.ConfigFile{ "zup": { CliVersionConfig: server.CliVersionConfig{ Url: "http://localhost:8882/s3-version-mock", Provider: "s3", }, }, }, }, in: args{org: "zup"}, out: server.CliVersionConfig{ Url: "http://localhost:8882/s3-version-mock", Provider: "s3", }, outErr: false, }, { name: "error read cli version configuration", fields: fields{ Configs: map[string]*server.ConfigFile{ "zup": { CliVersionConfig: server.CliVersionConfig{ Url: "http://localhost:8882/s3-version-mock", Provider: "s3", }, }, }, }, in: args{org: "error"}, out: server.CliVersionConfig{}, outErr: true, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { c := NewConfiguration(tt.fields.Configs, tt.fields.SecurityConstraints) got, err := c.ReadCliVersionConfigs(tt.in.org) if (err != nil) != tt.outErr { t.Errorf("ReadCliVersionConfigs() error = %v, outErr %v", err, tt.outErr) return } if !reflect.DeepEqual(got, tt.out) { t.Errorf("ReadCliVersionConfigs() got = %v, out %v", got, tt.out) } }) } } func TestConfiguration_ReadCredentialConfigs(t *testing.T) { type fields struct { Configs map[string]*server.ConfigFile SecurityConstraints server.SecurityConstraints } type args struct { org string } tests := []struct { name string fields fields in args out map[string][]server.CredentialConfig outErr bool }{ { name: "read credential configuration", fields: fields{ Configs: map[string]*server.ConfigFile{ "zup": { CredentialConfig: map[string][]server.CredentialConfig{ "credential1": {{Field: "Field", Type: "type"}}, "credential2": {{Field: 
"field2", Type: "type"}}, }, }, }, }, in: args{org: "zup"}, out: map[string][]server.CredentialConfig{ "credential1": {{Field: "Field", Type: "type"}}, "credential2": {{Field: "field2", Type: "type"}}, }, outErr: false, }, { name: "error read credential configuration", fields: fields{ Configs: map[string]*server.ConfigFile{ "zup": { CredentialConfig: map[string][]server.CredentialConfig{ "credential1": {{Field: "Field", Type: "type"}}, "credential2": {{Field: "field2", Type: "type"}}, }, }, }, }, in: args{org: "error"}, out: nil, outErr: true, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { c := NewConfiguration(tt.fields.Configs, tt.fields.SecurityConstraints) got, err := c.ReadCredentialConfigs(tt.in.org) if (err != nil) != tt.outErr { t.Errorf("ReadCredentialConfigs() error = %v, outErr %v", err, tt.outErr) return } if !reflect.DeepEqual(got, tt.out) { t.Errorf("ReadCredentialConfigs() got = %v, out %v", got, tt.out) } }) } } func TestConfiguration_ReadHealthConfigs(t *testing.T) { type fields struct { Configs map[string]*server.ConfigFile SecurityConstraints server.SecurityConstraints } tests := []struct { name string fields fields out map[string]server.HealthEndpoints }{ { name: "read health check configuration", fields: fields{ Configs: map[string]*server.ConfigFile{ "zup": { KeycloakConfig: &server.KeycloakConfig{ Url: "http://keycloak:8080", }, }, }, }, out: map[string]server.HealthEndpoints{ "zup": { KeycloakURL: "http://keycloak:8080", VaultURL: getVaultUrl(), }, }, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { c := NewConfiguration(tt.fields.Configs, tt.fields.SecurityConstraints) if got := c.ReadHealthConfigs(); !reflect.DeepEqual(got, tt.out) { t.Errorf("ReadHealthConfigs() = %v, out %v", got, tt.out) } }) } } func TestConfiguration_ReadKeycloakConfigs(t *testing.T) { type fields struct { Configs map[string]*server.ConfigFile SecurityConstraints server.SecurityConstraints } type args struct { org string } tests 
:= []struct { name string fields fields in args out *server.KeycloakConfig outErr bool }{ { name: "read keycloak configuration", fields: fields{ Configs: map[string]*server.ConfigFile{ "zup": { KeycloakConfig: &server.KeycloakConfig{ Url: "http://keycloak:8080", Realm: "ritchie", ClientId: "user-login", ClientSecret: "user-login", }, }, }, }, in: args{org: "zup"}, out: &server.KeycloakConfig{ Url: "http://keycloak:8080", Realm: "ritchie", ClientId: "user-login", ClientSecret: "user-login", }, outErr: false, }, { name: "error read keycloak configuration", fields: fields{ Configs: map[string]*server.ConfigFile{ "zup": { KeycloakConfig: &server.KeycloakConfig{ Url: "http://keycloak:8080", Realm: "ritchie", ClientId: "user-login", ClientSecret: "user-login", }, }, }, }, in: args{org: "error"}, out: nil, outErr: true, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { c := NewConfiguration(tt.fields.Configs, tt.fields.SecurityConstraints) got, err := c.ReadKeycloakConfigs(tt.in.org) if (err != nil) != tt.outErr { t.Errorf("ReadKeycloakConfigs() error = %v, outErr %v", err, tt.outErr) return } if !reflect.DeepEqual(got, tt.out) { t.Errorf("ReadKeycloakConfigs() got = %v, out %v", got, tt.out) } }) } } func TestConfiguration_ReadOauthConfig(t *testing.T) { type fields struct { Configs map[string]*server.ConfigFile SecurityConstraints server.SecurityConstraints } type args struct { org string } tests := []struct { name string fields fields in args out *server.OauthConfig outErr bool }{ { name: "read oauth configuration", fields: fields{ Configs: map[string]*server.ConfigFile{ "zup": { OauthConfig: &server.OauthConfig{ Url: "http://localhost:8080/auth/realms/ritchie", ClientId: "oauth", }, }, }, }, in: args{org: "zup"}, out: &server.OauthConfig{ Url: "http://localhost:8080/auth/realms/ritchie", ClientId: "oauth", }, outErr: false, }, { name: "error read oauth configuration", fields: fields{ Configs: map[string]*server.ConfigFile{ "zup": { OauthConfig: 
&server.OauthConfig{ Url: "http://localhost:8080/auth/realms/ritchie", ClientId: "oauth", }, }, }, }, in: args{org: "error"}, out: nil, outErr: true, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { c := NewConfiguration(tt.fields.Configs, tt.fields.SecurityConstraints) got, err := c.ReadOauthConfig(tt.in.org) if (err != nil) != tt.outErr { t.Errorf("ReadOauthConfig() error = %v, outErr %v", err, tt.outErr) return } if !reflect.DeepEqual(got, tt.out) { t.Errorf("ReadOauthConfig() got = %v, out %v", got, tt.out) } }) } } func TestConfiguration_ReadRepositoryConfig(t *testing.T) { type fields struct { Configs map[string]*server.ConfigFile SecurityConstraints server.SecurityConstraints } type args struct { org string } tests := []struct { name string fields fields in args out []server.Repository outErr bool }{ { name: "read repository configuration", fields: fields{ Configs: map[string]*server.ConfigFile{ "zup": { RepositoryConfig: []server.Repository{ { Name: "local", Priority: 0, TreePath: "path_whatever", Username: "", Password: "", }, { Name: "repository1", Priority: 1, TreePath: "path_whatever_repository1", Username: "optional", Password: "optional", }, }, }, }, }, in: args{org: "zup"}, out: []server.Repository{ { Name: "local", Priority: 0, TreePath: "path_whatever", Username: "", Password: "", }, { Name: "repository1", Priority: 1, TreePath: "path_whatever_repository1", Username: "optional", Password: "optional", }, }, outErr: false, }, { name: "error read repository configuration", fields: fields{ Configs: map[string]*server.ConfigFile{ "zup": { RepositoryConfig: []server.Repository{ { Name: "local", Priority: 0, TreePath: "path_whatever", Username: "", Password: "", }, { Name: "repository1", Priority: 1, TreePath: "path_whatever_repository1", Username: "optional", Password: "optional", }, }, }, }, }, in: args{org: "error"}, out: nil, outErr: true, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { c := 
NewConfiguration(tt.fields.Configs, tt.fields.SecurityConstraints) got, err := c.ReadRepositoryConfig(tt.in.org) if (err != nil) != tt.outErr { t.Errorf("ReadRepositoryConfig() error = %v, outErr %v", err, tt.outErr) return } if !reflect.DeepEqual(got, tt.out) { t.Errorf("ReadRepositoryConfig() got = %v, out %v", got, tt.out) } }) } } func TestConfiguration_ReadSecurityConstraints(t *testing.T) { type fields struct { Configs map[string]*server.ConfigFile SecurityConstraints server.SecurityConstraints } tests := []struct { name string fields fields out server.SecurityConstraints }{ { name: "read security constraints configuration", fields: fields{ SecurityConstraints: server.SecurityConstraints{ Constraints: []server.DenyMatcher{{ Pattern: "/test", RoleMappings: map[string][]string{"user": {"POST", "GET"}}, }}, PublicConstraints: []server.PermitMatcher{{ Pattern: "/public", Methods: []string{"POST", "GET"}, }}, }, }, out: server.SecurityConstraints{ Constraints: []server.DenyMatcher{{ Pattern: "/test", RoleMappings: map[string][]string{"user": {"POST", "GET"}}, }}, PublicConstraints: []server.PermitMatcher{{ Pattern: "/public", Methods: []string{"POST", "GET"}, }}, }, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { c := NewConfiguration(tt.fields.Configs, tt.fields.SecurityConstraints) if got := c.ReadSecurityConstraints(); !reflect.DeepEqual(got, tt.out) { t.Errorf("ReadSecurityConstraints() = %v, out %v", got, tt.out) } }) } } func getVaultUrl() string { p := "%s/sys/health" value := os.Getenv("VAULT_ADDR") if value == "" { value = "https://127.0.0.1:8200" } return fmt.Sprintf(p, value) }
[ "\"VAULT_ADDR\"" ]
[]
[ "VAULT_ADDR" ]
[]
["VAULT_ADDR"]
go
1
0
share/qt/extract_strings_qt.py
#!/usr/bin/python ''' Extract _("...") strings for translation and convert to Qt stringdefs so that they can be picked up by Qt linguist. ''' from __future__ import division,print_function,unicode_literals from subprocess import Popen, PIPE import glob import operator import os import sys OUT_CPP="qt/p59strings.cpp" EMPTY=['""'] def parse_po(text): """ Parse 'po' format produced by xgettext. Return a list of (msgid,msgstr) tuples. """ messages = [] msgid = [] msgstr = [] in_msgid = False in_msgstr = False for line in text.split('\n'): line = line.rstrip('\r') if line.startswith('msgid '): if in_msgstr: messages.append((msgid, msgstr)) in_msgstr = False # message start in_msgid = True msgid = [line[6:]] elif line.startswith('msgstr '): in_msgid = False in_msgstr = True msgstr = [line[7:]] elif line.startswith('"'): if in_msgid: msgid.append(line) if in_msgstr: msgstr.append(line) if in_msgstr: messages.append((msgid, msgstr)) return messages files = sys.argv[1:] # xgettext -n --keyword=_ $FILES XGETTEXT=os.getenv('XGETTEXT', 'xgettext') if not XGETTEXT: print('Cannot extract strings: xgettext utility is not installed or not configured.',file=sys.stderr) print('Please install package "gettext" and re-run \'./configure\'.',file=sys.stderr) exit(1) child = Popen([XGETTEXT,'--output=-','-n','--keyword=_'] + files, stdout=PIPE) (out, err) = child.communicate() messages = parse_po(out.decode('utf-8')) f = open(OUT_CPP, 'w') f.write(""" #include <QtGlobal> // Automatically generated by extract_strings.py #ifdef __GNUC__ #define UNUSED __attribute__((unused)) #else #define UNUSED #endif """) f.write('static const char UNUSED *p59_strings[] = {\n') messages.sort(key=operator.itemgetter(0)) for (msgid, msgstr) in messages: if msgid != EMPTY: f.write('QT_TRANSLATE_NOOP("p59-core", %s),\n' % ('\n'.join(msgid))) f.write('};\n') f.close()
[]
[]
[ "XGETTEXT" ]
[]
["XGETTEXT"]
python
1
0
integration_tests/integration_tests.py
#!/usr/bin/env python from copy import deepcopy import json import os import math import traceback import click import numpy as np import rastervision as rv from integration_tests.chip_classification_tests.experiment \ import ChipClassificationIntegrationTest from integration_tests.object_detection_tests.experiment \ import ObjectDetectionIntegrationTest from integration_tests.semantic_segmentation_tests.experiment \ import SemanticSegmentationIntegrationTest from rastervision.rv_config import RVConfig all_tests = [ rv.CHIP_CLASSIFICATION, rv.OBJECT_DETECTION, rv.SEMANTIC_SEGMENTATION ] np.random.seed(1234) if rv.backend.tf_available: import tensorflow tensorflow.set_random_seed(5678) # Suppress warnings and info to avoid cluttering CI log os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' TEST_ROOT_DIR = os.path.dirname(os.path.abspath(__file__)) class IntegrationTestExperimentRunner(rv.runner.LocalExperimentRunner): def __init__(self, tmp_dir=None): super().__init__(tmp_dir) def _run_experiment(self, command_dag): """Check serialization of all commands.""" for command_config in command_dag.get_sorted_commands(): deepcopy( rv.command.CommandConfig.from_proto(command_config.to_proto())) super()._run_experiment(command_dag) def console_info(msg): click.echo(click.style(msg, fg='green')) def console_warning(msg): click.echo(click.style(msg, fg='yellow')) def console_error(msg): click.echo(click.style(msg, fg='red', err=True)) class TestError(): def __init__(self, test, message, details=None): self.test = test self.message = message self.details = details def __str__(self): return ('Error\n' + '------\n' + 'Test: {}\n'.format(self.test) + 'Message: {}\n'.format(self.message) + 'Details: {}'.format( str(self.details)) if self.details else '' + '\n') def get_test_dir(test): return os.path.join(TEST_ROOT_DIR, test.lower().replace('-', '_')) def get_expected_eval_path(test): return os.path.join('{}_tests'.format(get_test_dir(test)), 'expected-output/eval.json') def 
get_actual_eval_path(test, temp_dir): return os.path.join(temp_dir, test.lower(), 'eval/default/eval.json') def open_json(path): with open(path, 'r') as file: return json.load(file) def check_eval_item(test, expected_item, actual_item): errors = [] f1_threshold = 0.01 class_name = expected_item['class_name'] expected_f1 = expected_item['f1'] or 0.0 actual_f1 = actual_item['f1'] or 0.0 if math.fabs(expected_f1 - actual_f1) > f1_threshold: errors.append( TestError( test, 'F1 scores are not close enough', 'for class_name: {} expected f1: {}, actual f1: {}'.format( class_name, expected_item['f1'], actual_item['f1']))) return errors def check_eval(test, temp_dir): errors = [] actual_eval_path = get_actual_eval_path(test, temp_dir) expected_eval_path = get_expected_eval_path(test) if os.path.isfile(actual_eval_path): expected_eval = open_json(expected_eval_path)['overall'] actual_eval = open_json(actual_eval_path)['overall'] for expected_item in expected_eval: class_name = expected_item['class_name'] actual_item = \ next(filter( lambda x: x['class_name'] == class_name, actual_eval)) errors.extend(check_eval_item(test, expected_item, actual_item)) else: errors.append( TestError(test, 'actual eval file does not exist', actual_eval_path)) return errors def get_experiment(test, use_tf, tmp_dir): if test == rv.OBJECT_DETECTION: return ObjectDetectionIntegrationTest().exp_main( os.path.join(tmp_dir, test.lower()), use_tf=use_tf) if test == rv.CHIP_CLASSIFICATION: return ChipClassificationIntegrationTest().exp_main( os.path.join(tmp_dir, test.lower()), use_tf=use_tf) if test == rv.SEMANTIC_SEGMENTATION: return SemanticSegmentationIntegrationTest().exp_main( os.path.join(tmp_dir, test.lower()), use_tf=use_tf) raise Exception('Unknown test {}'.format(test)) def test_prediction_package_validation(experiment, test, temp_dir, image_uri): console_info('Checking predict command validation...') errors = [] pp = experiment.task.predict_package_uri predict = rv.Predictor(pp, temp_dir, 
channel_order=[0, 1, 7]).predict try: predict(image_uri, 'x.txt') e = TestError(test, ('Predictor should have raised exception due to invalid ' 'channel_order, but did not.'), 'in experiment {}'.format(experiment.id)) errors.append(e) except ValueError: pass return errors def test_prediction_package_results(experiment, test, temp_dir, scenes, scenes_to_uris): console_info('Checking predict package produces same results...') errors = [] pp = experiment.task.predict_package_uri predict = rv.Predictor(pp, temp_dir).predict for scene_config in scenes: # Need to write out labels and read them back, # otherwise the floating point precision direct box # coordinates will not match those from the PREDICT # command, which are rounded to pixel coordinates # via pyproj logic (in the case of rasterio crs transformer. predictor_label_store_uri = os.path.join( temp_dir, test.lower(), 'predictor/{}'.format(scene_config.id)) uri = scenes_to_uris[scene_config.id] predict(uri, predictor_label_store_uri) scene = scene_config.create_scene(experiment.task, temp_dir) scene_labels = scene.prediction_label_store.get_labels() extent = scene.raster_source.get_extent() crs_transformer = scene.raster_source.get_crs_transformer() predictor_label_store = scene_config.label_store \ .for_prediction( predictor_label_store_uri) \ .create_store( experiment.task, extent, crs_transformer, temp_dir) from rastervision.data import ActivateMixin with ActivateMixin.compose(scene, predictor_label_store): if not predictor_label_store.get_labels() == scene_labels: e = TestError( test, ('Predictor did not produce the same labels ' 'as the Predict command'), 'for scene {} in experiment {}'.format( scene_config.id, experiment.id)) errors.append(e) return errors def test_prediction_package(experiment, test, temp_dir, check_channel_order=False): # Check the prediction package # This will only work with raster_sources that # have a single URI. 
skip = False errors = [] experiment = experiment.fully_resolve() scenes_to_uris = {} scenes = experiment.dataset.validation_scenes for scene in scenes: rs = scene.raster_source if hasattr(rs, 'uri'): scenes_to_uris[scene.id] = rs.uri elif hasattr(rs, 'uris'): uris = rs.uris if len(uris) > 1: skip = True else: scenes_to_uris[scene.id] = uris[0] else: skip = True if skip: console_warning('Skipping predict package test for ' 'test {}, experiment {}'.format(test, experiment.id)) else: if check_channel_order: errors.extend( test_prediction_package_validation(experiment, test, temp_dir, uris[0])) else: errors.extend( test_prediction_package_results(experiment, test, temp_dir, scenes, scenes_to_uris)) return errors def run_test(test, use_tf, temp_dir): errors = [] experiment = get_experiment(test, use_tf, temp_dir) commands_to_run = rv.all_commands() # Check serialization pp_uri = os.path.join(experiment.bundle_uri, 'predict_package.zip') experiment.task.predict_package_uri = pp_uri msg = experiment.to_proto() experiment = rv.ExperimentConfig.from_proto(msg) # Check that running doesn't raise any exceptions. try: IntegrationTestExperimentRunner(os.path.join(temp_dir, test.lower())) \ .run(experiment, rerun_commands=True, splits=2, commands_to_run=commands_to_run) except Exception: errors.append( TestError(test, 'raised an exception while running', traceback.format_exc())) return errors # Check that the eval is similar to expected eval. errors.extend(check_eval(test, temp_dir)) if not errors: errors.extend(test_prediction_package(experiment, test, temp_dir)) errors.extend( test_prediction_package( experiment, test, temp_dir, check_channel_order=True)) return errors @click.command() @click.argument('tests', nargs=-1) @click.option( '--rv_root', '-t', help=('Sets the rv_root directory used. 
' 'If set, test will not clean this directory up.')) @click.option( '--verbose', '-v', is_flag=True, help=('Sets the logging level to DEBUG.')) @click.option( '--use-tf', '-v', is_flag=True, help=('Run using TF-based backends.')) def main(tests, rv_root, verbose, use_tf): """Runs RV end-to-end and checks that evaluation metrics are correct.""" if len(tests) == 0: tests = all_tests if verbose: rv._registry.initialize_config( verbosity=rv.cli.verbosity.Verbosity.DEBUG) tests = list(map(lambda x: x.upper(), tests)) with RVConfig.get_tmp_dir() as temp_dir: if rv_root: temp_dir = rv_root errors = [] for test in tests: if test not in all_tests: print('{} is not a valid test.'.format(test)) return errors.extend(run_test(test, use_tf, temp_dir)) for error in errors: print(error) for test in tests: nb_test_errors = len( list(filter(lambda error: error.test == test, errors))) if nb_test_errors == 0: print('{} test passed!'.format(test)) if errors: exit(1) if __name__ == '__main__': main()
[]
[]
[ "TF_CPP_MIN_LOG_LEVEL" ]
[]
["TF_CPP_MIN_LOG_LEVEL"]
python
1
0
ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/params_linux.py
""" Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Ambari Agent """ import os from resource_management.libraries.script.script import Script from resource_management.libraries.resources.hdfs_resource import HdfsResource from resource_management.libraries.functions import conf_select from resource_management.libraries.functions import stack_select from resource_management.libraries.functions import format from resource_management.libraries.functions import StackFeature from resource_management.libraries.functions.stack_features import check_stack_feature from resource_management.libraries.functions.stack_features import get_stack_feature_version from resource_management.libraries.functions import get_kinit_path from resource_management.libraries.functions.get_not_managed_resources import get_not_managed_resources from resource_management.libraries.functions.version import format_stack_version from resource_management.libraries.functions.default import default from resource_management.libraries import functions from resource_management.libraries.functions import is_empty from resource_management.libraries.functions.get_architecture import get_architecture from resource_management.libraries.functions.setup_ranger_plugin_xml import get_audit_configs import status_params # a map of 
the Ambari role to the component name # for use with <stack-root>/current/<component> MAPR_SERVER_ROLE_DIRECTORY_MAP = { 'HISTORYSERVER' : 'hadoop-mapreduce-historyserver', 'MAPREDUCE2_CLIENT' : 'hadoop-mapreduce-client', } YARN_SERVER_ROLE_DIRECTORY_MAP = { 'APP_TIMELINE_SERVER' : 'hadoop-yarn-timelineserver', 'NODEMANAGER' : 'hadoop-yarn-nodemanager', 'RESOURCEMANAGER' : 'hadoop-yarn-resourcemanager', 'YARN_CLIENT' : 'hadoop-yarn-client' } # server configurations config = Script.get_config() tmp_dir = Script.get_tmp_dir() architecture = get_architecture() stack_name = status_params.stack_name stack_root = Script.get_stack_root() tarball_map = default("/configurations/cluster-env/tarball_map", None) config_path = os.path.join(stack_root, "current/hadoop-client/conf") config_dir = os.path.realpath(config_path) # This is expected to be of the form #.#.#.# stack_version_unformatted = config['hostLevelParams']['stack_version'] stack_version_formatted_major = format_stack_version(stack_version_unformatted) stack_version_formatted = functions.get_stack_version('hadoop-yarn-resourcemanager') stack_supports_ru = stack_version_formatted_major and check_stack_feature(StackFeature.ROLLING_UPGRADE, stack_version_formatted_major) stack_supports_timeline_state_store = stack_version_formatted_major and check_stack_feature(StackFeature.TIMELINE_STATE_STORE, stack_version_formatted_major) # New Cluster Stack Version that is defined during the RESTART of a Stack Upgrade. # It cannot be used during the initial Cluser Install because the version is not yet known. 
version = default("/commandParams/version", None) # get the correct version to use for checking stack features version_for_stack_feature_checks = get_stack_feature_version(config) stack_supports_ranger_kerberos = check_stack_feature(StackFeature.RANGER_KERBEROS_SUPPORT, version_for_stack_feature_checks) stack_supports_ranger_audit_db = check_stack_feature(StackFeature.RANGER_AUDIT_DB_SUPPORT, version_for_stack_feature_checks) hostname = config['hostname'] # hadoop default parameters hadoop_libexec_dir = stack_select.get_hadoop_dir("libexec") hadoop_bin = stack_select.get_hadoop_dir("sbin") hadoop_bin_dir = stack_select.get_hadoop_dir("bin") hadoop_conf_dir = conf_select.get_hadoop_conf_dir() hadoop_yarn_home = '/usr/lib/hadoop-yarn' hadoop_mapred2_jar_location = "/usr/lib/hadoop-mapreduce" mapred_bin = "/usr/lib/hadoop-mapreduce/sbin" yarn_bin = "/usr/lib/hadoop-yarn/sbin" yarn_container_bin = "/usr/lib/hadoop-yarn/bin" hadoop_java_io_tmpdir = os.path.join(tmp_dir, "hadoop_java_io_tmpdir") # hadoop parameters stack supporting rolling_uprade if stack_supports_ru: # MapR directory root mapred_role_root = "hadoop-mapreduce-client" command_role = default("/role", "") if command_role in MAPR_SERVER_ROLE_DIRECTORY_MAP: mapred_role_root = MAPR_SERVER_ROLE_DIRECTORY_MAP[command_role] # YARN directory root yarn_role_root = "hadoop-yarn-client" if command_role in YARN_SERVER_ROLE_DIRECTORY_MAP: yarn_role_root = YARN_SERVER_ROLE_DIRECTORY_MAP[command_role] hadoop_mapred2_jar_location = format("{stack_root}/current/{mapred_role_root}") mapred_bin = format("{stack_root}/current/{mapred_role_root}/sbin") hadoop_yarn_home = format("{stack_root}/current/{yarn_role_root}") yarn_bin = format("{stack_root}/current/{yarn_role_root}/sbin") yarn_container_bin = format("{stack_root}/current/{yarn_role_root}/bin") if stack_supports_timeline_state_store: # Timeline Service property that was added timeline_state_store stack feature ats_leveldb_state_store_dir = 
default('/configurations/yarn-site/yarn.timeline-service.leveldb-state-store.path', '/hadoop/yarn/timeline') # ats 1.5 properties entity_groupfs_active_dir = config['configurations']['yarn-site']['yarn.timeline-service.entity-group-fs-store.active-dir'] entity_groupfs_active_dir_mode = 01777 entity_groupfs_store_dir = config['configurations']['yarn-site']['yarn.timeline-service.entity-group-fs-store.done-dir'] entity_groupfs_store_dir_mode = 0700 hadoop_conf_secure_dir = os.path.join(hadoop_conf_dir, "secure") limits_conf_dir = "/etc/security/limits.d" yarn_user_nofile_limit = default("/configurations/yarn-env/yarn_user_nofile_limit", "32768") yarn_user_nproc_limit = default("/configurations/yarn-env/yarn_user_nproc_limit", "65536") mapred_user_nofile_limit = default("/configurations/mapred-env/mapred_user_nofile_limit", "32768") mapred_user_nproc_limit = default("/configurations/mapred-env/mapred_user_nproc_limit", "65536") execute_path = os.environ['PATH'] + os.pathsep + hadoop_bin_dir + os.pathsep + yarn_container_bin ulimit_cmd = "ulimit -c unlimited;" mapred_user = status_params.mapred_user yarn_user = status_params.yarn_user hdfs_user = config['configurations']['hadoop-env']['hdfs_user'] hdfs_tmp_dir = default("/configurations/hadoop-env/hdfs_tmp_dir", "/tmp") smokeuser = config['configurations']['cluster-env']['smokeuser'] smokeuser_principal = config['configurations']['cluster-env']['smokeuser_principal_name'] smoke_hdfs_user_mode = 0770 security_enabled = config['configurations']['cluster-env']['security_enabled'] nm_security_marker_dir = "/var/lib/hadoop-yarn" nm_security_marker = format('{nm_security_marker_dir}/nm_security_enabled') current_nm_security_state = os.path.isfile(nm_security_marker) toggle_nm_security = (current_nm_security_state and not security_enabled) or (not current_nm_security_state and security_enabled) smoke_user_keytab = config['configurations']['cluster-env']['smokeuser_keytab'] yarn_executor_container_group = 
config['configurations']['yarn-site']['yarn.nodemanager.linux-container-executor.group'] yarn_nodemanager_container_executor_class = config['configurations']['yarn-site']['yarn.nodemanager.container-executor.class'] is_linux_container_executor = (yarn_nodemanager_container_executor_class == 'org.apache.hadoop.yarn.server.nodemanager.LinuxContainerExecutor') container_executor_mode = 06050 if is_linux_container_executor else 02050 kinit_path_local = get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None)) yarn_http_policy = config['configurations']['yarn-site']['yarn.http.policy'] yarn_https_on = (yarn_http_policy.upper() == 'HTTPS_ONLY') rm_hosts = config['clusterHostInfo']['rm_host'] rm_host = rm_hosts[0] rm_port = config['configurations']['yarn-site']['yarn.resourcemanager.webapp.address'].split(':')[-1] rm_https_port = default('/configurations/yarn-site/yarn.resourcemanager.webapp.https.address', ":8090").split(':')[-1] # TODO UPGRADE default, update site during upgrade rm_nodes_exclude_path = default("/configurations/yarn-site/yarn.resourcemanager.nodes.exclude-path","/etc/hadoop/conf/yarn.exclude") rm_nodes_exclude_dir = os.path.dirname(rm_nodes_exclude_path) java64_home = config['hostLevelParams']['java_home'] java_exec = format("{java64_home}/bin/java") hadoop_ssl_enabled = default("/configurations/core-site/hadoop.ssl.enabled", False) yarn_heapsize = config['configurations']['yarn-env']['yarn_heapsize'] resourcemanager_heapsize = config['configurations']['yarn-env']['resourcemanager_heapsize'] nodemanager_heapsize = config['configurations']['yarn-env']['nodemanager_heapsize'] apptimelineserver_heapsize = default("/configurations/yarn-env/apptimelineserver_heapsize", 1024) ats_leveldb_dir = config['configurations']['yarn-site']['yarn.timeline-service.leveldb-timeline-store.path'] ats_leveldb_lock_file = os.path.join(ats_leveldb_dir, "leveldb-timeline-store.ldb", "LOCK") yarn_log_dir_prefix = 
config['configurations']['yarn-env']['yarn_log_dir_prefix'] yarn_pid_dir_prefix = status_params.yarn_pid_dir_prefix mapred_pid_dir_prefix = status_params.mapred_pid_dir_prefix mapred_log_dir_prefix = config['configurations']['mapred-env']['mapred_log_dir_prefix'] mapred_env_sh_template = config['configurations']['mapred-env']['content'] yarn_env_sh_template = config['configurations']['yarn-env']['content'] yarn_nodemanager_recovery_dir = default('/configurations/yarn-site/yarn.nodemanager.recovery.dir', None) service_check_queue_name = default('/configurations/yarn-env/service_check.queue.name', 'default') if len(rm_hosts) > 1: additional_rm_host = rm_hosts[1] rm_webui_address = format("{rm_host}:{rm_port},{additional_rm_host}:{rm_port}") rm_webui_https_address = format("{rm_host}:{rm_https_port},{additional_rm_host}:{rm_https_port}") else: rm_webui_address = format("{rm_host}:{rm_port}") rm_webui_https_address = format("{rm_host}:{rm_https_port}") if security_enabled: tc_mode = 0644 tc_owner = "root" else: tc_mode = None tc_owner = hdfs_user nm_webui_address = config['configurations']['yarn-site']['yarn.nodemanager.webapp.address'] hs_webui_address = config['configurations']['mapred-site']['mapreduce.jobhistory.webapp.address'] nm_address = config['configurations']['yarn-site']['yarn.nodemanager.address'] # still contains 0.0.0.0 if hostname and nm_address and nm_address.startswith("0.0.0.0:"): nm_address = nm_address.replace("0.0.0.0", hostname) # Initialize lists of work directories. 
nm_local_dirs = default("/configurations/yarn-site/yarn.nodemanager.local-dirs", "") nm_log_dirs = default("/configurations/yarn-site/yarn.nodemanager.log-dirs", "") nm_local_dirs_list = nm_local_dirs.split(',') nm_log_dirs_list = nm_log_dirs.split(',') nm_log_dir_to_mount_file = "/var/lib/ambari-agent/data/yarn/yarn_log_dir_mount.hist" nm_local_dir_to_mount_file = "/var/lib/ambari-agent/data/yarn/yarn_local_dir_mount.hist" distrAppJarName = "hadoop-yarn-applications-distributedshell-2.*.jar" hadoopMapredExamplesJarName = "hadoop-mapreduce-examples-2.*.jar" entity_file_history_directory = "/tmp/entity-file-history/active" yarn_pid_dir = status_params.yarn_pid_dir mapred_pid_dir = status_params.mapred_pid_dir mapred_log_dir = format("{mapred_log_dir_prefix}/{mapred_user}") yarn_log_dir = format("{yarn_log_dir_prefix}/{yarn_user}") mapred_job_summary_log = format("{mapred_log_dir_prefix}/{mapred_user}/hadoop-mapreduce.jobsummary.log") yarn_job_summary_log = format("{yarn_log_dir_prefix}/{yarn_user}/hadoop-mapreduce.jobsummary.log") user_group = config['configurations']['cluster-env']['user_group'] #exclude file exclude_hosts = default("/clusterHostInfo/decom_nm_hosts", []) exclude_file_path = default("/configurations/yarn-site/yarn.resourcemanager.nodes.exclude-path","/etc/hadoop/conf/yarn.exclude") ats_host = set(default("/clusterHostInfo/app_timeline_server_hosts", [])) has_ats = not len(ats_host) == 0 nm_hosts = default("/clusterHostInfo/nm_hosts", []) # don't using len(nm_hosts) here, because check can take too much time on large clusters number_of_nm = 1 # default kinit commands rm_kinit_cmd = "" yarn_timelineservice_kinit_cmd = "" nodemanager_kinit_cmd = "" rm_zk_address = config['configurations']['yarn-site']['yarn.resourcemanager.zk-address'] rm_zk_znode = config['configurations']['yarn-site']['yarn.resourcemanager.zk-state-store.parent-path'] rm_zk_store_class = config['configurations']['yarn-site']['yarn.resourcemanager.store.class'] 
stack_supports_zk_security = check_stack_feature(StackFeature.SECURE_ZOOKEEPER, version_for_stack_feature_checks) rm_zk_failover_znode = default('/configurations/yarn-site/yarn.resourcemanager.ha.automatic-failover.zk-base-path', '/yarn-leader-election') hadoop_registry_zk_root = default('/configurations/yarn-site/hadoop.registry.zk.root', '/registry') if security_enabled: rm_principal_name = config['configurations']['yarn-site']['yarn.resourcemanager.principal'] rm_principal_name = rm_principal_name.replace('_HOST',hostname.lower()) rm_keytab = config['configurations']['yarn-site']['yarn.resourcemanager.keytab'] rm_kinit_cmd = format("{kinit_path_local} -kt {rm_keytab} {rm_principal_name};") yarn_jaas_file = os.path.join(config_dir, 'yarn_jaas.conf') if stack_supports_zk_security: rm_security_opts = format('-Dzookeeper.sasl.client=true -Dzookeeper.sasl.client.username=zookeeper -Djava.security.auth.login.config={yarn_jaas_file} -Dzookeeper.sasl.clientconfig=Client') # YARN timeline security options if has_ats: _yarn_timelineservice_principal_name = config['configurations']['yarn-site']['yarn.timeline-service.principal'] _yarn_timelineservice_principal_name = _yarn_timelineservice_principal_name.replace('_HOST', hostname.lower()) _yarn_timelineservice_keytab = config['configurations']['yarn-site']['yarn.timeline-service.keytab'] yarn_timelineservice_kinit_cmd = format("{kinit_path_local} -kt {_yarn_timelineservice_keytab} {_yarn_timelineservice_principal_name};") if 'yarn.nodemanager.principal' in config['configurations']['yarn-site']: _nodemanager_principal_name = default('/configurations/yarn-site/yarn.nodemanager.principal', None) if _nodemanager_principal_name: _nodemanager_principal_name = _nodemanager_principal_name.replace('_HOST', hostname.lower()) _nodemanager_keytab = config['configurations']['yarn-site']['yarn.nodemanager.keytab'] nodemanager_kinit_cmd = format("{kinit_path_local} -kt {_nodemanager_keytab} {_nodemanager_principal_name};") 
yarn_log_aggregation_enabled = config['configurations']['yarn-site']['yarn.log-aggregation-enable'] yarn_nm_app_log_dir = config['configurations']['yarn-site']['yarn.nodemanager.remote-app-log-dir'] mapreduce_jobhistory_intermediate_done_dir = config['configurations']['mapred-site']['mapreduce.jobhistory.intermediate-done-dir'] mapreduce_jobhistory_done_dir = config['configurations']['mapred-site']['mapreduce.jobhistory.done-dir'] jobhistory_heapsize = default("/configurations/mapred-env/jobhistory_heapsize", "900") jhs_leveldb_state_store_dir = default('/configurations/mapred-site/mapreduce.jobhistory.recovery.store.leveldb.path', "/hadoop/mapreduce/jhs") # Tez-related properties tez_user = config['configurations']['tez-env']['tez_user'] # Tez jars tez_local_api_jars = '/usr/lib/tez/tez*.jar' tez_local_lib_jars = '/usr/lib/tez/lib/*.jar' app_dir_files = {tez_local_api_jars:None} # Tez libraries tez_lib_uris = default("/configurations/tez-site/tez.lib.uris", None) #for create_hdfs_directory hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab'] hdfs_principal_name = config['configurations']['hadoop-env']['hdfs_principal_name'] hdfs_site = config['configurations']['hdfs-site'] default_fs = config['configurations']['core-site']['fs.defaultFS'] is_webhdfs_enabled = hdfs_site['dfs.webhdfs.enabled'] # Path to file that contains list of HDFS resources to be skipped during processing hdfs_resource_ignore_file = "/var/lib/ambari-agent/data/.hdfs_resource_ignore" dfs_type = default("/commandParams/dfs_type", "") import functools #create partial functions with common arguments for every HdfsResource call #to create/delete hdfs directory/file/copyfromlocal we need to call params.HdfsResource in code HdfsResource = functools.partial( HdfsResource, user=hdfs_user, hdfs_resource_ignore_file = hdfs_resource_ignore_file, security_enabled = security_enabled, keytab = hdfs_user_keytab, kinit_path_local = kinit_path_local, hadoop_bin_dir = hadoop_bin_dir, 
hadoop_conf_dir = hadoop_conf_dir, principal_name = hdfs_principal_name, hdfs_site = hdfs_site, default_fs = default_fs, immutable_paths = get_not_managed_resources(), dfs_type = dfs_type ) update_exclude_file_only = default("/commandParams/update_exclude_file_only",False) mapred_tt_group = default("/configurations/mapred-site/mapreduce.tasktracker.group", user_group) #taskcontroller.cfg mapred_local_dir = "/tmp/hadoop-mapred/mapred/local" hdfs_log_dir_prefix = config['configurations']['hadoop-env']['hdfs_log_dir_prefix'] min_user_id = config['configurations']['yarn-env']['min_user_id'] # Node labels node_labels_dir = default("/configurations/yarn-site/yarn.node-labels.fs-store.root-dir", None) node_label_enable = config['configurations']['yarn-site']['yarn.node-labels.enabled'] cgroups_dir = "/cgroups_test/cpu" # hostname of the active HDFS HA Namenode (only used when HA is enabled) dfs_ha_namenode_active = default("/configurations/hadoop-env/dfs_ha_initial_namenode_active", None) if dfs_ha_namenode_active is not None: namenode_hostname = dfs_ha_namenode_active else: namenode_hostname = config['clusterHostInfo']['namenode_host'][0] ranger_admin_log_dir = default("/configurations/ranger-env/ranger_admin_log_dir","/var/log/ranger/admin") scheme = 'http' if not yarn_https_on else 'https' yarn_rm_address = config['configurations']['yarn-site']['yarn.resourcemanager.webapp.address'] if not yarn_https_on else config['configurations']['yarn-site']['yarn.resourcemanager.webapp.https.address'] rm_active_port = rm_https_port if yarn_https_on else rm_port rm_ha_enabled = False rm_ha_ids_list = [] rm_webapp_addresses_list = [yarn_rm_address] rm_ha_ids = default("/configurations/yarn-site/yarn.resourcemanager.ha.rm-ids", None) if rm_ha_ids: rm_ha_ids_list = rm_ha_ids.split(",") if len(rm_ha_ids_list) > 1: rm_ha_enabled = True if rm_ha_enabled: rm_webapp_addresses_list = [] for rm_id in rm_ha_ids_list: rm_webapp_address_property = 
format('yarn.resourcemanager.webapp.address.{rm_id}') if not yarn_https_on else format('yarn.resourcemanager.webapp.https.address.{rm_id}') rm_webapp_address = config['configurations']['yarn-site'][rm_webapp_address_property] rm_webapp_addresses_list.append(rm_webapp_address) # for curl command in ranger plugin to get db connector jdk_location = config['hostLevelParams']['jdk_location'] # ranger yarn plugin section start # ranger host ranger_admin_hosts = default("/clusterHostInfo/ranger_admin_hosts", []) has_ranger_admin = not len(ranger_admin_hosts) == 0 # ranger support xml_configuration flag, instead of depending on ranger xml_configurations_supported/ranger-env, using stack feature xml_configurations_supported = check_stack_feature(StackFeature.RANGER_XML_CONFIGURATION, version_for_stack_feature_checks) # ambari-server hostname ambari_server_hostname = config['clusterHostInfo']['ambari_server_host'][0] # ranger yarn plugin enabled property enable_ranger_yarn = default("/configurations/ranger-yarn-plugin-properties/ranger-yarn-plugin-enabled", "No") enable_ranger_yarn = True if enable_ranger_yarn.lower() == 'yes' else False # ranger yarn-plugin supported flag, instead of using is_supported_yarn_ranger/yarn-env, using stack feature is_supported_yarn_ranger = check_stack_feature(StackFeature.YARN_RANGER_PLUGIN_SUPPORT, version_for_stack_feature_checks) # get ranger yarn properties if enable_ranger_yarn is True if enable_ranger_yarn and is_supported_yarn_ranger: # get ranger policy url policymgr_mgr_url = config['configurations']['ranger-yarn-security']['ranger.plugin.yarn.policy.rest.url'] if not is_empty(policymgr_mgr_url) and policymgr_mgr_url.endswith('/'): policymgr_mgr_url = policymgr_mgr_url.rstrip('/') # ranger audit db user xa_audit_db_user = default('/configurations/admin-properties/audit_db_user', 'rangerlogger') xa_audit_db_password = '' if not is_empty(config['configurations']['admin-properties']['audit_db_password']) and 
stack_supports_ranger_audit_db and has_ranger_admin: xa_audit_db_password = config['configurations']['admin-properties']['audit_db_password'] # ranger yarn service/repository name repo_name = str(config['clusterName']) + '_yarn' repo_name_value = config['configurations']['ranger-yarn-security']['ranger.plugin.yarn.service.name'] if not is_empty(repo_name_value) and repo_name_value != "{{repo_name}}": repo_name = repo_name_value # ranger-env config ranger_env = config['configurations']['ranger-env'] # create ranger-env config having external ranger credential properties if not has_ranger_admin and enable_ranger_yarn: external_admin_username = default('/configurations/ranger-yarn-plugin-properties/external_admin_username', 'admin') external_admin_password = default('/configurations/ranger-yarn-plugin-properties/external_admin_password', 'admin') external_ranger_admin_username = default('/configurations/ranger-yarn-plugin-properties/external_ranger_admin_username', 'amb_ranger_admin') external_ranger_admin_password = default('/configurations/ranger-yarn-plugin-properties/external_ranger_admin_password', 'amb_ranger_admin') ranger_env = {} ranger_env['admin_username'] = external_admin_username ranger_env['admin_password'] = external_admin_password ranger_env['ranger_admin_username'] = external_ranger_admin_username ranger_env['ranger_admin_password'] = external_ranger_admin_password ranger_plugin_properties = config['configurations']['ranger-yarn-plugin-properties'] policy_user = config['configurations']['ranger-yarn-plugin-properties']['policy_user'] yarn_rest_url = config['configurations']['yarn-site']['yarn.resourcemanager.webapp.address'] ranger_plugin_config = { 'username' : config['configurations']['ranger-yarn-plugin-properties']['REPOSITORY_CONFIG_USERNAME'], 'password' : unicode(config['configurations']['ranger-yarn-plugin-properties']['REPOSITORY_CONFIG_PASSWORD']), 'yarn.url' : format('{scheme}://{yarn_rest_url}'), 'commonNameForCertificate' : 
config['configurations']['ranger-yarn-plugin-properties']['common.name.for.certificate'] } yarn_ranger_plugin_repo = { 'isEnabled': 'true', 'configs': ranger_plugin_config, 'description': 'yarn repo', 'name': repo_name, 'repositoryType': 'yarn', 'type': 'yarn', 'assetType': '1' } if stack_supports_ranger_kerberos: ranger_plugin_config['ambari.service.check.user'] = policy_user ranger_plugin_config['hadoop.security.authentication'] = 'kerberos' if security_enabled else 'simple' if stack_supports_ranger_kerberos and security_enabled: ranger_plugin_config['policy.download.auth.users'] = yarn_user ranger_plugin_config['tag.download.auth.users'] = yarn_user downloaded_custom_connector = None previous_jdbc_jar_name = None driver_curl_source = None driver_curl_target = None previous_jdbc_jar = None if has_ranger_admin and stack_supports_ranger_audit_db: xa_audit_db_flavor = config['configurations']['admin-properties']['DB_FLAVOR'] jdbc_jar_name, previous_jdbc_jar_name, audit_jdbc_url, jdbc_driver = get_audit_configs(config) downloaded_custom_connector = format("{tmp_dir}/{jdbc_jar_name}") if stack_supports_ranger_audit_db else None driver_curl_source = format("{jdk_location}/{jdbc_jar_name}") if stack_supports_ranger_audit_db else None driver_curl_target = format("{hadoop_yarn_home}/lib/{jdbc_jar_name}") if stack_supports_ranger_audit_db else None previous_jdbc_jar = format("{hadoop_yarn_home}/lib/{previous_jdbc_jar_name}") if stack_supports_ranger_audit_db else None xa_audit_db_is_enabled = False if xml_configurations_supported and stack_supports_ranger_audit_db: xa_audit_db_is_enabled = config['configurations']['ranger-yarn-audit']['xasecure.audit.destination.db'] xa_audit_hdfs_is_enabled = config['configurations']['ranger-yarn-audit']['xasecure.audit.destination.hdfs'] if xml_configurations_supported else False ssl_keystore_password = config['configurations']['ranger-yarn-policymgr-ssl']['xasecure.policymgr.clientssl.keystore.password'] if xml_configurations_supported 
else None ssl_truststore_password = config['configurations']['ranger-yarn-policymgr-ssl']['xasecure.policymgr.clientssl.truststore.password'] if xml_configurations_supported else None credential_file = format('/etc/ranger/{repo_name}/cred.jceks') # for SQLA explicitly disable audit to DB for Ranger if has_ranger_admin and stack_supports_ranger_audit_db and xa_audit_db_flavor == 'sqla': xa_audit_db_is_enabled = False # ranger yarn plugin end section
[]
[]
[ "PATH" ]
[]
["PATH"]
python
1
0
controller/controller.go
package controller import ( "bytes" "compress/gzip" "context" "encoding/json" "fmt" "log" "net/http" "os" "strings" "time" "github.com/TwinProduction/gatus/config" "github.com/TwinProduction/gatus/security" "github.com/TwinProduction/gatus/storage" "github.com/TwinProduction/gocache" "github.com/TwinProduction/health" "github.com/gorilla/mux" "github.com/prometheus/client_golang/prometheus/promhttp" ) const ( cacheTTL = 10 * time.Second ) var ( cache = gocache.NewCache().WithMaxSize(100).WithEvictionPolicy(gocache.FirstInFirstOut) // staticFolder is the path to the location of the static folder from the root path of the project // The only reason this is exposed is to allow running tests from a different path than the root path of the project staticFolder = "./web/static" // server is the http.Server created by Handle. // The only reason it exists is for testing purposes. server *http.Server ) func init() { if err := cache.StartJanitor(); err != nil { log.Fatal("[controller][init] Failed to start cache janitor:", err.Error()) } } // Handle creates the router and starts the server func Handle() { cfg := config.Get() var router http.Handler = CreateRouter(cfg) if os.Getenv("ENVIRONMENT") == "dev" { router = developmentCorsHandler(router) } server = &http.Server{ Addr: fmt.Sprintf("%s:%d", cfg.Web.Address, cfg.Web.Port), Handler: router, ReadTimeout: 15 * time.Second, WriteTimeout: 15 * time.Second, IdleTimeout: 15 * time.Second, } log.Println("[controller][Handle] Listening on " + cfg.Web.SocketAddress()) if os.Getenv("ROUTER_TEST") == "true" { return } log.Println("[controller][Handle]", server.ListenAndServe()) } // Shutdown stops the server func Shutdown() { if server != nil { _ = server.Shutdown(context.TODO()) server = nil } } // CreateRouter creates the router for the http server func CreateRouter(cfg *config.Config) *mux.Router { router := mux.NewRouter() if cfg.Metrics { router.Handle("/metrics", promhttp.Handler()).Methods("GET") } router.Handle("/health", 
health.Handler().WithJSON(true)).Methods("GET") router.HandleFunc("/favicon.ico", favIconHandler).Methods("GET") router.HandleFunc("/api/v1/statuses", secureIfNecessary(cfg, serviceStatusesHandler)).Methods("GET") // No GzipHandler for this one, because we cache the content router.HandleFunc("/api/v1/statuses/{key}", secureIfNecessary(cfg, GzipHandlerFunc(serviceStatusHandler))).Methods("GET") router.HandleFunc("/api/v1/badges/uptime/{duration}/{identifier}", badgeHandler).Methods("GET") // SPA router.HandleFunc("/services/{service}", spaHandler).Methods("GET") // Everything else falls back on static content router.PathPrefix("/").Handler(GzipHandler(http.FileServer(http.Dir(staticFolder)))) return router } func secureIfNecessary(cfg *config.Config, handler http.HandlerFunc) http.HandlerFunc { if cfg.Security != nil && cfg.Security.IsValid() { return security.Handler(handler, cfg.Security) } return handler } // serviceStatusesHandler handles requests to retrieve all service statuses // Due to the size of the response, this function leverages a cache. 
// Must not be wrapped by GzipHandler func serviceStatusesHandler(writer http.ResponseWriter, r *http.Request) { page, pageSize := extractPageAndPageSizeFromRequest(r) gzipped := strings.Contains(r.Header.Get("Accept-Encoding"), "gzip") var exists bool var value interface{} if gzipped { writer.Header().Set("Content-Encoding", "gzip") value, exists = cache.Get(fmt.Sprintf("service-status-%d-%d-gzipped", page, pageSize)) } else { value, exists = cache.Get(fmt.Sprintf("service-status-%d-%d", page, pageSize)) } var data []byte if !exists { var err error buffer := &bytes.Buffer{} gzipWriter := gzip.NewWriter(buffer) data, err = json.Marshal(storage.Get().GetAllServiceStatusesWithResultPagination(page, pageSize)) if err != nil { log.Printf("[controller][serviceStatusesHandler] Unable to marshal object to JSON: %s", err.Error()) writer.WriteHeader(http.StatusInternalServerError) _, _ = writer.Write([]byte("Unable to marshal object to JSON")) return } _, _ = gzipWriter.Write(data) _ = gzipWriter.Close() gzippedData := buffer.Bytes() cache.SetWithTTL(fmt.Sprintf("service-status-%d-%d", page, pageSize), data, cacheTTL) cache.SetWithTTL(fmt.Sprintf("service-status-%d-%d-gzipped", page, pageSize), gzippedData, cacheTTL) if gzipped { data = gzippedData } } else { data = value.([]byte) } writer.Header().Add("Content-Type", "application/json") writer.WriteHeader(http.StatusOK) _, _ = writer.Write(data) } // serviceStatusHandler retrieves a single ServiceStatus by group name and service name func serviceStatusHandler(writer http.ResponseWriter, r *http.Request) { page, pageSize := extractPageAndPageSizeFromRequest(r) vars := mux.Vars(r) serviceStatus := storage.Get().GetServiceStatusByKey(vars["key"]) if serviceStatus == nil { log.Printf("[controller][serviceStatusHandler] Service with key=%s not found", vars["key"]) writer.WriteHeader(http.StatusNotFound) _, _ = writer.Write([]byte("not found")) return } data := map[string]interface{}{ "serviceStatus": 
serviceStatus.WithResultPagination(page, pageSize), // The following fields, while present on core.ServiceStatus, are annotated to remain hidden so that we can // expose only the necessary data on /api/v1/statuses. // Since the /api/v1/statuses/{key} endpoint does need this data, however, we explicitly expose it here "events": serviceStatus.Events, "uptime": serviceStatus.Uptime, } output, err := json.Marshal(data) if err != nil { log.Printf("[controller][serviceStatusHandler] Unable to marshal object to JSON: %s", err.Error()) writer.WriteHeader(http.StatusInternalServerError) _, _ = writer.Write([]byte("unable to marshal object to JSON")) return } writer.Header().Add("Content-Type", "application/json") writer.WriteHeader(http.StatusOK) _, _ = writer.Write(output) }
[ "\"ENVIRONMENT\"", "\"ROUTER_TEST\"" ]
[]
[ "ENVIRONMENT", "ROUTER_TEST" ]
[]
["ENVIRONMENT", "ROUTER_TEST"]
go
2
0
old_version/mmdet_apis_env_7ef08d32c0e2f8585b07423c9e027338ca16486f.py
import logging import os import random import subprocess import numpy as np import torch import torch.distributed as dist import torch.multiprocessing as mp from mmcv.runner import get_dist_info def init_dist(launcher, backend='nccl', **kwargs): if mp.get_start_method(allow_none=True) is None: mp.set_start_method('spawn') if launcher == 'pytorch': _init_dist_pytorch(backend, **kwargs) elif launcher == 'mpi': _init_dist_mpi(backend, **kwargs) elif launcher == 'slurm': _init_dist_slurm(backend, **kwargs) else: raise ValueError('Invalid launcher type: {}'.format(launcher)) def _init_dist_pytorch(backend, **kwargs): # TODO: use local_rank instead of rank % num_gpus rank = int(os.environ['RANK']) num_gpus = torch.cuda.device_count() torch.cuda.set_device(rank % num_gpus) dist.init_process_group(backend=backend, **kwargs) def _init_dist_mpi(backend, **kwargs): raise NotImplementedError def _init_dist_slurm(backend, port=29500, **kwargs): proc_id = int(os.environ['SLURM_PROCID']) ntasks = int(os.environ['SLURM_NTASKS']) node_list = os.environ['SLURM_NODELIST'] num_gpus = torch.cuda.device_count() torch.cuda.set_device(proc_id % num_gpus) addr = subprocess.getoutput( 'scontrol show hostname {} | head -n1'.format(node_list)) os.environ['MASTER_PORT'] = str(port) os.environ['MASTER_ADDR'] = addr os.environ['WORLD_SIZE'] = str(ntasks) os.environ['RANK'] = str(proc_id) dist.init_process_group(backend=backend) def set_random_seed(seed): random.seed(seed) np.random.seed(seed) torch.manual_seed(seed) torch.cuda.manual_seed_all(seed) def get_root_logger(log_level=logging.INFO): logger = logging.getLogger() if not logger.hasHandlers(): logging.basicConfig( format='%(asctime)s - %(levelname)s - %(message)s', level=log_level) rank, _ = get_dist_info() if rank != 0: logger.setLevel('ERROR') return logger
[]
[]
[ "MASTER_ADDR", "RANK", "MASTER_PORT", "SLURM_NTASKS", "SLURM_NODELIST", "SLURM_PROCID", "WORLD_SIZE" ]
[]
["MASTER_ADDR", "RANK", "MASTER_PORT", "SLURM_NTASKS", "SLURM_NODELIST", "SLURM_PROCID", "WORLD_SIZE"]
python
7
0
Django/Sekolah/sekolah/asgi.py
""" ASGI config for sekolah project. It exposes the ASGI callable as a module-level variable named ``application``. For more information on this file, see https://docs.djangoproject.com/en/3.2/howto/deployment/asgi/ """ import os from django.core.asgi import get_asgi_application os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'sekolah.settings') application = get_asgi_application()
[]
[]
[]
[]
[]
python
0
0
connector/setup.py
#! /usr/bin/env python """Setup file for yang package See: https://packaging.python.org/en/latest/distributing.html """ import os import re import sys import shlex import unittest import subprocess from setuptools import setup, find_packages, Command from setuptools.command.test import test pkg_name = 'yang.connector' pkg_path = '/'.join(pkg_name.split('.')) class CleanCommand(Command): '''Custom clean command cleanup current directory: - removes build/ - removes src/*.egg-info - removes *.pyc and __pycache__ recursively Example ------- python setup.py clean ''' user_options = [] description = 'CISCO SHARED : Clean all build artifacts' def initialize_options(self): pass def finalize_options(self): pass def run(self): os.system('rm -vrf ./build ./dist ./src/*.egg-info') os.system('find . -type f -name "*.pyc" | xargs rm -vrf') os.system('find . -type d -name "__pycache__" | xargs rm -vrf') class TestCommand(Command): user_options = [] description = 'CISCO SHARED : Run unit tests against this package' def initialize_options(self): pass def finalize_options(self): pass def run(self): # where the tests are (relative to here) tests = os.path.join('src', pkg_path, 'tests') # call unittests sys.exit(unittest.main( module = None, argv = ['python -m unittest', 'discover', tests], failfast = True)) class BuildAndPreviewDocsCommand(Command): user_options = [] description = 'CISCO SHARED : Build and privately distribute ' \ 'Sphinx documentation for this package' def initialize_options(self): pass def finalize_options(self): pass def run(self): user = os.environ['USER'] sphinx_build_cmd = "sphinx-build -b html -c ../../docs " \ "-d ./__build__/documentation/doctrees docs/ ./__build__/documentation/html" target_dir = "/users/{user}/WWW/cisco_shared/{pkg_name}".\ format(user = user, pkg_name = pkg_path) mkdir_cmd = "mkdir -p {target_dir}".format(target_dir=target_dir) rsync_cmd = "rsync -rvc ./__build__/documentation/ {target_dir}".\ format(target_dir=target_dir) try: ret_code = 
subprocess.call(shlex.split(mkdir_cmd)) if not ret_code: ret_code = subprocess.call(shlex.split(sphinx_build_cmd)) if not ret_code: ret_code = subprocess.call(shlex.split(rsync_cmd)) #print("\nYou may preview the documentation at the following URL:") #print("http://wwwin-home.cisco.com/~{user}/cisco_shared/{pkg_name}/html".\ # format(user=user, pkg_name=pkg_path)) sys.exit(0) sys.exit(1) except Exception: sys.exit(1) def read(*paths): '''read and return txt content of file''' with open(os.path.join(os.path.dirname(__file__), *paths)) as fp: return fp.read() def find_version(*paths): '''reads a file and returns the defined __version__ value''' version_match = re.search(r"^__version__ ?= ?['\"]([^'\"]*)['\"]", read(*paths), re.M) if version_match: return version_match.group(1) raise RuntimeError("Unable to find version string.") # launch setup setup( name = pkg_name, version = find_version('src', pkg_path, '__init__.py'), # descriptions description = 'YANG defined interface API protocol connector', long_description = 'Client capable of NETCONF and gNMI protocol', # the package's documentation page. 
url = 'https://github.com/CiscoTestAutomation/yang.git', # author details author = 'Jonathan Yang', author_email = '[email protected]', maintainer_email = '[email protected]', # project licensing license = 'Apache 2.0', platforms = ['Linux',], # see https://pypi.python.org/pypi?%3Aaction=list_classifiers classifiers = [ 'Development Status :: 5 - Production/Stable', 'Environment :: Console', 'Intended Audience :: Developers', 'Intended Audience :: Telecommunications Industry', 'License :: OSI Approved :: Apache Software License', 'Operating System :: POSIX :: Linux', 'Operating System :: OS Independent', 'Programming Language :: Python :: 3.4', 'Programming Language :: Python :: 3 :: Only', 'Topic :: Software Development :: Testing', ], # uses namespace package namespace_packages = ['yang'], # project keywords keywords = 'pyats cisco-shared', # project packages packages = find_packages(where = 'src'), # project directory package_dir = { '': 'src', }, # additional package data files that goes into the package itself package_data = {'':['README.rst']}, # Standalone scripts scripts = [ ], # console entry point entry_points = { }, # package dependencies install_requires = [ 'paramiko >= 1.15.1', 'lxml >= 3.3.0', 'ncclient >= 0.6.6', 'grpcio <= 1.28.1', 'cisco-gnmi >= 1.0.13, < 2.0.0', ], # any additional groups of dependencies. # install using: $ pip install -e .[dev] extras_require = { 'dev': ['coverage', 'restview', 'Sphinx', 'sphinxcontrib-napoleon', 'sphinx-rtd-theme'], }, # any data files placed outside this package. # See: http://docs.python.org/3.4/distutils/setupscript.html # format: # [('target', ['list', 'of', 'files'])] # where target is sys.prefix/<target> data_files = [], # custom commands for setup.py cmdclass = { 'clean': CleanCommand, 'test': TestCommand, 'docs': BuildAndPreviewDocsCommand, }, # non zip-safe (never tested it) zip_safe = False, )
[]
[]
[ "USER" ]
[]
["USER"]
python
1
0
api/settings.go
package handler import ( "context" "encoding/json" "fmt" "github.com/go-redis/redis/v8" "net/http" "os" "strconv" ) var rdb *redis.Client var ctx = context.Background() var settings *Settings = nil type Settings struct { Lat float64 `json:"lat"` Lng float64 `json:"lng"` TrackDiesel bool `json:"diesel"` TrackSuper bool `json:"super"` TrackE10 bool `json:"e10"` SetHome bool `json:"setHome"` } func setupPersistency() { redisUrl := os.Getenv("REDIS_URL") opt, _ := redis.ParseURL(redisUrl) rdb = redis.NewClient(opt) } func saveSettings(id int64) { key := os.Getenv("VERCEL_ENV") + "/" + strconv.FormatInt(id, 10) js, _ := json.Marshal(settings) rdb.Set(ctx, key, js, 0) } func loadSettings(id int64) { var s Settings key := os.Getenv("VERCEL_ENV") + "/" + strconv.FormatInt(id, 10) val, err := rdb.Get(ctx, key).Result() if err == nil { _ = json.Unmarshal([]byte(val), &s) settings = &s } else { settings = &Settings{ Lat: 0, Lng: 0, TrackDiesel: true, TrackSuper: true, TrackE10: true, SetHome: false, } } } func SettingsHandler(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusOK) _, _ = fmt.Fprintf(w, "ok") }
[ "\"REDIS_URL\"", "\"VERCEL_ENV\"", "\"VERCEL_ENV\"" ]
[]
[ "REDIS_URL", "VERCEL_ENV" ]
[]
["REDIS_URL", "VERCEL_ENV"]
go
2
0
herokuapp/project_template/project_name/settings/production.py
""" Django settings for bar project. For more information on this file, see https://docs.djangoproject.com/en/1.6/topics/settings/ For the full list of settings and their values, see https://docs.djangoproject.com/en/1.6/ref/settings/ """ import os import dj_database_url from django.utils.crypto import get_random_string # Build paths inside the project like this: os.path.join(BASE_DIR, ...) SITE_ROOT = os.path.abspath(os.path.join(os.path.dirname(__file__), "..")) BASE_DIR = os.path.abspath(os.path.join(SITE_ROOT, "..")) # Heroku platform settings. HEROKU_APP_NAME = "{{ app_name }}" HEROKU_BUILDPACK_URL = "https://github.com/heroku/heroku-buildpack-python.git" # The name and domain of this site. SITE_NAME = "Example" SITE_DOMAIN = "{{ app_name }}.herokuapp.com" PREPEND_WWW = False # Security settings. SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTO", "https") ALLOWED_HOSTS = ( SITE_DOMAIN, "{HEROKU_APP_NAME}.herokuapp.com".format( HEROKU_APP_NAME = HEROKU_APP_NAME, ), ) # Database settings. DATABASES = { "default": dj_database_url.config(default="postgresql://"), } # Use Amazon S3 for storage for uploaded media files. DEFAULT_FILE_STORAGE = "storages.backends.s3boto.S3BotoStorage" # Use Amazon S3 and RequireJS for static files storage. STATICFILES_STORAGE = "require_s3.storage.OptimizedCachedStaticFilesStorage" # Amazon S3 settings. AWS_ACCESS_KEY_ID = os.environ.get("AWS_ACCESS_KEY_ID") AWS_SECRET_ACCESS_KEY = os.environ.get("AWS_SECRET_ACCESS_KEY") AWS_STORAGE_BUCKET_NAME = os.environ.get("AWS_STORAGE_BUCKET_NAME") AWS_AUTO_CREATE_BUCKET = True AWS_HEADERS = { "Cache-Control": "public, max-age=86400", } AWS_S3_FILE_OVERWRITE = False AWS_QUERYSTRING_AUTH = False AWS_S3_SECURE_URLS = True AWS_REDUCED_REDUNDANCY = False AWS_IS_GZIPPED = False STATIC_URL = "https://{bucket_name}.s3.amazonaws.com/".format( bucket_name = AWS_STORAGE_BUCKET_NAME, ) # Email settings. 
EMAIL_HOST = "smtp.sendgrid.net" EMAIL_HOST_USER = os.environ.get("SENDGRID_USERNAME") EMAIL_HOST_PASSWORD = os.environ.get("SENDGRID_PASSWORD") EMAIL_PORT = 25 EMAIL_USE_TLS = False SERVER_EMAIL = u"{name} <notifications@{domain}>".format( name = SITE_NAME, domain = SITE_DOMAIN, ) DEFAULT_FROM_EMAIL = SERVER_EMAIL EMAIL_SUBJECT_PREFIX = "[%s] " % SITE_NAME # Error reporting settings. Use these to set up automatic error notifications. ADMINS = () MANAGERS = () SEND_BROKEN_LINK_EMAILS = False # Locale settings. TIME_ZONE = "UTC" LANGUAGE_CODE = "en-gb" USE_I18N = True USE_L10N = True USE_TZ = True # A list of additional installed applications. INSTALLED_APPS = ( "django.contrib.sessions", "django.contrib.auth", "django.contrib.contenttypes", "django.contrib.messages", "django.contrib.staticfiles", "django.contrib.admin", "herokuapp", ) # Additional static file locations. STATICFILES_DIRS = ( os.path.join(SITE_ROOT, "static"), ) # Dispatch settings. MIDDLEWARE_CLASSES = ( "django.middleware.gzip.GZipMiddleware", "herokuapp.middleware.CanonicalDomainMiddleware", "django.contrib.sessions.middleware.SessionMiddleware", "django.middleware.common.CommonMiddleware", "django.middleware.csrf.CsrfViewMiddleware", "django.contrib.auth.middleware.AuthenticationMiddleware", "django.contrib.messages.middleware.MessageMiddleware", "django.middleware.clickjacking.XFrameOptionsMiddleware", ) ROOT_URLCONF = "{{ project_name }}.urls" WSGI_APPLICATION = "{{ project_name }}.wsgi.application" SESSION_ENGINE = "django.contrib.sessions.backends.signed_cookies" MESSAGE_STORAGE = "django.contrib.messages.storage.cookie.CookieStorage" SITE_ID = 1 # Absolute path to the directory where templates are stored. 
TEMPLATE_DIRS = ( os.path.join(SITE_ROOT, "templates"), ) TEMPLATE_LOADERS = ( ("django.template.loaders.cached.Loader", ( "django.template.loaders.filesystem.Loader", "django.template.loaders.app_directories.Loader", )), ) TEMPLATE_CONTEXT_PROCESSORS = ( "django.contrib.auth.context_processors.auth", "django.core.context_processors.debug", "django.core.context_processors.i18n", "django.core.context_processors.media", "django.core.context_processors.static", "django.core.context_processors.tz", # "django.core.context_processors.request", "django.contrib.messages.context_processors.messages", ) # Namespace for cache keys, if using a process-shared cache. CACHE_MIDDLEWARE_KEY_PREFIX = "{{ project_name }}" CACHES = { "default": { "BACKEND": "django.core.cache.backends.locmem.LocMemCache", }, # Long cache timeout for staticfiles, since this is used heavily by the optimizing storage. "staticfiles": { "BACKEND": "django.core.cache.backends.locmem.LocMemCache", "TIMEOUT": 60 * 60 * 24 * 365, "LOCATION": "staticfiles", }, } # A secret key used for cryptographic algorithms. SECRET_KEY = os.environ.get("SECRET_KEY", get_random_string(50, "abcdefghijklmnopqrstuvwxyz0123456789!@#$%^&*(-_=+)")) # Logging configuration. LOGGING = { "version": 1, # Don't throw away default loggers. "disable_existing_loggers": False, "handlers": { # Redefine console logger to run in production. "console": { "level": "INFO", "class": "logging.StreamHandler", }, }, "loggers": { # Redefine django logger to use redefined console logging. "django": { "handlers": ["console"], } } }
[]
[]
[ "AWS_SECRET_ACCESS_KEY", "AWS_STORAGE_BUCKET_NAME", "SENDGRID_PASSWORD", "SECRET_KEY", "SENDGRID_USERNAME", "AWS_ACCESS_KEY_ID" ]
[]
["AWS_SECRET_ACCESS_KEY", "AWS_STORAGE_BUCKET_NAME", "SENDGRID_PASSWORD", "SECRET_KEY", "SENDGRID_USERNAME", "AWS_ACCESS_KEY_ID"]
python
6
0
horovod/run/util/lsf.py
# Copyright IBM Corp. 2020. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== import io import os import yaml from horovod.common.util import _cache from horovod.run.common.util import safe_shell_exec class LSFUtils: """LSF Utilities""" _CSM_ALLOCATION_QUERY = "/opt/ibm/csm/bin/csm_allocation_query" _CSM_NODE_QUERY = "/opt/ibm/csm/bin/csm_node_attributes_query" _LSCPU_CMD = "LANG=en_US.utf8 lscpu" _THREAD_KEY= "Thread(s) per core" _csm_allocation_info = {} @staticmethod def using_lsf(): """Returns True if LSF was used to start the current process.""" return "LSB_JOBID" in os.environ @staticmethod def get_allocation_info(): """Returns and sets the static CSM allocation info.""" if not LSFUtils._csm_allocation_info: lsf_allocation_id = os.environ["CSM_ALLOCATION_ID"].strip() output = io.StringIO() exit_code = safe_shell_exec.execute("{cmd} -a {allocation}".format( cmd=LSFUtils._CSM_ALLOCATION_QUERY, allocation=lsf_allocation_id), stdout=output, stderr=output) if exit_code != 0: raise RuntimeError( "{cmd} failed with exit code {exit_code}".format( cmd=LSFUtils._CSM_ALLOCATION_QUERY, exit_code=exit_code)) LSFUtils._csm_allocation_info = yaml.safe_load(output.getvalue()) # Fetch the total number of cores and gpus for the first host output = io.StringIO() exit_code = safe_shell_exec.execute("{cmd} -n {node}".format( cmd=LSFUtils._CSM_NODE_QUERY, 
node=LSFUtils._csm_allocation_info["compute_nodes"][0]), stdout=output, stderr=output) if exit_code != 0: raise RuntimeError( "{cmd} failed with exit code {exit_code}".format( cmd=LSFUtils._CSM_NODE_QUERY, exit_code=exit_code)) node_output = yaml.safe_load(output.getvalue()) total_core_count = (int(node_output["Record_1"]["discovered_cores"]) - int(node_output["Record_1"]["discovered_sockets"]) * LSFUtils._csm_allocation_info["isolated_cores"]) LSFUtils._csm_allocation_info["compute_node_cores"]= total_core_count LSFUtils._csm_allocation_info["compute_node_gpus"] = int(node_output["Record_1"]["discovered_gpus"]) # Sorting LSF hostnames LSFUtils._csm_allocation_info["compute_nodes"].sort() return LSFUtils._csm_allocation_info @staticmethod def get_compute_hosts(): """Returns the list of LSF compute hosts.""" return LSFUtils.get_allocation_info()["compute_nodes"] @staticmethod def get_num_cores(): """Returns the number of cores per node.""" return LSFUtils.get_allocation_info()["compute_node_cores"] @staticmethod def get_num_gpus(): """Returns the number of gpus per node.""" return LSFUtils.get_allocation_info()["compute_node_gpus"] @staticmethod @_cache def get_num_processes(): """Returns the total number of processes.""" return len(LSFUtils.get_compute_hosts()) * LSFUtils.get_num_gpus() @staticmethod @_cache def get_num_threads(): """Returns the number of hardware threads.""" lscpu_cmd = 'ssh -o PasswordAuthentication=no -o StrictHostKeyChecking=no ' \ '{host} {cmd}'.format( host=LSFUtils.get_compute_hosts()[0], cmd=LSFUtils._LSCPU_CMD ) output = io.StringIO() exit_code = safe_shell_exec.execute(lscpu_cmd, stdout=output, stderr=output) if exit_code != 0: raise RuntimeError("{cmd} failed with exit code {exit_code}".format( cmd=lscpu_cmd, exit_code=exit_code)) return int(yaml.safe_load(output.getvalue())[LSFUtils._THREAD_KEY])
[]
[]
[ "CSM_ALLOCATION_ID" ]
[]
["CSM_ALLOCATION_ID"]
python
1
0
plugins/telemetry/telemetry.go
// Copyright (c) 2018 Cisco and/or its affiliates. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at: // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package telemetry import ( "context" "fmt" "net/http" "os" "sync" "time" "github.com/gorilla/mux" "github.com/pkg/errors" "github.com/unrolled/render" "go.ligato.io/cn-infra/v2/infra" "go.ligato.io/cn-infra/v2/logging" "go.ligato.io/cn-infra/v2/rpc/grpc" prom "go.ligato.io/cn-infra/v2/rpc/prometheus" "go.ligato.io/cn-infra/v2/rpc/rest" "go.ligato.io/cn-infra/v2/servicelabel" "go.ligato.io/vpp-agent/v3/pkg/metrics" "go.ligato.io/vpp-agent/v3/pkg/models" "go.ligato.io/vpp-agent/v3/plugins/govppmux" "go.ligato.io/vpp-agent/v3/plugins/telemetry/vppcalls" "go.ligato.io/vpp-agent/v3/plugins/vpp/ifplugin/ifaceidx" "go.ligato.io/vpp-agent/v3/proto/ligato/configurator" _ "go.ligato.io/vpp-agent/v3/plugins/telemetry/vppcalls/vpp1908" _ "go.ligato.io/vpp-agent/v3/plugins/telemetry/vppcalls/vpp2001" _ "go.ligato.io/vpp-agent/v3/plugins/telemetry/vppcalls/vpp2005" _ "go.ligato.io/vpp-agent/v3/plugins/telemetry/vppcalls/vpp2009" ) var debug = os.Getenv("DEBUG_TELEMETRY") != "" // Plugin registers Telemetry Plugin type Plugin struct { Deps handler vppcalls.TelemetryVppAPI statsPollerServer prometheusMetrics // From config file updatePeriod time.Duration disabled bool prometheusDisabled bool skipped map[string]bool wg sync.WaitGroup quit chan struct{} } type InterfaceIndexProvider interface { // GetInterfaceIndex gives read-only access to map with metadata of all configured // VPP 
interfaces. GetInterfaceIndex() ifaceidx.IfaceMetadataIndex } // Deps represents dependencies of Telemetry Plugin type Deps struct { infra.PluginDeps ServiceLabel servicelabel.ReaderAPI VPP govppmux.API Prometheus prom.API GRPC grpc.Server HTTPHandlers rest.HTTPHandlers IfPlugin InterfaceIndexProvider } // Init initializes Telemetry Plugin func (p *Plugin) Init() error { p.quit = make(chan struct{}) p.skipped = make(map[string]bool, 0) // Telemetry config file config, err := p.loadConfig() if err != nil { return err } if config != nil { // If telemetry is not enabled, skip plugin initialization if config.Disabled { p.Log.Info("Telemetry plugin disabled via config file") p.disabled = true return nil } // Disable prometheus metrics if set by config if config.PrometheusDisabled { p.Log.Info("Prometheus metrics disabled via config file") p.prometheusDisabled = true } else { // This prevents setting the update period to less than 5 seconds, // which can have significant performance hit. if config.PollingInterval > minimumUpdatePeriod { p.updatePeriod = config.PollingInterval p.Log.Infof("polling period changed to %v", p.updatePeriod) } else if config.PollingInterval > 0 { p.Log.Warnf("polling period has to be at least %s, using default: %v", minimumUpdatePeriod, defaultUpdatePeriod) } // Store map of skipped metrics for _, skip := range config.Skipped { p.skipped[skip] = true } } } // Register prometheus if !p.prometheusDisabled { if p.updatePeriod == 0 { p.updatePeriod = defaultUpdatePeriod } if err := p.registerPrometheus(); err != nil { return err } } // Setup stats poller p.statsPollerServer.log = p.Log.NewLogger("stats-poller") if err := p.setupStatsPoller(); err != nil { return errors.WithMessage(err, "setting up stats poller failed") } if p.HTTPHandlers != nil { p.HTTPHandlers.RegisterHTTPHandler("/metrics/{metric}", metricsHandler, "GET") } return nil } // AfterInit executes after initializion of Telemetry Plugin func (p *Plugin) AfterInit() error { // Do not 
start polling if telemetry is disabled if p.disabled || p.prometheusDisabled { return nil } p.startPeriodicUpdates() return nil } func (p *Plugin) setupStatsPoller() error { h := vppcalls.CompatibleTelemetryHandler(p.VPP) if h == nil { p.Log.Warnf("VPP telemetry handler unavailable") } else { p.statsPollerServer.handler = h } p.statsPollerServer.ifIndex = p.IfPlugin.GetInterfaceIndex() if p.GRPC != nil && p.GRPC.GetServer() != nil { configurator.RegisterStatsPollerServiceServer(p.GRPC.GetServer(), &p.statsPollerServer) } return nil } // Close is used to clean up resources used by Telemetry Plugin func (p *Plugin) Close() error { close(p.quit) p.wg.Wait() return nil } func (p *Plugin) startPeriodicUpdates() { p.handler = vppcalls.CompatibleTelemetryHandler(p.VPP) if p.handler == nil { p.Log.Warnf("VPP telemetry handler unavailable, skipping periodic updates") return } p.wg.Add(1) go p.periodicUpdates() } // periodic updates for the metrics data func (p *Plugin) periodicUpdates() { defer p.wg.Done() p.Log.Debugf("starting periodic updates (%v)", p.updatePeriod) defer p.Log.Debugf("stopping periodic updates") tick := time.NewTicker(p.updatePeriod) for { select { case <-tick.C: ctx := context.Background() p.updatePrometheus(ctx) case <-p.quit: return } } } func (p *Plugin) tracef(f string, a ...interface{}) { if debug && p.Log.GetLevel() >= logging.DebugLevel { s := fmt.Sprintf(f, a...) if len(s) > 250 { p.Log.Debugf("%s... 
(%d bytes omitted) ...%s", s[:200], len(s)-250, s[len(s)-50:]) return } p.Log.Debug(s) } } func metricsHandler(formatter *render.Render) http.HandlerFunc { return func(w http.ResponseWriter, req *http.Request) { vars := mux.Vars(req) if vars == nil { _ = formatter.JSON(w, http.StatusNotFound, struct{}{}) return } metric := vars["metric"] model, err := models.DefaultRegistry.GetModel(metric) if err != nil { _ = formatter.JSON(w, http.StatusNotFound, struct{ Error string }{err.Error()}) return } data := model.NewInstance() if err := metrics.Retrieve(data); err != nil { _ = formatter.JSON(w, http.StatusInternalServerError, struct{ Error string }{err.Error()}) return } _ = formatter.JSON(w, 200, data) } }
[ "\"DEBUG_TELEMETRY\"" ]
[]
[ "DEBUG_TELEMETRY" ]
[]
["DEBUG_TELEMETRY"]
go
1
0
controllers/password.go
/* Copyright (c) 2016 VMware, Inc. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package controllers import ( "bytes" "net/http" "os" "regexp" "text/template" "github.com/vmware/harbor/dao" "github.com/vmware/harbor/models" "github.com/vmware/harbor/utils" "github.com/vmware/harbor/utils/log" "github.com/astaxie/beego" ) // ChangePasswordController handles request to /changePassword type ChangePasswordController struct { BaseController } // Get renders the page for user to change password. func (cpc *ChangePasswordController) Get() { sessionUserID := cpc.GetSession("userId") if sessionUserID == nil { cpc.Redirect("/signIn", http.StatusFound) return } cpc.Data["Username"] = cpc.GetSession("username") cpc.ForwardTo("page_title_change_password", "change-password") } // ForgotPasswordController handles request to /forgotPassword type ForgotPasswordController struct { BaseController } // Get Renders the page for user to input Email to reset password. func (fpc *ForgotPasswordController) Get() { fpc.ForwardTo("page_title_forgot_password", "forgot-password") } type messageDetail struct { Hint string URL string UUID string } // SendEmail verifies the Email address and contact SMTP server to send reset password Email. 
func (cc *CommonController) SendEmail() { email := cc.GetString("email") pass, _ := regexp.MatchString(`^(([^<>()[\]\\.,;:\s@\"]+(\.[^<>()[\]\\.,;:\s@\"]+)*)|(\".+\"))@((\[[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\])|(([a-zA-Z\-0-9]+\.)+[a-zA-Z]{2,}))$`, email) if !pass { cc.CustomAbort(http.StatusBadRequest, "email_content_illegal") } else { queryUser := models.User{Email: email} exist, err := dao.UserExists(queryUser, "email") if err != nil { log.Errorf("Error occurred in UserExists: %v", err) cc.CustomAbort(http.StatusInternalServerError, "Internal error.") } if !exist { cc.CustomAbort(http.StatusNotFound, "email_does_not_exist") } messageTemplate, err := template.ParseFiles("views/reset-password-mail.tpl") if err != nil { log.Errorf("Parse email template file failed: %v", err) cc.CustomAbort(http.StatusInternalServerError, err.Error()) } message := new(bytes.Buffer) harborURL := os.Getenv("HARBOR_URL") if harborURL == "" { harborURL = "localhost" } uuid, err := dao.GenerateRandomString() if err != nil { log.Errorf("Error occurred in GenerateRandomString: %v", err) cc.CustomAbort(http.StatusInternalServerError, "Internal error.") } err = messageTemplate.Execute(message, messageDetail{ Hint: cc.Tr("reset_email_hint"), URL: harborURL, UUID: uuid, }) if err != nil { log.Errorf("Message template error: %v", err) cc.CustomAbort(http.StatusInternalServerError, "internal_error") } config, err := beego.AppConfig.GetSection("mail") if err != nil { log.Errorf("Can not load app.conf: %v", err) cc.CustomAbort(http.StatusInternalServerError, "internal_error") } mail := utils.Mail{ From: config["from"], To: []string{email}, Subject: cc.Tr("reset_email_subject"), Message: message.String()} err = mail.SendMail() if err != nil { log.Errorf("Send email failed: %v", err) cc.CustomAbort(http.StatusInternalServerError, "send_email_failed") } user := models.User{ResetUUID: uuid, Email: email} dao.UpdateUserResetUUID(user) } } // ResetPasswordController handles request to 
/resetPassword type ResetPasswordController struct { BaseController } // Get checks if reset_uuid in the reset link is valid and render the result page for user to reset password. func (rpc *ResetPasswordController) Get() { resetUUID := rpc.GetString("reset_uuid") if resetUUID == "" { log.Error("Reset uuid is blank.") rpc.Redirect("/", http.StatusFound) return } queryUser := models.User{ResetUUID: resetUUID} user, err := dao.GetUser(queryUser) if err != nil { log.Errorf("Error occurred in GetUser: %v", err) rpc.CustomAbort(http.StatusInternalServerError, "Internal error.") } if user != nil { rpc.Data["ResetUuid"] = user.ResetUUID rpc.ForwardTo("page_title_reset_password", "reset-password") } else { rpc.Redirect("/", http.StatusFound) } } // ResetPassword handles request from the reset page and reset password func (cc *CommonController) ResetPassword() { resetUUID := cc.GetString("reset_uuid") if resetUUID == "" { cc.CustomAbort(http.StatusBadRequest, "Reset uuid is blank.") } queryUser := models.User{ResetUUID: resetUUID} user, err := dao.GetUser(queryUser) if err != nil { log.Errorf("Error occurred in GetUser: %v", err) cc.CustomAbort(http.StatusInternalServerError, "Internal error.") } if user == nil { log.Error("User does not exist") cc.CustomAbort(http.StatusBadRequest, "User does not exist") } password := cc.GetString("password") if password != "" { user.Password = password err = dao.ResetUserPassword(*user) if err != nil { log.Errorf("Error occurred in ResetUserPassword: %v", err) cc.CustomAbort(http.StatusInternalServerError, "Internal error.") } } else { cc.CustomAbort(http.StatusBadRequest, "password_is_required") } }
[ "\"HARBOR_URL\"" ]
[]
[ "HARBOR_URL" ]
[]
["HARBOR_URL"]
go
1
0
src/lambda_function.py
import boto3 import logging import os import shutil from urllib.parse import urlparse, unquote import git import json logger = logging.getLogger() logger.setLevel(logging.INFO) WORK_DIR = '/tmp' TEMP_DIR = '.repo' TARGET_REFS = [ 'refs/heads/' + branch for branch in os.getenv('TARGET_BRANCH', '').split(',') ] BUCKET_NAME = os.getenv('BUCKET_NAME') USERNAME = os.getenv('USERNAME') PASSWORD = os.getenv('PASSWORD') FILE_PATH = os.getenv('FILE_PATH') SNS_TOPIC = os.getenv('SNS_TOPIC') s3 = boto3.resource('s3') sns = boto3.resource('sns') def lambda_handler(event, context): # backlog payload logger.info(event) payload = json.loads(unquote(event['body'].replace('payload=', ''))) logger.info(payload) if payload['ref'] in TARGET_REFS: # generate repository url _ = urlparse(payload['repository']['url']) url = _.scheme + '://' + USERNAME + ':' + PASSWORD + '@' + _.netloc + _.path + '.git' logger.info(url) # commit id commit = payload['after'] os.chdir(WORK_DIR) logger.info(os.listdir(os.getcwd())) if not os.path.isdir(TEMP_DIR): os.mkdir(TEMP_DIR) else: shutil.rmtree(TEMP_DIR) # git clone try: git.exec_command('clone', url, TEMP_DIR) git.exec_command('checkout', commit, cwd=TEMP_DIR) shutil.make_archive('src', 'zip', TEMP_DIR) logger.info(os.listdir(os.getcwd())) s3.meta.client.upload_file('src.zip', BUCKET_NAME, FILE_PATH) except Exception as e: logger.error(e) sns.Topic(SNS_TOPIC).publish( Subject='FAILED TO UPLOAD', Message=str(e) ) else: sns.Topic(SNS_TOPIC).publish( Subject='UPLOAD COMPLETE', Message=json.dumps(payload) ) return { 'statusCode': 200 }
[]
[]
[ "USERNAME", "PASSWORD", "TARGET_BRANCH", "BUCKET_NAME", "SNS_TOPIC", "FILE_PATH" ]
[]
["USERNAME", "PASSWORD", "TARGET_BRANCH", "BUCKET_NAME", "SNS_TOPIC", "FILE_PATH"]
python
6
0
cv/asgi.py
""" ASGI config for cv project. It exposes the ASGI callable as a module-level variable named ``application``. For more information on this file, see https://docs.djangoproject.com/en/3.0/howto/deployment/asgi/ """ import os from django.core.asgi import get_asgi_application os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'cv.settings') application = get_asgi_application()
[]
[]
[]
[]
[]
python
0
0
jira/agile/examples/board/filter/filter.go
package main import ( "context" "github.com/ctreminiom/go-atlassian/jira" "log" "os" ) func main() { var ( host = os.Getenv("HOST") mail = os.Getenv("MAIL") token = os.Getenv("TOKEN") ) atlassian, err := jira.New(nil, host) if err != nil { return } atlassian.Auth.SetBasicAuth(mail, token) atlassian.Auth.SetUserAgent("curl/7.54.0") var ( filterID = 10016 startAt = 0 maxResult = 50 ) boards, response, err := atlassian.Agile.Board.Filter(context.Background(), filterID, startAt, maxResult) if err != nil { if response != nil { log.Println("Response HTTP Response", response.Bytes.String()) } log.Fatal(err) } log.Println("Response HTTP Code", response.Code) log.Println("HTTP Endpoint Used", response.Endpoint) for _, board := range boards.Values { log.Println(board.Name, board.ID, board.Type) } }
[ "\"HOST\"", "\"MAIL\"", "\"TOKEN\"" ]
[]
[ "MAIL", "HOST", "TOKEN" ]
[]
["MAIL", "HOST", "TOKEN"]
go
3
0
Lib/test/test_embed.py
# Run the tests in Programs/_testembed.c (tests for the CPython embedding APIs) from test import support import unittest from collections import namedtuple import os import re import subprocess import sys class EmbeddingTestsMixin: def setUp(self): here = os.path.abspath(__file__) basepath = os.path.dirname(os.path.dirname(os.path.dirname(here))) exename = "_testembed" if sys.platform.startswith("win"): ext = ("_d" if "_d" in sys.executable else "") + ".exe" exename += ext exepath = os.path.dirname(sys.executable) else: exepath = os.path.join(basepath, "Programs") self.test_exe = exe = os.path.join(exepath, exename) if not os.path.exists(exe): self.skipTest("%r doesn't exist" % exe) # This is needed otherwise we get a fatal error: # "Py_Initialize: Unable to get the locale encoding # LookupError: no codec search functions registered: can't find encoding" self.oldcwd = os.getcwd() os.chdir(basepath) def tearDown(self): os.chdir(self.oldcwd) def run_embedded_interpreter(self, *args, env=None): """Runs a test in the embedded interpreter""" cmd = [self.test_exe] cmd.extend(args) if env is not None and sys.platform == 'win32': # Windows requires at least the SYSTEMROOT environment variable to # start Python. 
env = env.copy() env['SYSTEMROOT'] = os.environ['SYSTEMROOT'] p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True, env=env) (out, err) = p.communicate() if p.returncode != 0 and support.verbose: print(f"--- {cmd} failed ---") print(f"stdout:\n{out}") print(f"stderr:\n{err}") print(f"------") self.assertEqual(p.returncode, 0, "bad returncode %d, stderr is %r" % (p.returncode, err)) return out, err def run_repeated_init_and_subinterpreters(self): out, err = self.run_embedded_interpreter("repeated_init_and_subinterpreters") self.assertEqual(err, "") # The output from _testembed looks like this: # --- Pass 0 --- # interp 0 <0x1cf9330>, thread state <0x1cf9700>: id(modules) = 139650431942728 # interp 1 <0x1d4f690>, thread state <0x1d35350>: id(modules) = 139650431165784 # interp 2 <0x1d5a690>, thread state <0x1d99ed0>: id(modules) = 139650413140368 # interp 3 <0x1d4f690>, thread state <0x1dc3340>: id(modules) = 139650412862200 # interp 0 <0x1cf9330>, thread state <0x1cf9700>: id(modules) = 139650431942728 # --- Pass 1 --- # ... interp_pat = (r"^interp (\d+) <(0x[\dA-F]+)>, " r"thread state <(0x[\dA-F]+)>: " r"id\(modules\) = ([\d]+)$") Interp = namedtuple("Interp", "id interp tstate modules") numloops = 0 current_run = [] for line in out.splitlines(): if line == "--- Pass {} ---".format(numloops): self.assertEqual(len(current_run), 0) if support.verbose > 1: print(line) numloops += 1 continue self.assertLess(len(current_run), 5) match = re.match(interp_pat, line) if match is None: self.assertRegex(line, interp_pat) # Parse the line from the loop. The first line is the main # interpreter and the 3 afterward are subinterpreters. interp = Interp(*match.groups()) if support.verbose > 1: print(interp) self.assertTrue(interp.interp) self.assertTrue(interp.tstate) self.assertTrue(interp.modules) current_run.append(interp) # The last line in the loop should be the same as the first. 
if len(current_run) == 5: main = current_run[0] self.assertEqual(interp, main) yield current_run current_run = [] class EmbeddingTests(EmbeddingTestsMixin, unittest.TestCase): def test_subinterps_main(self): for run in self.run_repeated_init_and_subinterpreters(): main = run[0] self.assertEqual(main.id, '0') def test_subinterps_different_ids(self): for run in self.run_repeated_init_and_subinterpreters(): main, *subs, _ = run mainid = int(main.id) for i, sub in enumerate(subs): self.assertEqual(sub.id, str(mainid + i + 1)) def test_subinterps_distinct_state(self): for run in self.run_repeated_init_and_subinterpreters(): main, *subs, _ = run if '0x0' in main: # XXX Fix on Windows (and other platforms): something # is going on with the pointers in Programs/_testembed.c. # interp.interp is 0x0 and interp.modules is the same # between interpreters. raise unittest.SkipTest('platform prints pointers as 0x0') for sub in subs: # A new subinterpreter may have the same # PyInterpreterState pointer as a previous one if # the earlier one has already been destroyed. So # we compare with the main interpreter. The same # applies to tstate. 
self.assertNotEqual(sub.interp, main.interp) self.assertNotEqual(sub.tstate, main.tstate) self.assertNotEqual(sub.modules, main.modules) def test_forced_io_encoding(self): # Checks forced configuration of embedded interpreter IO streams env = dict(os.environ, PYTHONIOENCODING="utf-8:surrogateescape") out, err = self.run_embedded_interpreter("forced_io_encoding", env=env) if support.verbose > 1: print() print(out) print(err) expected_stream_encoding = "utf-8" expected_errors = "surrogateescape" expected_output = '\n'.join([ "--- Use defaults ---", "Expected encoding: default", "Expected errors: default", "stdin: {in_encoding}:{errors}", "stdout: {out_encoding}:{errors}", "stderr: {out_encoding}:backslashreplace", "--- Set errors only ---", "Expected encoding: default", "Expected errors: ignore", "stdin: {in_encoding}:ignore", "stdout: {out_encoding}:ignore", "stderr: {out_encoding}:backslashreplace", "--- Set encoding only ---", "Expected encoding: latin-1", "Expected errors: default", "stdin: latin-1:{errors}", "stdout: latin-1:{errors}", "stderr: latin-1:backslashreplace", "--- Set encoding and errors ---", "Expected encoding: latin-1", "Expected errors: replace", "stdin: latin-1:replace", "stdout: latin-1:replace", "stderr: latin-1:backslashreplace"]) expected_output = expected_output.format( in_encoding=expected_stream_encoding, out_encoding=expected_stream_encoding, errors=expected_errors) # This is useful if we ever trip over odd platform behaviour self.maxDiff = None self.assertEqual(out.strip(), expected_output) def test_pre_initialization_api(self): """ Checks some key parts of the C-API that need to work before the runtine is initialized (via Py_Initialize()). 
""" env = dict(os.environ, PYTHONPATH=os.pathsep.join(sys.path)) out, err = self.run_embedded_interpreter("pre_initialization_api", env=env) if sys.platform == "win32": expected_path = self.test_exe else: expected_path = os.path.join(os.getcwd(), "spam") expected_output = f"sys.executable: {expected_path}\n" self.assertIn(expected_output, out) self.assertEqual(err, '') def test_pre_initialization_sys_options(self): """ Checks that sys.warnoptions and sys._xoptions can be set before the runtime is initialized (otherwise they won't be effective). """ env = dict(os.environ, PYTHONPATH=os.pathsep.join(sys.path)) out, err = self.run_embedded_interpreter( "pre_initialization_sys_options", env=env) expected_output = ( "sys.warnoptions: ['once', 'module', 'default']\n" "sys._xoptions: {'not_an_option': '1', 'also_not_an_option': '2'}\n" "warnings.filters[:3]: ['default', 'module', 'once']\n" ) self.assertIn(expected_output, out) self.assertEqual(err, '') def test_bpo20891(self): """ bpo-20891: Calling PyGILState_Ensure in a non-Python thread before calling PyEval_InitThreads() must not crash. PyGILState_Ensure() must call PyEval_InitThreads() for us in this case. """ out, err = self.run_embedded_interpreter("bpo20891") self.assertEqual(out, '') self.assertEqual(err, '') def test_initialize_twice(self): """ bpo-33932: Calling Py_Initialize() twice should do nothing (and not crash!). """ out, err = self.run_embedded_interpreter("initialize_twice") self.assertEqual(out, '') self.assertEqual(err, '') def test_initialize_pymain(self): """ bpo-34008: Calling Py_Main() after Py_Initialize() must not fail. 
""" out, err = self.run_embedded_interpreter("initialize_pymain") self.assertEqual(out.rstrip(), "Py_Main() after Py_Initialize: sys.argv=['-c', 'arg2']") self.assertEqual(err, '') class InitConfigTests(EmbeddingTestsMixin, unittest.TestCase): maxDiff = 4096 DEFAULT_CONFIG = { 'install_signal_handlers': 1, 'Py_IgnoreEnvironmentFlag': 0, 'use_hash_seed': 0, 'hash_seed': 0, 'allocator': '(null)', 'dev_mode': 0, 'faulthandler': 0, 'tracemalloc': 0, 'import_time': 0, 'show_ref_count': 0, 'show_alloc_count': 0, 'dump_refs': 0, 'malloc_stats': 0, 'utf8_mode': 0, 'program_name': './_testembed', 'argc': 0, 'argv': '[]', 'program': '(null)', 'Py_IsolatedFlag': 0, 'Py_NoSiteFlag': 0, 'Py_BytesWarningFlag': 0, 'Py_InspectFlag': 0, 'Py_InteractiveFlag': 0, 'Py_OptimizeFlag': 0, 'Py_DebugFlag': 0, 'Py_DontWriteBytecodeFlag': 0, 'Py_VerboseFlag': 0, 'Py_QuietFlag': 0, 'Py_NoUserSiteDirectory': 0, 'Py_UnbufferedStdioFlag': 0, '_disable_importlib': 0, 'Py_FrozenFlag': 0, '_coerce_c_locale': 0, '_coerce_c_locale_warn': 0, } def check_config(self, testname, expected): env = dict(os.environ) for key in list(env): if key.startswith('PYTHON'): del env[key] # Disable C locale coercion and UTF-8 mode to not depend # on the current locale env['PYTHONCOERCECLOCALE'] = '0' env['PYTHONUTF8'] = '0' out, err = self.run_embedded_interpreter(testname, env=env) # Ignore err expected = dict(self.DEFAULT_CONFIG, **expected) for key, value in expected.items(): expected[key] = str(value) config = {} for line in out.splitlines(): key, value = line.split(' = ', 1) config[key] = value self.assertEqual(config, expected) def test_init_default_config(self): self.check_config("init_default_config", {}) def test_init_global_config(self): config = { 'program_name': './globalvar', 'Py_NoSiteFlag': 1, 'Py_BytesWarningFlag': 1, 'Py_InspectFlag': 1, 'Py_InteractiveFlag': 1, 'Py_OptimizeFlag': 2, 'Py_DontWriteBytecodeFlag': 1, 'Py_VerboseFlag': 1, 'Py_QuietFlag': 1, 'Py_UnbufferedStdioFlag': 1, 'utf8_mode': 1, 
'Py_NoUserSiteDirectory': 1, 'Py_FrozenFlag': 1, } self.check_config("init_global_config", config) def test_init_from_config(self): config = { 'install_signal_handlers': 0, 'use_hash_seed': 1, 'hash_seed': 123, 'allocator': 'malloc_debug', 'tracemalloc': 2, 'import_time': 1, 'show_ref_count': 1, 'show_alloc_count': 1, 'malloc_stats': 1, 'utf8_mode': 1, 'program_name': './conf_program_name', 'program': 'conf_program', 'faulthandler': 1, } self.check_config("init_from_config", config) def test_init_env(self): config = { 'use_hash_seed': 1, 'hash_seed': 42, 'allocator': 'malloc_debug', 'tracemalloc': 2, 'import_time': 1, 'malloc_stats': 1, 'utf8_mode': 1, 'Py_InspectFlag': 1, 'Py_OptimizeFlag': 2, 'Py_DontWriteBytecodeFlag': 1, 'Py_VerboseFlag': 1, 'Py_UnbufferedStdioFlag': 1, 'Py_NoUserSiteDirectory': 1, 'faulthandler': 1, 'dev_mode': 1, } self.check_config("init_env", config) def test_init_dev_mode(self): config = { 'dev_mode': 1, 'faulthandler': 1, 'allocator': 'debug', } self.check_config("init_dev_mode", config) def test_init_isolated(self): config = { 'Py_IsolatedFlag': 1, 'Py_IgnoreEnvironmentFlag': 1, 'Py_NoUserSiteDirectory': 1, } self.check_config("init_isolated", config) if __name__ == "__main__": unittest.main()
[]
[]
[ "SYSTEMROOT" ]
[]
["SYSTEMROOT"]
python
1
0
src/cli/cache.go
package cli import ( "fmt" "oh-my-posh/environment" "os" "os/exec" "path/filepath" "strings" "github.com/spf13/cobra" ) // getCmd represents the get command var getCache = &cobra.Command{ Use: "cache [path|clear|edit]", Short: "Interact with the oh-my-posh cache", Long: `Interact with the oh-my-posh cache. You can do the following: - path: list cache path - clear: remove all cache values - edit: edit cache values`, ValidArgs: []string{ "path", "clear", "edit", }, Args: NoArgsOrOneValidArg, Run: func(cmd *cobra.Command, args []string) { if len(args) == 0 { _ = cmd.Help() return } env := &environment.ShellEnvironment{ Version: cliVersion, } env.Init(false) defer env.Close() switch args[0] { case "path": fmt.Print(env.CachePath()) case "clear": cacheFilePath := filepath.Join(env.CachePath(), environment.CacheFile) err := os.Remove(cacheFilePath) if err != nil { fmt.Println(err.Error()) return } fmt.Printf("removed cache file at %s\n", cacheFilePath) case "edit": cacheFilePath := filepath.Join(env.CachePath(), environment.CacheFile) editFileWithEditor(cacheFilePath) } }, } func init() { // nolint:gochecknoinits rootCmd.AddCommand(getCache) } func editFileWithEditor(file string) { editor := os.Getenv("EDITOR") var args []string if strings.Contains(editor, " ") { splitted := strings.Split(editor, " ") editor = splitted[0] args = splitted[1:] } args = append(args, file) cmd := exec.Command(editor, args...) err := cmd.Run() if err != nil { fmt.Println(err.Error()) } }
[ "\"EDITOR\"" ]
[]
[ "EDITOR" ]
[]
["EDITOR"]
go
1
0
django_vest/config/backends.py
# coding: utf-8 from __future__ import unicode_literals import os from django.conf import settings as django_settings from django.utils.functional import cached_property from django_vest.fields import get_field class Simple(object): """ Simple wrapper around settings file. """ source = django_settings def __getattr__(self, name): return getattr(self.source, name) @property def CURRENT_THEME(self): """ Trying to getting `CURRENT_THEME` parameter from settings or os env. """ return getattr(self.source, 'CURRENT_THEME', None) @property def DEFAULT_THEME(self): return getattr(self.source, 'DEFAULT_THEME', None) class Env(object): """ Receive settings for OS env. """ @property def CURRENT_THEME(self): """ Trying to getting `CURRENT_THEME` parameter from settings or os env. """ return os.environ.get('DJANGO_VEST_CURRENT_THEME', None) @property def DEFAULT_THEME(self): return os.environ.get('DJANGO_VEST_DEFAULT_THEME', None) class Database(object): """ Receive `CURRENT_THEME` for db field (django_vest.fields.VestField). """ @property def CURRENT_THEME(self): field = get_field() settings = field.model.objects.first() if settings: return getattr(settings, field.name) simple = Simple() env = Env() database = Database()
[]
[]
[ "DJANGO_VEST_DEFAULT_THEME", "DJANGO_VEST_CURRENT_THEME" ]
[]
["DJANGO_VEST_DEFAULT_THEME", "DJANGO_VEST_CURRENT_THEME"]
python
2
0
pkg/broker/tasks.go
package broker import ( "bytes" "context" "crypto/hmac" "crypto/sha256" "encoding/base64" "encoding/json" "errors" "github.com/golang/glog" "net/http" "os" "time" ) type TaskAction string const ( DeleteTask TaskAction = "delete" ResyncFromProviderTask TaskAction = "resync-from-provider" ResyncFromProviderUntilAvailableTask TaskAction = "resync-until-available" NotifyCreateServiceWebhookTask TaskAction = "notify-create-service-webhook" NotifyCreateBindingWebhookTask TaskAction = "notify-create-binding-webhook" ChangeProvidersTask TaskAction = "change-providers" ChangePlansTask TaskAction = "change-plans" RestoreTask TaskAction = "restore" PerformPostProvisionTask TaskAction = "perform-post-provision" ) type Task struct { Id string Action TaskAction ResourceId string Status string Retries int64 Metadata string Result string Started *time.Time Finished *time.Time } type WebhookTaskMetadata struct { Url string `json:"url"` Secret string `json:"secret"` } type ChangeProvidersTaskMetadata struct { Plan string `json:"plan"` } type ChangePlansTaskMetadata struct { Plan string `json:"plan"` } type RestoreTaskMetadata struct { Backup string `json:"backup"` } func FinishedTask(storage Storage, taskId string, retries int64, result string, status string) { var t = time.Now() err := storage.UpdateTask(taskId, &status, &retries, nil, &result, nil, &t) if err != nil { glog.Errorf("Unable to update task %s due to: %s (taskId: %s, retries: %d, result: [%s], status: [%s]\n", taskId, err.Error(), taskId, retries, result, status) } } func UpdateTaskStatus(storage Storage, taskId string, retries int64, result string, status string) { err := storage.UpdateTask(taskId, &status, &retries, nil, &result, nil, nil) if err != nil { glog.Errorf("Unable to update task %s due to: %s (taskId: %s, retries: %d, result: [%s], status: [%s]\n", taskId, err.Error(), taskId, retries, result, status) } } func RunPreprovisionTasks(ctx context.Context, o Options, namePrefix string, storage Storage, wait 
int64) { t := time.NewTicker(time.Second * time.Duration(wait)) dbEntries, err := storage.StartProvisioningTasks() if err != nil { glog.Errorf("Get pending tasks failed: %s\n", err.Error()) return } for _, entry := range dbEntries { glog.Infof("Starting preprovisioning database: %s with plan: %s\n", entry.Id, entry.PlanId) plan, err := storage.GetPlanByID(entry.PlanId) if err != nil { glog.Errorf("Unable to provision, cannot find plan: %s, %s\n", entry.PlanId, err.Error()) storage.NukeInstance(entry.Id) continue } provider, err := GetProviderByPlan(namePrefix, plan) if err != nil { glog.Errorf("Unable to provision, cannot find provider (GetProviderByPlan failed): %s\n", err.Error()) storage.NukeInstance(entry.Id) continue } Instance, err := provider.Provision(entry.Id, plan, "preprovisioned") if err != nil { glog.Errorf("Error provisioning database (%s): %s\n", plan.ID, err.Error()) storage.NukeInstance(entry.Id) continue } if err = storage.UpdateInstance(Instance, Instance.Plan.ID); err != nil { glog.Errorf("Error inserting record into provisioned table: %s\n", err.Error()) if err = provider.Deprovision(Instance, false); err != nil { glog.Errorf("Error cleaning up (deprovision failed) after insert record failed but provision succeeded (Database Id:%s Name: %s) %s\n", Instance.Id, Instance.Name, err.Error()) if _, err = storage.AddTask(Instance.Id, DeleteTask, Instance.Name); err != nil { glog.Errorf("Error: Unable to add task to delete instance, WE HAVE AN ORPHAN! (%s): %s\n", Instance.Name, err.Error()) } } continue } if !IsAvailable(Instance.Status) { if _, err = storage.AddTask(Instance.Id, ResyncFromProviderUntilAvailableTask, ""); err != nil { glog.Errorf("Error: Unable to schedule resync from provider! 
(%s): %s\n", Instance.Name, err.Error()) } } glog.Infof("Finished preprovisioning database: %s with plan: %s\n", entry.Id, entry.PlanId) <-t.C } } func TickTocPreprovisionTasks(ctx context.Context, o Options, namePrefix string, storage Storage) { next_check := time.NewTicker(time.Second * 60 * 5) for { RunPreprovisionTasks(ctx, o, namePrefix, storage, 60) <-next_check.C } } func UpgradeWithinProviders(storage Storage, fromDb *Instance, toPlanId string, namePrefix string) (string, error) { toPlan, err := storage.GetPlanByID(toPlanId) if err != nil { return "", err } fromProvider, err := GetProviderByPlan(namePrefix, fromDb.Plan) if err != nil { return "", err } if toPlanId == fromDb.Plan.ID { return "", errors.New("Cannot upgrade to the same plan") } if toPlan.Provider != fromDb.Plan.Provider { return UpgradeAcrossProviders(storage, fromDb, toPlanId, namePrefix) } // This could take a very long time. Instance, err := fromProvider.Modify(fromDb, toPlan) if err != nil && err.Error() == "This feature is not available on this plan." { return UpgradeAcrossProviders(storage, fromDb, toPlanId, namePrefix) } if err != nil { return "", err } if err = storage.UpdateInstance(Instance, Instance.Plan.ID); err != nil { glog.Errorf("ERROR: Cannot update instance in database after upgrade change %s (to plan: %s) %s\n", Instance.Name, Instance.Plan.ID, err.Error()) return "", err } if !IsAvailable(Instance.Status) { if _, err = storage.AddTask(Instance.Id, ResyncFromProviderTask, ""); err != nil { glog.Errorf("Error: Unable to schedule resync from provider! 
(%s): %s\n", Instance.Name, err.Error()) } } return "", err } func UpgradeAcrossProviders(storage Storage, from *Instance, toPlanId string, namePrefix string) (string, error) { if from == nil { return "", errors.New("Instance from was nil, cannot upgrade across providers with no from resource.") } if from.Engine != "memcached" { return "", errors.New("Redis instances cannot be upgraded across providers.") } toPlan, err := storage.GetPlanByID(toPlanId) if err != nil { return "", err } fromProvider, err := GetProviderByPlan(namePrefix, from.Plan) if err != nil { return "", err } toProvider, err := GetProviderByPlan(namePrefix, toPlan) if err != nil { return "", err } // Memcached is holds no state, create the new one, remove the old one, update the db with the same id. newInstance, err := toProvider.Provision(from.Id, toPlan, "") if err != nil { return "", err } if err = fromProvider.Deprovision(from, true); err != nil { glog.Errorf("ERROR: Unable to deprovision old instance, attempting to remove potential orphan %#+v\n", newInstance) if err2 := toProvider.Deprovision(newInstance, true); err2 != nil { glog.Errorf("ERROR FATAL: We could not deprovision the orphaned database, we're now leaking memcached instances! %#+v\n", newInstance) } return "", err } if err = storage.UpdateInstance(newInstance, toPlan.ID); err != nil { glog.Errorf("ERROR: Cannot update instance of memcached after upgrade change %s (to plan: %s) %s\n", from.Name, from.Plan.ID, err.Error()) return "", err } if !IsAvailable(newInstance.Status) { if _, err = storage.AddTask(newInstance.Id, ResyncFromProviderTask, ""); err != nil { glog.Errorf("Error: Unable to schedule resync from provider! 
(%s): %s\n", newInstance.Name, err.Error()) } } return "", err } func RestoreBackup(storage Storage, instance *Instance, namePrefix string, backup string) error { provider, err := GetProviderByPlan(namePrefix, instance.Plan) if err != nil { glog.Errorf("Unable to restore backup, cannot find provider (GetProviderByPlan failed): %s\n", err.Error()) return err } if err = provider.RestoreBackup(instance, backup); err != nil { glog.Errorf("Unable to restore backup: %s\n", err.Error()) return err } return nil } func RunWorkerTasks(ctx context.Context, o Options, namePrefix string, storage Storage) error { t := time.NewTicker(time.Second * 60) for { <-t.C storage.WarnOnUnfinishedTasks() task, err := storage.PopPendingTask() if err != nil && err.Error() != "sql: no rows in result set" { glog.Errorf("Getting a pending task failed: %s\n", err.Error()) return err } else if err != nil && err.Error() == "sql: no rows in result set" { // Nothing to do... continue } glog.Infof("Started task: %s\n", task.Id) if task.Action == DeleteTask { glog.Infof("Delete and deprovision database for task: %s\n", task.Id) if task.Retries >= 10 { glog.Infof("Retry limit was reached for task: %s %d\n", task.Id, task.Retries) FinishedTask(storage, task.Id, task.Retries, "Unable to delete database "+task.ResourceId+" as it failed multiple times ("+task.Result+")", "failed") continue } Instance, err := GetInstanceById(namePrefix, storage, task.ResourceId) if err != nil { UpdateTaskStatus(storage, task.Id, task.Retries+1, "Cannot get Instance: "+err.Error(), "pending") continue } provider, err := GetProviderByPlan(namePrefix, Instance.Plan) if err != nil { UpdateTaskStatus(storage, task.Id, task.Retries+1, "Cannot get provider: "+err.Error(), "pending") continue } if err = provider.Deprovision(Instance, true); err != nil { UpdateTaskStatus(storage, task.Id, task.Retries+1, "Failed to deprovision: "+err.Error(), "pending") continue } if err = storage.DeleteInstance(Instance); err != nil { 
UpdateTaskStatus(storage, task.Id, task.Retries+1, "Failed to delete: "+err.Error(), "pending") continue } FinishedTask(storage, task.Id, task.Retries, "", "finished") } else if task.Action == ResyncFromProviderTask { glog.Infof("Resyncing from provider for task: %s\n", task.Id) if task.Retries >= 60 { glog.Infof("Retry limit was reached for task: %s %d\n", task.Id, task.Retries) FinishedTask(storage, task.Id, task.Retries, "Unable to resync information from provider for database "+task.ResourceId+" as it failed multiple times ("+task.Result+")", "failed") continue } Instance, err := GetInstanceById(namePrefix, storage, task.ResourceId) if err != nil { glog.Infof("Failed to get provider instance for task: %s, %s\n", task.Id, err.Error()) UpdateTaskStatus(storage, task.Id, task.Retries+1, "Cannot get Instance: "+err.Error(), "pending") continue } Entry, err := storage.GetInstance(task.ResourceId) if err != nil { glog.Infof("Failed to get database instance for task: %s, %s\n", task.Id, err.Error()) UpdateTaskStatus(storage, task.Id, task.Retries+1, "Cannot get Entry: "+err.Error(), "pending") continue } if Instance.Status != Entry.Status { if err = storage.UpdateInstance(Instance, Instance.Plan.ID); err != nil { UpdateTaskStatus(storage, task.Id, task.Retries+1, "Failed to update instance: "+err.Error(), "pending") continue } } else { glog.Infof("Status did not change at provider for task: %s\n", task.Id) UpdateTaskStatus(storage, task.Id, task.Retries+1, "No change in status since last check", "pending") continue } FinishedTask(storage, task.Id, task.Retries, "", "finished") } else if task.Action == ResyncFromProviderUntilAvailableTask { glog.Infof("Resyncing from provider until available for task: %s\n", task.Id) if task.Retries >= 60 { glog.Infof("Retry limit was reached for task: %s %d\n", task.Id, task.Retries) FinishedTask(storage, task.Id, task.Retries, "Unable to resync information from provider for database "+task.ResourceId+" as it failed multiple times 
("+task.Result+")", "failed") continue } Instance, err := GetInstanceById(namePrefix, storage, task.ResourceId) if err != nil { glog.Infof("Failed to get provider instance for task: %s, %s\n", task.Id, err.Error()) UpdateTaskStatus(storage, task.Id, task.Retries+1, "Cannot get Instance: "+err.Error(), "pending") continue } if err = storage.UpdateInstance(Instance, Instance.Plan.ID); err != nil { UpdateTaskStatus(storage, task.Id, task.Retries+1, "Failed to update instance: "+err.Error(), "pending") continue } if !IsAvailable(Instance.Status) { glog.Infof("Status did not change at provider for task: %s\n", task.Id) UpdateTaskStatus(storage, task.Id, task.Retries+1, "No change in status since last check ("+Instance.Status+")", "pending") continue } FinishedTask(storage, task.Id, task.Retries, "", "finished") } else if task.Action == PerformPostProvisionTask { glog.Infof("Resyncing from provider until available (for perform post provision) for task: %s\n", task.Id) if task.Retries >= 60 { glog.Infof("Retry limit was reached for task: %s %d\n", task.Id, task.Retries) FinishedTask(storage, task.Id, task.Retries, "Unable to resync information from provider for database "+task.ResourceId+" as it failed multiple times ("+task.Result+")", "failed") continue } Instance, err := GetInstanceById(namePrefix, storage, task.ResourceId) if err != nil { glog.Infof("Failed to get provider instance for task: %s, %s\n", task.Id, err.Error()) UpdateTaskStatus(storage, task.Id, task.Retries, "Cannot get Instance: "+err.Error(), "pending") continue } if err = storage.UpdateInstance(Instance, Instance.Plan.ID); err != nil { UpdateTaskStatus(storage, task.Id, task.Retries+1, "Failed to update instance: "+err.Error(), "pending") continue } if !IsAvailable(Instance.Status) { glog.Infof("Status did not change at provider for task: %s\n", task.Id) UpdateTaskStatus(storage, task.Id, task.Retries+1, "No change in status since last check ("+Instance.Status+")", "pending") continue } provider, err 
:= GetProviderByPlan(namePrefix, Instance.Plan) if err != nil { UpdateTaskStatus(storage, task.Id, task.Retries, "Cannot get provider: "+err.Error(), "pending") continue } newInstance, err := provider.PerformPostProvision(Instance) if err != nil { UpdateTaskStatus(storage, task.Id, task.Retries+1, "Failed to update instance: "+err.Error(), "pending") continue } if err = storage.UpdateInstance(newInstance, newInstance.Plan.ID); err != nil { UpdateTaskStatus(storage, task.Id, task.Retries+1, "Failed to update instance after post provision: "+err.Error(), "pending") continue } FinishedTask(storage, task.Id, task.Retries, "", "finished") } else if task.Action == NotifyCreateServiceWebhookTask { if task.Retries >= 60 { FinishedTask(storage, task.Id, task.Retries, "Unable to deliver webhook: "+task.Result, "failed") continue } Instance, err := GetInstanceById(namePrefix, storage, task.ResourceId) if err != nil { UpdateTaskStatus(storage, task.Id, task.Retries+1, "Cannot get Instance: "+err.Error(), "pending") continue } if !IsAvailable(Instance.Status) { glog.Infof("Status did not change at provider for task: %s\n", task.Id) UpdateTaskStatus(storage, task.Id, task.Retries+1, "No change in status since last check", "pending") continue } byteData, err := json.Marshal(map[string]interface{}{"state": "succeeded", "description": "available"}) // seems like this would be more useful, but whatevs: byteData, err := json.Marshal(Instance) if err != nil { UpdateTaskStatus(storage, task.Id, task.Retries, "Cannot marshal Instance to json: "+err.Error(), "pending") continue } var taskMetaData WebhookTaskMetadata err = json.Unmarshal([]byte(task.Metadata), &taskMetaData) if err != nil { glog.Infof("Cannot unmarshal task metadata to callback on create service: %s, %s\n", task.Id, err.Error()) UpdateTaskStatus(storage, task.Id, task.Retries, "Cannot unmarshal task metadata to callback on create service: "+err.Error(), "pending") continue } h := hmac.New(sha256.New, 
[]byte(taskMetaData.Secret)) h.Write(byteData) sha := base64.StdEncoding.EncodeToString(h.Sum(nil)) client := &http.Client{} req, err := http.NewRequest("POST", taskMetaData.Url, bytes.NewReader(byteData)) if err != nil { UpdateTaskStatus(storage, task.Id, task.Retries+1, "Failed to create http post request: "+err.Error(), "pending") continue } req.Header.Add("content-type", "application/json") req.Header.Add("x-osb-signature", sha) resp, err := client.Do(req) if err != nil { UpdateTaskStatus(storage, task.Id, task.Retries+1, "Failed to send http post operation: "+err.Error(), "pending") continue } resp.Body.Close() // ignore it, we dont want to hear it. if os.Getenv("RETRY_WEBHOOKS") != "" { if resp.StatusCode < 200 || resp.StatusCode > 399 { UpdateTaskStatus(storage, task.Id, task.Retries+1, "Got invalid http status code from hook: "+resp.Status, "pending") continue } FinishedTask(storage, task.Id, task.Retries, resp.Status, "finished") } else { if resp.StatusCode < 200 || resp.StatusCode > 399 { UpdateTaskStatus(storage, task.Id, task.Retries+1, "Got invalid http status code from hook: "+resp.Status, "failed") } else { FinishedTask(storage, task.Id, task.Retries, resp.Status, "finished") } } } else if task.Action == ChangePlansTask { glog.Infof("Changing plans for database: %s\n", task.Id) if task.Retries >= 60 { glog.Infof("Retry limit was reached for task: %s %d\n", task.Id, task.Retries) FinishedTask(storage, task.Id, task.Retries, "Unable to change plans for database "+task.ResourceId+" as it failed multiple times ("+task.Result+")", "failed") continue } Instance, err := GetInstanceById(namePrefix, storage, task.ResourceId) if err != nil { glog.Infof("Failed to get provider instance for task: %s, %s\n", task.Id, err.Error()) UpdateTaskStatus(storage, task.Id, task.Retries, "Cannot get Instance: "+err.Error(), "pending") continue } var taskMetaData ChangePlansTaskMetadata err = json.Unmarshal([]byte(task.Metadata), &taskMetaData) if err != nil { 
glog.Infof("Cannot unmarshal task metadata to change providers: %s, %s\n", task.Id, err.Error()) UpdateTaskStatus(storage, task.Id, task.Retries+1, "Cannot unmarshal task metadata to change providers: "+err.Error(), "pending") continue } output, err := UpgradeWithinProviders(storage, Instance, taskMetaData.Plan, namePrefix) if err != nil { glog.Infof("Cannot change plans for: %s, %s\n", task.Id, err.Error()) UpdateTaskStatus(storage, task.Id, task.Retries+1, "Cannot change plans: "+err.Error(), "pending") continue } FinishedTask(storage, task.Id, task.Retries, output, "finished") } else if task.Action == ChangeProvidersTask { glog.Infof("Changing providers for database: %s\n", task.Id) if task.Retries >= 60 { glog.Infof("Retry limit was reached for task: %s %d\n", task.Id, task.Retries) FinishedTask(storage, task.Id, task.Retries, "Unable to resync information from provider for database "+task.ResourceId+" as it failed multiple times ("+task.Result+")", "failed") continue } Instance, err := GetInstanceById(namePrefix, storage, task.ResourceId) if err != nil { glog.Infof("Failed to get provider instance for task: %s, %s\n", task.Id, err.Error()) UpdateTaskStatus(storage, task.Id, task.Retries, "Cannot get Instance: "+err.Error(), "pending") continue } var taskMetaData ChangeProvidersTaskMetadata err = json.Unmarshal([]byte(task.Metadata), &taskMetaData) if err != nil { glog.Infof("Cannot unmarshal task metadata to change providers: %s, %s\n", task.Id, err.Error()) UpdateTaskStatus(storage, task.Id, task.Retries, "Cannot unmarshal task metadata to change providers: "+err.Error(), "pending") continue } output, err := UpgradeAcrossProviders(storage, Instance, taskMetaData.Plan, namePrefix) if err != nil { glog.Infof("Cannot switch providers: %s, %s\n", task.Id, err.Error()) UpdateTaskStatus(storage, task.Id, task.Retries, "Cannot switch providers: "+err.Error(), "pending") continue } FinishedTask(storage, task.Id, task.Retries, output, "finished") } else if task.Action 
== RestoreTask { glog.Infof("Restoring database for: %s\n", task.Id) if task.Retries >= 60 { glog.Infof("Retry limit was reached for task: %s %d\n", task.Id, task.Retries) FinishedTask(storage, task.Id, task.Retries, "Unable to restore database "+task.ResourceId+" as it failed multiple times ("+task.Result+")", "failed") continue } instance, err := GetInstanceById(namePrefix, storage, task.ResourceId) if err != nil { glog.Infof("Failed to get provider instance for task: %s, %s\n", task.Id, err.Error()) UpdateTaskStatus(storage, task.Id, task.Retries, "Cannot get instance: "+err.Error(), "pending") continue } var taskMetaData RestoreTaskMetadata err = json.Unmarshal([]byte(task.Metadata), &taskMetaData) if err != nil { glog.Infof("Cannot unmarshal task metadata to restore databases: %s, %s\n", task.Id, err.Error()) UpdateTaskStatus(storage, task.Id, task.Retries, "Cannot unmarshal task metadata to restore databases: "+err.Error(), "pending") continue } if err = RestoreBackup(storage, instance, namePrefix, taskMetaData.Backup); err != nil { glog.Infof("Cannot restore backups for: %s, %s\n", task.Id, err.Error()) UpdateTaskStatus(storage, task.Id, task.Retries, "Cannot restore backup: "+err.Error(), "pending") continue } FinishedTask(storage, task.Id, task.Retries, "", "finished") } glog.Infof("Finished task: %s\n", task.Id) } return nil } func RunBackgroundTasks(ctx context.Context, o Options) error { storage, namePrefix, err := InitFromOptions(ctx, o) if err != nil { return err } go TickTocPreprovisionTasks(ctx, o, namePrefix, storage) return RunWorkerTasks(ctx, o, namePrefix, storage) }
[ "\"RETRY_WEBHOOKS\"" ]
[]
[ "RETRY_WEBHOOKS" ]
[]
["RETRY_WEBHOOKS"]
go
1
0
ts-backend/ts-adm/src/main/java/com/stc/ts/adm/TsAdm.java
package com.stc.ts.adm; import com.stc.ts.adm.subsystem.SubSystem; import com.stc.ts.adm.subsystem.SubSystemFactory; import lombok.extern.slf4j.Slf4j; import java.lang.reflect.InvocationTargetException; import java.util.Map; import java.util.Properties; @Slf4j public class TsAdm { public static void main(String[] args) throws InvocationTargetException, NoSuchMethodException, InstantiationException, IllegalAccessException { PropertyResolver propertyResolver = new PropertyResolver(); String version = propertyResolver.getVersion() == null ? "dev" : propertyResolver.getVersion(); System.out.println("CLI version: " + version); if (args.length == 0) { System.out.println(SubSystemFactory.SUB_SYSTEM_HELP_STRING); System.exit(1); } Properties properties = propertyResolver.getProperties(); properties.setProperty("version", version); properties.setProperty("banner", " ______ _____ _ ________ ____\n" + " /_ __/ / ___/___ ______ __(_)_______ / ____/ / / _/\n" + " / /_____\\__ \\/ _ \\/ ___/ | / / / ___/ _ \\ / / / / / / \n" + " / /_____/__/ / __/ / | |/ / / /__/ __/ / /___/ /____/ / \n" + "/_/ /____/\\___/_/ |___/_/\\___/\\___/ \\____/_____/___/\n"); resolveEnvValues(properties); if(!SubSystemFactory.isValid(args[0])){ System.out.println(SubSystemFactory.SUB_SYSTEM_HELP_STRING); System.exit(1); } SubSystem subSystem = SubSystemFactory.getSubSystem(args[0], properties); String[] subSystemArgs = new String[args.length - 1]; System.arraycopy(args, 1, subSystemArgs, 0, subSystemArgs.length); subSystem.proceed(subSystemArgs); } private static void resolveEnvValues(Properties properties) { Map<String, String> getenv = System.getenv(); for (Map.Entry<String, String> entry: getenv.entrySet()){ properties.setProperty(entry.getKey(), entry.getValue()); } } }
[]
[]
[]
[]
[]
java
0
0
stingray/io.py
import logging import math import copy import os import pickle import warnings from collections.abc import Iterable import numpy as np from astropy.io import fits from astropy.table import Table from astropy.logger import AstropyUserWarning import stingray.utils as utils from .utils import assign_value_if_none, is_string, order_list_of_arrays from .gti import get_gti_from_all_extensions, load_gtis # Python 3 import pickle _H5PY_INSTALLED = True try: import h5py except ImportError: _H5PY_INSTALLED = False def rough_calibration(pis, mission): """Make a rough conversion betwenn PI channel and energy. Only works for NICER, NuSTAR, and XMM. Parameters ---------- pis: float or array of floats PI channels in data mission: str Mission name Returns ------- energies : float or array of floats Energy values Examples -------- >>> rough_calibration(0, 'nustar') 1.6 >>> # It's case-insensitive >>> rough_calibration(1200, 'XMm') 1.2 >>> rough_calibration(10, 'asDf') Traceback (most recent call last): ... ValueError: Mission asdf not recognized >>> rough_calibration(100, 'nicer') 1.0 """ if mission.lower() == "nustar": return pis * 0.04 + 1.6 elif mission.lower() == "xmm": return pis * 0.001 elif mission.lower() == "nicer": return pis * 0.01 raise ValueError(f"Mission {mission.lower()} not recognized") def get_file_extension(fname): """Get the extension from the file name. If g-zipped, add '.gz' to extension. Examples -------- >>> get_file_extension('ciao.tar') '.tar' >>> get_file_extension('ciao.tar.gz') '.tar.gz' >>> get_file_extension('ciao.evt.gz') '.evt.gz' >>> get_file_extension('ciao.a.tutti.evt.gz') '.evt.gz' """ fname_root = fname.replace('.gz', '') fname_root = os.path.splitext(fname_root)[0] return fname.replace(fname_root, '') def high_precision_keyword_read(hdr, keyword): """Read FITS header keywords, also if split in two. In the case where the keyword is split in two, like MJDREF = MJDREFI + MJDREFF in some missions, this function returns the summed value. 
Otherwise, the content of the single keyword Parameters ---------- hdr : dict_like The FITS header structure, or a dictionary keyword : str The key to read in the header Returns ------- value : long double The value of the key, or ``None`` if something went wrong """ try: value = np.longdouble(hdr[keyword]) return value except KeyError: pass try: if len(keyword) == 8: keyword = keyword[:7] value = np.longdouble(hdr[keyword + 'I']) value += np.longdouble(hdr[keyword + 'F']) return value except KeyError: return None def _patch_mission_info(info, mission=None): """Add some information that is surely missing in xselect.mdb. Examples -------- >>> info = {'gti': 'STDGTI'} >>> new_info = _patch_mission_info(info, mission=None) >>> new_info['gti'] == info['gti'] True >>> new_info = _patch_mission_info(info, mission="xmm") >>> new_info['gti'] 'STDGTI,GTI0' """ if mission is None: return info if mission.lower() == "xmm" and "gti" in info: info["gti"] += ",GTI0" return info def read_mission_info(mission=None): """Search the relevant information about a mission in xselect.mdb.""" curdir = os.path.abspath(os.path.dirname(__file__)) fname = os.path.join(curdir, "datasets", "xselect.mdb") # If HEADAS is defined, search for the most up-to-date version of the # mission database if os.getenv("HEADAS"): hea_fname = os.path.join(os.getenv("HEADAS"), "bin", "xselect.mdb") if os.path.exists(hea_fname): fname = hea_fname if mission is not None: mission = mission.lower() db = {} with open(fname) as fobj: for line in fobj.readlines(): line = line.strip() if mission is not None and not line.lower().startswith(mission): continue if line.startswith("!") or line == "": continue allvals = line.split() string = allvals[0] value = allvals[1:] if len(value) == 1: value = value[0] data = string.split(":")[:] if mission is None: if data[0] not in db: db[data[0]] = {} previous_db_step = db[data[0]] else: previous_db_step = db data = data[1:] for key in data[:-1]: if key not in previous_db_step: 
previous_db_step[key] = {} previous_db_step = previous_db_step[key] previous_db_step[data[-1]] = value return _patch_mission_info(db, mission) def _case_insensitive_search_in_list(string, list_of_strings): """Search for a string in a list of strings, in a case-insensitive way. Example ------- >>> _case_insensitive_search_in_list("a", ["A", "b"]) 'A' >>> _case_insensitive_search_in_list("a", ["c", "b"]) is None True """ for s in list_of_strings: if string.lower() == s.lower(): return s return None def _get_additional_data(lctable, additional_columns): """Get additional data from a FITS data table. Parameters ---------- lctable: `astropy.io.fits.fitsrec.FITS_rec` Data table additional_columns: list of str List of column names to retrieve from the table Returns ------- additional_data: dict Dictionary associating to each additional column the content of the table. """ additional_data = {} if additional_columns is not None: for a in additional_columns: key = _case_insensitive_search_in_list(a, lctable._coldefs.names) if key is not None: additional_data[a] = np.array(lctable.field(key)) else: warnings.warn('Column ' + a + ' not found') additional_data[a] = np.zeros(len(lctable)) return additional_data def get_key_from_mission_info(info, key, default, inst=None, mode=None): """Get the name of a header key or table column from the mission database. Many entries in the mission database have default values that can be altered for specific instruments or observing modes. Here, if there is a definition for a given instrument and mode, we take that, otherwise we use the default). Parameters ---------- info : dict Nested dictionary containing all the information for a given mission. It can be nested, e.g. contain some info for a given instrument, and for each observing mode of that instrument. key : str The key to read from the info dictionary default : object The default value. It can be of any type, depending on the expected type for the entry. 
Other parameters ---------------- inst : str Instrument mode : str Observing mode Returns ------- retval : object The wanted entry from the info dictionary Examples -------- >>> info = {'ecol': 'PI', "A": {"ecol": "BLA"}, "C": {"M1": {"ecol": "X"}}} >>> get_key_from_mission_info(info, "ecol", "BU", inst="A", mode=None) 'BLA' >>> get_key_from_mission_info(info, "ecol", "BU", inst="B", mode=None) 'PI' >>> get_key_from_mission_info(info, "ecol", "BU", inst="A", mode="M1") 'BLA' >>> get_key_from_mission_info(info, "ecol", "BU", inst="C", mode="M1") 'X' >>> get_key_from_mission_info(info, "ghghg", "BU", inst="C", mode="M1") 'BU' """ filt_info = copy.deepcopy(info) if inst is not None and inst in filt_info: filt_info.update(info[inst]) filt_info.pop(inst) if mode is not None and mode in filt_info: filt_info.update(info[inst][mode]) filt_info.pop(mode) if key in filt_info: return filt_info[key] return default def lcurve_from_fits( fits_file, gtistring="GTI", timecolumn="TIME", ratecolumn=None, ratehdu=1, fracexp_limit=0.9, outfile=None, noclobber=False, outdir=None, ): """Load a lightcurve from a fits file. .. note :: FITS light curve handling is still under testing. Absolute times might be incorrect depending on the light curve format. Parameters ---------- fits_file : str File name of the input light curve in FITS format Returns ------- data : dict Dictionary containing all information needed to create a :class:`stingray.Lightcurve` object Other Parameters ---------------- gtistring : str Name of the GTI extension in the FITS file timecolumn : str Name of the column containing times in the FITS file ratecolumn : str Name of the column containing rates in the FITS file ratehdu : str or int Name or index of the FITS extension containing the light curve fracexp_limit : float Minimum exposure fraction allowed noclobber : bool If True, do not overwrite existing files """ warnings.warn( """WARNING! FITS light curve handling is still under testing. 
Absolute times might be incorrect.""" ) # TODO: # treat consistently TDB, UTC, TAI, etc. This requires some documentation # reading. For now, we assume TDB from astropy.io import fits as pf from astropy.time import Time import numpy as np from stingray.gti import create_gti_from_condition lchdulist = pf.open(fits_file) lctable = lchdulist[ratehdu].data # Units of header keywords tunit = lchdulist[ratehdu].header["TIMEUNIT"] try: mjdref = high_precision_keyword_read( lchdulist[ratehdu].header, "MJDREF" ) mjdref = Time(mjdref, scale="tdb", format="mjd") except Exception: mjdref = None try: instr = lchdulist[ratehdu].header["INSTRUME"] except Exception: instr = "EXTERN" # ---------------------------------------------------------------- # Trying to comply with all different formats of fits light curves. # It's a madness... try: tstart = high_precision_keyword_read( lchdulist[ratehdu].header, "TSTART" ) tstop = high_precision_keyword_read(lchdulist[ratehdu].header, "TSTOP") except Exception: # pragma: no cover raise (Exception("TSTART and TSTOP need to be specified")) # For nulccorr lcs this whould work timezero = high_precision_keyword_read( lchdulist[ratehdu].header, "TIMEZERO" ) # Sometimes timezero is "from tstart", sometimes it's an absolute time. # This tries to detect which case is this, and always consider it # referred to tstart timezero = assign_value_if_none(timezero, 0) # for lcurve light curves this should instead work if tunit == "d": # TODO: # Check this. For now, I assume TD (JD - 2440000.5). 
# This is likely wrong timezero = Time(2440000.5 + timezero, scale="tdb", format="jd") tstart = Time(2440000.5 + tstart, scale="tdb", format="jd") tstop = Time(2440000.5 + tstop, scale="tdb", format="jd") # if None, use NuSTAR defaulf MJDREF mjdref = assign_value_if_none( mjdref, Time( np.longdouble("55197.00076601852"), scale="tdb", format="mjd" ), ) timezero = (timezero - mjdref).to("s").value tstart = (tstart - mjdref).to("s").value tstop = (tstop - mjdref).to("s").value if timezero > tstart: timezero -= tstart time = np.array(lctable.field(timecolumn), dtype=np.longdouble) if time[-1] < tstart: time += timezero + tstart else: time += timezero try: dt = high_precision_keyword_read(lchdulist[ratehdu].header, "TIMEDEL") if tunit == "d": dt *= 86400 except Exception: warnings.warn( "Assuming that TIMEDEL is the median difference between the" " light curve times", AstropyUserWarning, ) # Avoid NaNs good = time == time dt = np.median(np.diff(time[good])) # ---------------------------------------------------------------- if ratecolumn is None: for name in ["RATE", "RATE1", "COUNTS"]: if name in lctable.names: ratecolumn = name break else: # pragma: no cover raise ValueError( "None of the accepted rate columns were found in the file") rate = np.array(lctable.field(ratecolumn), dtype=float) errorcolumn = "ERROR" if ratecolumn == "RATE1": errorcolumn = "ERROR1" try: rate_e = np.array(lctable.field(errorcolumn), dtype=np.longdouble) except Exception: rate_e = np.zeros_like(rate) if "RATE" in ratecolumn: rate *= dt rate_e *= dt try: fracexp = np.array(lctable.field("FRACEXP"), dtype=np.longdouble) except Exception: fracexp = np.ones_like(rate) good_intervals = ( (rate == rate) * (fracexp >= fracexp_limit) * (fracexp <= 1) ) rate[good_intervals] /= fracexp[good_intervals] rate_e[good_intervals] /= fracexp[good_intervals] rate[~good_intervals] = 0 try: gtitable = lchdulist[gtistring].data gti_list = np.array( [ [a, b] for a, b in zip( gtitable.field("START"), 
gtitable.field("STOP") ) ], dtype=np.longdouble, ) except Exception: gti_list = create_gti_from_condition(time, good_intervals) lchdulist.close() res = {"time": time, "counts": rate, "err": rate_e, "gti": gti_list, "mjdref": mjdref.mjd, "dt": dt, "instr": instr, "header": lchdulist[ratehdu].header.tostring()} return res def load_events_and_gtis( fits_file, additional_columns=None, gtistring=None, gti_file=None, hduname=None, column=None, ): """Load event lists and GTIs from one or more files. Loads event list from HDU EVENTS of file fits_file, with Good Time intervals. Optionally, returns additional columns of data from the same HDU of the events. Parameters ---------- fits_file : str Other parameters ---------------- additional_columns: list of str, optional A list of keys corresponding to the additional columns to extract from the event HDU (ex.: ['PI', 'X']) gtistring : str Comma-separated list of accepted GTI extensions (default GTI,STDGTI), with or without appended integer number denoting the detector gti_file : str, default None External GTI file hduname : str or int, default 1 Name of the HDU containing the event list column : str, default None The column containing the time values. If None, we use the name specified in the mission database, and if there is nothing there, "TIME" return_limits: bool, optional Return the TSTART and TSTOP keyword values Returns ------- retvals : Object with the following attributes: ev_list : array-like Event times in Mission Epoch Time gti_list: [[gti0_0, gti0_1], [gti1_0, gti1_1], ...] GTIs in Mission Epoch Time additional_data: dict A dictionary, where each key is the one specified in additional_colums. The data are an array with the values of the specified column in the fits file. 
t_start : float Start time in Mission Epoch Time t_stop : float Stop time in Mission Epoch Time pi_list : array-like Raw Instrument energy channels cal_pi_list : array-like Calibrated PI channels (those that can be easily converted to energy values, regardless of the instrument setup.) energy_list : array-like Energy of each photon in keV (only for NuSTAR, NICER, XMM) instr : str Name of the instrument (e.g. EPIC-pn or FPMA) mission : str Name of the instrument (e.g. XMM or NuSTAR) mjdref : float MJD reference time for the mission header : str Full header of the FITS file, for debugging purposes detector_id : array-like, int Detector id for each photon (e.g. each of the CCDs composing XMM's or Chandra's instruments) """ from astropy.io import fits as pf hdulist = pf.open(fits_file) probe_header = hdulist[0].header # Let's look for TELESCOP here. This is the most common keyword to be # found in well-behaved headers. If it is not in header 0, I take this key # and the remaining information from header 1. if "TELESCOP" not in probe_header: probe_header = hdulist[1].header mission_key = "MISSION" if mission_key not in probe_header: mission_key = "TELESCOP" mission = probe_header[mission_key].lower() db = read_mission_info(mission) instkey = get_key_from_mission_info(db, "instkey", "INSTRUME") instr = mode = None if instkey in probe_header: instr = probe_header[instkey].strip() modekey = get_key_from_mission_info(db, "dmodekey", None, instr) if modekey is not None and modekey in probe_header: mode = probe_header[modekey].strip() gtistring = get_key_from_mission_info(db, "gti", "GTI,STDGTI", instr, mode) if hduname is None: hduname = get_key_from_mission_info(db, "events", "EVENTS", instr, mode) if hduname not in hdulist: warnings.warn(f'HDU {hduname} not found. 
Trying first extension') hduname = 1 datatable = hdulist[hduname].data header = hdulist[hduname].header ephem = timeref = timesys = None if "PLEPHEM" in header: ephem = header["PLEPHEM"].strip().lstrip('JPL-').lower() if "TIMEREF" in header: timeref = header["TIMEREF"].strip().lower() if "TIMESYS" in header: timesys = header["TIMESYS"].strip().lower() if column is None: column = get_key_from_mission_info(db, "time", "TIME", instr, mode) ev_list = np.array(datatable.field(column), dtype=np.longdouble) detector_id = None ckey = get_key_from_mission_info(db, "ccol", "NONE", instr, mode) if ckey != "NONE" and ckey in datatable.columns.names: detector_id = datatable.field(ckey) det_number = None if detector_id is None else list(set(detector_id)) timezero = np.longdouble(0.) if "TIMEZERO" in header: timezero = np.longdouble(header["TIMEZERO"]) ev_list += timezero t_start = ev_list[0] t_stop = ev_list[-1] if "TSTART" in header: t_start = np.longdouble(header["TSTART"]) if "TSTOP" in header: t_stop = np.longdouble(header["TSTOP"]) mjdref = np.longdouble(high_precision_keyword_read(header, "MJDREF")) # Read and handle GTI extension accepted_gtistrings = gtistring.split(",") if gti_file is None: # Select first GTI with accepted name try: gti_list = get_gti_from_all_extensions( hdulist, accepted_gtistrings=accepted_gtistrings, det_numbers=det_number, ) except Exception: # pragma: no cover warnings.warn( "No extensions found with a valid name. 
" "Please check the `accepted_gtistrings` values.", AstropyUserWarning, ) gti_list = np.array([[t_start, t_stop]], dtype=np.longdouble) else: gti_list = load_gtis(gti_file, gtistring) pi_col = get_key_from_mission_info(db, "ecol", "PI", instr, mode) if additional_columns is None: additional_columns = [pi_col] if pi_col not in additional_columns: additional_columns.append(pi_col) additional_data = _get_additional_data(datatable, additional_columns) hdulist.close() # Sort event list order = np.argsort(ev_list) ev_list = ev_list[order] if detector_id is not None: detector_id = detector_id[order] additional_data = order_list_of_arrays(additional_data, order) pi = additional_data[pi_col].astype(np.float32) cal_pi = pi # EventReadOutput() is an empty class. We will assign a number of attributes to # it, like the arrival times of photons, the energies, and some information # from the header. returns = EventReadOutput() returns.ev_list = ev_list returns.gti_list = gti_list returns.pi_list = pi returns.cal_pi_list = cal_pi if "energy" in additional_data: returns.energy_list = additional_data["energy"] else: try: returns.energy_list = rough_calibration(cal_pi, mission) except ValueError: returns.energy_list = None returns.instr = instr.lower() returns.mission = mission.lower() returns.mjdref = mjdref returns.header = header.tostring() returns.additional_data = additional_data returns.t_start = t_start returns.t_stop = t_stop returns.detector_id = detector_id returns.ephem = ephem returns.timeref = timeref returns.timesys = timesys return returns class EventReadOutput(): def __init__(self): pass def mkdir_p(path): # pragma: no cover """Safe ``mkdir`` function, found at [so-mkdir]_. Parameters ---------- path : str The absolute path to the directory to be created Notes ----- .. 
[so-mkdir] http://stackoverflow.com/questions/600268/mkdir-p-functionality-in-python """ import os import errno try: os.makedirs(path) except OSError as exc: # Python >2.5 if exc.errno == errno.EEXIST and os.path.isdir(path): pass else: raise def read_header_key(fits_file, key, hdu=1): """Read the header key key from HDU hdu of the file ``fits_file``. Parameters ---------- fits_file: str The file name and absolute path to the event file. key: str The keyword to be read Other Parameters ---------------- hdu : int Index of the HDU extension from which the header key to be read. Returns ------- value : object The value stored under ``key`` in ``fits_file`` """ hdulist = fits.open(fits_file, ignore_missing_end=True) try: value = hdulist[hdu].header[key] except KeyError: # pragma: no cover value = '' hdulist.close() return value def ref_mjd(fits_file, hdu=1): """Read ``MJDREFF``, ``MJDREFI`` or, if failed, ``MJDREF``, from the FITS header. Parameters ---------- fits_file : str The file name and absolute path to the event file. Other Parameters ---------------- hdu : int Index of the HDU extension from which the header key to be read. Returns ------- mjdref : numpy.longdouble the reference MJD """ if isinstance(fits_file, Iterable) and\ not is_string(fits_file): # pragma: no cover fits_file = fits_file[0] logging.info("opening %s" % fits_file) hdulist = fits.open(fits_file, ignore_missing_end=True) ref_mjd_val = high_precision_keyword_read(hdulist[hdu].header, "MJDREF") hdulist.close() return ref_mjd_val def common_name(str1, str2, default='common'): """Strip two strings of the letters not in common. Filenames must be of same length and only differ by a few letters. 
Parameters ---------- str1 : str str2 : str Other Parameters ---------------- default : str The string to return if ``common_str`` is empty Returns ------- common_str : str A string containing the parts of the two names in common """ if not len(str1) == len(str2): return default common_str = '' # Extract the MP root of the name (in case they're event files) for i, letter in enumerate(str1): if str2[i] == letter: common_str += letter # Remove leading and trailing underscores and dashes common_str = common_str.rstrip('_').rstrip('-') common_str = common_str.lstrip('_').lstrip('-') if common_str == '': common_str = default logging.debug('common_name: %s %s -> %s' % (str1, str2, common_str)) return common_str def split_numbers(number, shift=0): """ Split high precision number(s) into doubles. You can specify the number of shifts to move the decimal point. Parameters ---------- number: long double The input high precision number which is to be split Other parameters ---------------- shift: integer Move the cut by `shift` decimal points to the right (left if negative) Returns ------- number_I: double First part of high precision number number_F: double Second part of high precision number Examples -------- >>> n = 12.34 >>> i, f = split_numbers(n) >>> i == 12 True >>> np.isclose(f, 0.34) True >>> split_numbers(n, 2) (12.34, 0.0) >>> split_numbers(n, -1) (10.0, 2.34) """ if isinstance(number, Iterable): number = np.asarray(number) number *= 10**shift mods = [math.modf(n) for n in number] number_F = [f for f, _ in mods] number_I = [i for _, i in mods] else: number *= 10**shift number_F, number_I = math.modf(number) return np.double(number_I) / 10**shift, np.double(number_F) / 10**shift def _save_pickle_object(object, filename): """ Save a class object in pickle format. 
Parameters ---------- object: class instance A class object whose attributes are saved in a dictionary format filename: str Name of the file in which object is saved """ with open(filename, "wb") as f: pickle.dump(object, f) def _retrieve_pickle_object(filename): """ Retrieves a pickled class object. Parameters ---------- filename: str Name of the file in which object is saved Returns ------- data: class object """ with open(filename, "rb") as f: return pickle.load(f) def _save_hdf5_object(object, filename): """ Save a class object in hdf5 format. Parameters ---------- object: class instance A class object whose attributes are saved in a dictionary format filename: str Name of the file in which object is saved """ items = vars(object) attrs = [name for name in items if items[name] is not None] with h5py.File(filename, 'w') as hf: for attr in attrs: data = items[attr] # If data is a single number, store as an attribute. if _isattribute(data): if isinstance(data, np.longdouble): data_I, data_F = split_numbers(data) names = [attr + '_I', attr + '_F'] hf.attrs[names[0]] = data_I hf.attrs[names[1]] = data_F else: hf.attrs[attr] = data # If data is an array or list, create a dataset. else: try: if isinstance(data[0], np.longdouble): data_I, data_F = split_numbers(data) names = [attr + '_I', attr + '_F'] hf.create_dataset(names[0], data=data_I) hf.create_dataset(names[1], data=data_F) else: hf.create_dataset(attr, data=data) except IndexError: # To account for numpy arrays of type 'None' (0-d) pass def _retrieve_hdf5_object(filename): """ Retrieves an hdf5 format class object. Parameters ---------- filename: str The name of file with which object was saved Returns ------- data: dictionary Loads the data from an hdf5 object file and returns in dictionary format. 
""" with h5py.File(filename, 'r') as hf: dset_keys = hf.keys() attr_keys = hf.attrs.keys() data = {} dset_copy = list(dset_keys)[:] for key in dset_keys: # Make sure key hasn't been removed if key in dset_copy: # Longdouble case if key[-2:] in ['_I', '_F']: m_key = key[:-2] # Add integer and float parts data[m_key] = np.longdouble(hf[m_key + '_I'][()]) data[m_key] += np.longdouble(hf[m_key + '_F'][()]) # Remove integer and float parts from attributes dset_copy.remove(m_key + '_I') dset_copy.remove(m_key + '_F') else: data[key] = hf[key][()] attr_copy = list(attr_keys)[:] for key in attr_keys: # Make sure key hasn't been removed if key in attr_copy: # Longdouble case if key[-2:] in ['_I', '_F']: m_key = key[:-2] # Add integer and float parts data[m_key] = np.longdouble(hf.attrs[m_key + '_I']) data[m_key] += np.longdouble(hf.attrs[m_key + '_F']) # Remove integer and float parts from attributes attr_copy.remove(m_key + '_I') attr_copy.remove(m_key + '_F') else: data[key] = hf.attrs[key] return data def _save_ascii_object(object, filename, fmt="%.18e", **kwargs): """ Save an array to a text file. Parameters ---------- object : numpy.ndarray An array with the data to be saved filename : str The file name to save to fmt : str or sequence of strs, optional Use for formatting of columns. See `numpy.savetxt` documentation for details. Other Parameters ---------------- kwargs : any keyword argument taken by `numpy.savetxt` """ try: np.savetxt(filename, object, fmt=fmt, **kwargs) except TypeError: raise Exception("Formatting of columns not recognized! Use 'fmt' " "to format columns including strings or mixed types!") pass def _retrieve_ascii_object(filename, **kwargs): """ Helper function to retrieve ascii objects from file. Uses astropy.Table for reading and storing the data. Parameters ---------- filename : str The name of the file with the data to be retrieved. 
Other Parameters ----------------------------- usecols : {int | iterable} The indices of the columns in the file to be returned. By default, all columns will be returned skiprows : int The number of rows at the beginning to skip By default, no rows will be skipped. names : iterable A list of column names to be attached to the columns. By default, no column names are added, unless they are specified in the file header and can be read by astropy.Table.read automatically. Returns ------- data : astropy.Table object An astropy.Table object with the data from the file """ if not isinstance(filename, str): raise TypeError("filename must be string!") if 'usecols' in list(kwargs.keys()): if np.size(kwargs['usecols']) != 2: raise ValueError("Need to define two columns") usecols = kwargs["usecols"] else: usecols = None if 'skiprows' in list(kwargs.keys()): assert isinstance(kwargs["skiprows"], int) skiprows = kwargs["skiprows"] else: skiprows = 0 if "names" in list(kwargs.keys()): names = kwargs["names"] else: names = None data = Table.read(filename, data_start=skiprows, names=names, format="ascii") if usecols is None: return data else: colnames = np.array(data.colnames) cols = colnames[usecols] return data[cols] def _save_fits_object(object, filename, **kwargs): """ Save a class object in fits format. Parameters ---------- object: class instance A class object whose attributes would be saved in a dictionary format. filename: str The file name to save to Additional Keyword Parameters ----------------------------- tnames: str iterable The names of HDU tables. For instance, in case of eventlist, tnames could be ['EVENTS', 'GTI'] colsassign: dictionary iterable This indicates the correct tables to which to assign columns to. If this is None or if a column is not provided, it/they will be assigned to the first table. For example, [{'gti':'GTI'}] indicates that gti values should be stored in GTI table. 
""" tables = [] if 'colsassign' in list(kwargs.keys()): colsassign = kwargs['colsassign'] iscolsassigned = True else: iscolsassigned = False if 'tnames' in list(kwargs.keys()): tables = kwargs['tnames'] else: tables = ['MAIN'] items = vars(object) attrs = [name for name in items if items[name] is not None] cols = [] hdrs = [] for t in tables: cols.append([]) hdrs.append(fits.Header()) for attr in attrs: data = items[attr] # Get the index of table to which column belongs if iscolsassigned and attr in colsassign.keys(): index = tables.index(colsassign[attr]) else: index = 0 # If data is a single number, store as metadata if _isattribute(data): if isinstance(data, np.longdouble): # Longdouble case. Split and save integer and float parts data_I, data_F = split_numbers(data) names = [attr + '_I', attr + '_F'] hdrs[index][names[0]] = data_I hdrs[index][names[1]] = data_F else: # Normal case. Save as it is hdrs[index][attr] = data # If data is an array or list, insert as table column else: try: if isinstance(data[0], np.longdouble): # Longdouble case. Split and save integer and float parts data_I, data_F = split_numbers(data) names = [attr + '_I', attr + '_F'] cols[index].append( fits.Column(name=names[0], format='D', array=data_I)) cols[index].append( fits.Column(name=names[1], format='D', array=data_F)) else: # Normal case. Save as it is cols[index].append( fits.Column(name=attr, format=_lookup_format(data[0]), array=data)) except IndexError: # To account for numpy arrays of type 'None' (0-d) pass tbhdu = fits.HDUList() # Create binary tables for i in range(0, len(tables)): if len(cols[i]) > 0: tbhdu.append(fits.BinTableHDU.from_columns(cols[i], header=hdrs[i], name=tables[i])) tbhdu.writeto(filename) def _retrieve_fits_object(filename, **kwargs): """ Retrieves a fits format class object. Parameters ---------- filename: str The name of file with which object was saved Other Parameters ---------------- cols: str iterable The names of columns to extract from fits tables. 
Returns ------- data: dictionary Loads the data from a fits object file and returns in dictionary format. """ data = {} if 'cols' in list(kwargs.keys()): cols = [col.upper() for col in kwargs['cols']] else: cols = [] with fits.open(filename, memmap=False, ignore_missing_end=True) as hdulist: fits_cols = [] # Get columns from all tables for i in range(1, len(hdulist)): fits_cols.append([h.upper() for h in hdulist[i].data.names]) for c in cols: for i in range(0, len(fits_cols)): # .upper() is used because `fits` stores values in upper case hdr_keys = [h.upper() for h in hdulist[i + 1].header.keys()] # Longdouble case. Check for columns if c + '_I' in fits_cols[i] or c + '_F' in fits_cols[i]: if c not in data.keys(): data[c] = np.longdouble(hdulist[i + 1].data[c + '_I']) data[c] += np.longdouble(hdulist[i + 1].data[c + '_F']) # Longdouble case. Check for header keys if c + '_I' in hdr_keys or c + '_F' in hdr_keys: if c not in data.keys(): data[c] = \ np.longdouble(hdulist[i + 1].header[c + '_I']) data[c] += \ np.longdouble(hdulist[i + 1].header[c + '_F']) # Normal case. Check for columns elif c in fits_cols[i]: data[c] = hdulist[i + 1].data[c] # Normal case. Check for header keys elif c in hdr_keys: data[c] = hdulist[i + 1].header[c] hdulist.close() return data def _lookup_format(var): """ Looks up relevant format in fits. Parameters ---------- var : object An object to look up in the table Returns ------- lookup : str The str describing the type of ``var`` """ lookup = {"<type 'int'>": "J", "<type 'float'>": "E", "<type 'numpy.int64'>": "K", "<type 'numpy.float64'>": "D", "<type 'numpy.float128'>": "D", "<type 'str'>": "30A", "<type 'bool'": "L"} form = type(var) try: return lookup[str(form)] except KeyError: # If an entry is not contained in lookup dictionary return "D" def _isattribute(data): """ Check if data is a single number or an array. Parameters ---------- data : object The object to be checked. 
Returns: bool True if the data is a single number, False if it is an iterable. """ if isinstance(data, Iterable) and not isinstance(data, (str, bytes)): return False else: return True def write(input_, filename, format_='pickle', **kwargs): """ Pickle a class instance. For parameters depending on ``format_``, see individual function definitions. Parameters ---------- object: a class instance The object to be stored filename: str The name of the file to be created format_: str The format in which to store file. Formats supported are ``pickle``, ``hdf5``, ``ascii`` or ``fits`` """ if format_ == 'pickle': _save_pickle_object(input_, filename) elif format_ == 'hdf5': if _H5PY_INSTALLED: _save_hdf5_object(input_, filename) else: utils.simon('h5py not installed, using pickle instead' 'to save object.') _save_pickle_object(input_, filename.split('.')[0] + '.pickle') elif format_ == 'ascii': _save_ascii_object(input_, filename, **kwargs) elif format_ == 'fits': _save_fits_object(input_, filename, **kwargs) else: utils.simon('Format not understood.') def read(filename, format_='pickle', **kwargs): """ Return a saved class instance. Parameters ---------- filename: str The name of the file to be retrieved. format_: str The format used to store file. Supported formats are pickle, hdf5, ascii or fits. Returns ------- data : {``object`` | ``astropy.table`` | ``dict``} * If ``format_`` is ``pickle``, an object is returned. * If ``format_`` is ``ascii``, `astropy.table` object is returned. * If ``format_`` is ``hdf5`` or 'fits``, a dictionary object is returned. 
""" if format_ == 'pickle': return _retrieve_pickle_object(filename) elif format_ == 'hdf5': if _H5PY_INSTALLED: return _retrieve_hdf5_object(filename) else: utils.simon('h5py not installed, cannot read an' 'hdf5 object.') elif format_ == 'ascii': return _retrieve_ascii_object(filename, **kwargs) elif format_ == 'fits': return _retrieve_fits_object(filename, **kwargs) else: utils.simon('Format not understood.') def savefig(filename, **kwargs): """ Save a figure plotted by ``matplotlib``. Note : This function is supposed to be used after the ``plot`` function. Otherwise it will save a blank image with no plot. Parameters ---------- filename : str The name of the image file. Extension must be specified in the file name. For example filename with `.png` extension will give a rasterized image while ``.pdf`` extension will give a vectorized output. kwargs : keyword arguments Keyword arguments to be passed to ``savefig`` function of ``matplotlib.pyplot``. For example use `bbox_inches='tight'` to remove the undesirable whitepace around the image. """ try: import matplotlib.pyplot as plt except ImportError: raise ImportError("Matplotlib required for savefig()") if not plt.fignum_exists(1): utils.simon("use ``plot`` function to plot the image first and " "then use ``savefig`` to save the figure.") plt.savefig(filename, **kwargs)
[]
[]
[ "HEADAS" ]
[]
["HEADAS"]
python
1
0
app/eMenu/settings.py
""" Django settings for eMenu project. Generated by 'django-admin startproject' using Django 3.2.3. For more information on this file, see https://docs.djangoproject.com/en/3.2/topics/settings/ For the full list of settings and their values, see https://docs.djangoproject.com/en/3.2/ref/settings/ """ from pathlib import Path from datetime import timedelta import os import sys # Build paths inside the project like this: BASE_DIR / 'subdir'. BASE_DIR = Path(__file__).resolve().parent.parent # Quick-start development settings - unsuitable for production # See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/ # SECURITY WARNING: keep the secret key used in production secret! SECRET_KEY = 'django-insecure-#wl5z(!)p0nau_d=sp_7)58870%7l=yp(_mu4w*ceswoq_i(=t' # SECURITY WARNING: don't run with debug turned on in production! DEBUG = True ALLOWED_HOSTS = [] # Application definition INSTALLED_APPS = [ 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', # Third-Party Apps 'drf_yasg', 'rest_framework', 'rest_framework.authtoken', 'django_filters', # Local Apps 'core', 'cards', 'dishes' ] MIDDLEWARE = [ 'django.middleware.security.SecurityMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', ] ROOT_URLCONF = 'eMenu.urls' TEMPLATES = [ { 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': [], 'APP_DIRS': True, 'OPTIONS': { 'context_processors': [ 'django.template.context_processors.debug', 'django.template.context_processors.request', 'django.contrib.auth.context_processors.auth', 'django.contrib.messages.context_processors.messages', ], }, }, ] WSGI_APPLICATION = 
'eMenu.wsgi.application' EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend' DEFAULT_FROM_MAIL = '[email protected]' # Database # https://docs.djangoproject.com/en/3.2/ref/settings/#databases DATABASES = { 'default': { 'ENGINE': 'django.db.backends.postgresql', 'NAME': os.getenv('POSTGRES_DB'), 'USER': os.getenv('POSTGRES_USER'), 'PASSWORD': os.getenv('POSTGRES_PASSWORD'), 'HOST': os.getenv('POSTGRES_HOST'), 'PORT': os.getenv('POSTGRES_PORT'), } } if 'test' in sys.argv: DATABASES['default'] = {'ENGINE': 'django.db.backends.sqlite3'} # Password validation # https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators AUTH_PASSWORD_VALIDATORS = [ { 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator', }, { 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', }, { 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator', }, { 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator', }, ] # Authentication REST_FRAMEWORK = { # Use Django's standard `django.contrib.auth` permissions, # or allow read-only access for unauthenticated users. 
'DEFAULT_AUTHENTICATION_CLASSES': [ 'rest_framework.authentication.TokenAuthentication', 'rest_framework.authentication.SessionAuthentication' ], 'DEFAULT_PERMISSION_CLASSES': [ 'rest_framework.permissions.IsAuthenticatedOrReadOnly' ], 'DEFAULT_FILTER_BACKENDS': [ 'django_filters.rest_framework.DjangoFilterBackend', ], 'DEFAULT_PAGINATION_CLASS': 'core.pagination.StandardResultsSetPagination', } # Celery Configuration Options CELERY_TIMEZONE = "Europe/Warsaw" CELERY_TASK_TRACK_STARTED = True CELERY_TASK_TIME_LIMIT = 30 * 60 CELERY_BROKER_URL = os.getenv('CELERY_BROKER_URL', default='redis://redis:6379') CELERY_RESULT_BACKEND = os.getenv('CELERY_RESULT_BACKEND', default='redis://redis:6379') CELERY_ACCEPT_CONTENT = os.getenv('CELERY_ACCEPT_CONTENT', default=['application/json']) CELERY_TASK_SERIALIZER = os.getenv('CELERY_TASK_SERIALIZER', default='json') CELERY_RESULT_SERIALIZER = os.getenv('CELERY_RESULT_SERIALIZER', default='json') # Internationalization # https://docs.djangoproject.com/en/3.2/topics/i18n/ LANGUAGE_CODE = 'pl' TIME_ZONE = 'Europe/Warsaw' USE_I18N = True USE_L10N = True USE_TZ = False # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/3.2/howto/static-files/ STATIC_URL = '/static/' MEDIA_URL = '/media/' MEDIA_ROOT = '/vol/backend/media' STATIC_ROOT = '/vol/backend/static' # Default primary key field type # https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField' RECENT_DISH_TIMEDELTA = timedelta(days=1)
[]
[]
[ "POSTGRES_USER", "CELERY_BROKER_URL", "CELERY_TASK_SERIALIZER", "CELERY_ACCEPT_CONTENT", "POSTGRES_HOST", "POSTGRES_PORT", "POSTGRES_DB", "CELERY_RESULT_SERIALIZER", "POSTGRES_PASSWORD", "CELERY_RESULT_BACKEND" ]
[]
["POSTGRES_USER", "CELERY_BROKER_URL", "CELERY_TASK_SERIALIZER", "CELERY_ACCEPT_CONTENT", "POSTGRES_HOST", "POSTGRES_PORT", "POSTGRES_DB", "CELERY_RESULT_SERIALIZER", "POSTGRES_PASSWORD", "CELERY_RESULT_BACKEND"]
python
10
0
src/server/core/tests/test_config.py
import os import unittest from flask import current_app from flask_testing import TestCase from core import masakhane class TestDevelopmentConfig(TestCase): def create_app(self): masakhane.config.from_object('core.config.DevelopmentConfig') return masakhane def test_app_is_development(self): self.assertTrue(masakhane.config['SECRET_KEY'] == "super-secret-key") self.assertFalse(current_app is None) self.assertTrue( masakhane.config['SQLALCHEMY_DATABASE_URI'] == os.getenv('DATABASE_TEST_URL', "sqlite:///masakhane.db") ) class TestTestingConfig(TestCase): def create_app(self): masakhane.config.from_object('core.config.StagingConfig') return masakhane def test_app_is_testing(self): self.assertTrue(masakhane.config['SECRET_KEY'] == "key_testing") self.assertTrue(masakhane.config['TESTING']) self.assertTrue( masakhane.config['SQLALCHEMY_DATABASE_URI'] == os.getenv('DATABASE_TEST_URL', "sqlite:///masakhane.db") ) class TestProductionConfig(TestCase): def create_app(self): masakhane.config.from_object('core.config.ProductionConfig') return masakhane def test_app_is_production(self): self.assertTrue(masakhane.config['SECRET_KEY'] == "key_production") self.assertFalse(masakhane.config['TESTING']) if __name__ == '__main__': unittest.main()
[]
[]
[ "DATABASE_TEST_URL" ]
[]
["DATABASE_TEST_URL"]
python
1
0
bl/osInfo.go
package bl import "github.com/shirou/gopsutil/host" type OSInfo struct { OS string PlatformVersion string KernelVersion string } func NewOsInfo() *OSInfo { infoStat, _ := host.Info() return &OSInfo{ OS: infoStat.OS, KernelVersion: infoStat.KernelVersion, PlatformVersion: infoStat.PlatformVersion, } }
[]
[]
[]
[]
[]
go
null
null
null
cmd/minikube/cmd/env.go
/* Copyright 2020 The Kubernetes Authors All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ // Part of this code is heavily inspired/copied by the following file: // github.com/docker/machine/commands/env.go package cmd import ( "fmt" "io" "net" "os" "strconv" "strings" "text/template" "github.com/docker/machine/libmachine/drivers" "github.com/docker/machine/libmachine/shell" "github.com/docker/machine/libmachine/state" "github.com/pkg/errors" "github.com/spf13/cobra" "github.com/spf13/viper" "k8s.io/minikube/pkg/drivers/kic" "k8s.io/minikube/pkg/drivers/kic/oci" "k8s.io/minikube/pkg/minikube/cluster" "k8s.io/minikube/pkg/minikube/config" "k8s.io/minikube/pkg/minikube/constants" "k8s.io/minikube/pkg/minikube/driver" "k8s.io/minikube/pkg/minikube/exit" "k8s.io/minikube/pkg/minikube/localpath" "k8s.io/minikube/pkg/minikube/machine" "k8s.io/minikube/pkg/minikube/out" ) var envTmpl = fmt.Sprintf("{{ .Prefix }}%s{{ .Delimiter }}{{ .DockerTLSVerify }}{{ .Suffix }}{{ .Prefix }}%s{{ .Delimiter }}{{ .DockerHost }}{{ .Suffix }}{{ .Prefix }}%s{{ .Delimiter }}{{ .DockerCertPath }}{{ .Suffix }}{{ .Prefix }}%s{{ .Delimiter }}{{ .MinikubeDockerdProfile }}{{ .Suffix }}{{ if .NoProxyVar }}{{ .Prefix }}{{ .NoProxyVar }}{{ .Delimiter }}{{ .NoProxyValue }}{{ .Suffix }}{{end}}{{ .UsageHint }}", constants.DockerTLSVerifyEnv, constants.DockerHostEnv, constants.DockerCertPathEnv, constants.MinikubeActiveDockerdEnv) const ( fishSetPfx = "set -gx " fishSetSfx = "\"\n" fishSetDelim = " \"" fishUnsetPfx = "set 
-e " fishUnsetSfx = "\n" psSetPfx = "$Env:" psSetSfx = "\"\n" psSetDelim = " = \"" psUnsetPfx = `Remove-Item Env:\\` psUnsetSfx = "\n" cmdSetPfx = "SET " cmdSetSfx = "\n" cmdSetDelim = "=" cmdUnsetPfx = "SET " cmdUnsetSfx = "\n" cmdUnsetDelim = "=" emacsSetPfx = "(setenv \"" emacsSetSfx = "\")\n" emacsSetDelim = "\" \"" emacsUnsetPfx = "(setenv \"" emacsUnsetSfx = ")\n" emacsUnsetDelim = "\" nil" bashSetPfx = "export " bashSetSfx = "\"\n" bashSetDelim = "=\"" bashUnsetPfx = "unset " bashUnsetSfx = "\n" nonePfx = "" noneSfx = "\n" noneDelim = "=" ) // ShellConfig represents the shell config type ShellConfig struct { Prefix string Delimiter string Suffix string DockerCertPath string DockerHost string DockerTLSVerify string MinikubeDockerdProfile string UsageHint string NoProxyVar string NoProxyValue string } var ( noProxy bool forceShell string unset bool defaultNoProxyGetter NoProxyGetter ) // NoProxyGetter gets the no_proxy variable type NoProxyGetter interface { GetNoProxyVar() (string, string) } // EnvNoProxyGetter gets the no_proxy variable, using environment type EnvNoProxyGetter struct{} func generateUsageHint(profile, sh string) string { const usgPlz = "To point your shell to minikube's docker-daemon, run:" var usgCmd = fmt.Sprintf("minikube -p %s docker-env", profile) var usageHintMap = map[string]string{ "bash": fmt.Sprintf(` # %s # eval $(%s) `, usgPlz, usgCmd), "fish": fmt.Sprintf(` # %s # eval (%s) `, usgPlz, usgCmd), "powershell": fmt.Sprintf(`# %s # & %s | Invoke-Expression `, usgPlz, usgCmd), "cmd": fmt.Sprintf(`REM %s REM @FOR /f "tokens=*" %%i IN ('%s') DO @%%i `, usgPlz, usgCmd), "emacs": fmt.Sprintf(`;; %s ;; (with-temp-buffer (shell-command "%s" (current-buffer)) (eval-buffer)) `, usgPlz, usgCmd), } hint, ok := usageHintMap[sh] if !ok { return usageHintMap["bash"] } return hint } // shellCfgSet generates context variables for "docker-env" func shellCfgSet(ec EnvConfig, envMap map[string]string) *ShellConfig { s := &ShellConfig{ DockerCertPath: 
envMap[constants.DockerCertPathEnv], DockerHost: envMap[constants.DockerHostEnv], DockerTLSVerify: envMap[constants.DockerTLSVerifyEnv], MinikubeDockerdProfile: envMap[constants.MinikubeActiveDockerdEnv], UsageHint: generateUsageHint(ec.profile, ec.shell), } if ec.noProxy { noProxyVar, noProxyValue := defaultNoProxyGetter.GetNoProxyVar() // add the docker host to the no_proxy list idempotently switch { case noProxyValue == "": noProxyValue = ec.hostIP case strings.Contains(noProxyValue, ec.hostIP): // ip already in no_proxy list, nothing to do default: noProxyValue = fmt.Sprintf("%s,%s", noProxyValue, ec.hostIP) } s.NoProxyVar = noProxyVar s.NoProxyValue = noProxyValue } switch ec.shell { case "fish": s.Prefix = fishSetPfx s.Suffix = fishSetSfx s.Delimiter = fishSetDelim case "powershell": s.Prefix = psSetPfx s.Suffix = psSetSfx s.Delimiter = psSetDelim case "cmd": s.Prefix = cmdSetPfx s.Suffix = cmdSetSfx s.Delimiter = cmdSetDelim case "emacs": s.Prefix = emacsSetPfx s.Suffix = emacsSetSfx s.Delimiter = emacsSetDelim case "none": s.Prefix = nonePfx s.Suffix = noneSfx s.Delimiter = noneDelim s.UsageHint = "" default: s.Prefix = bashSetPfx s.Suffix = bashSetSfx s.Delimiter = bashSetDelim } return s } // GetNoProxyVar gets the no_proxy var func (EnvNoProxyGetter) GetNoProxyVar() (string, string) { // first check for an existing lower case no_proxy var noProxyVar := "no_proxy" noProxyValue := os.Getenv("no_proxy") // otherwise default to allcaps HTTP_PROXY if noProxyValue == "" { noProxyVar = "NO_PROXY" noProxyValue = os.Getenv("NO_PROXY") } return noProxyVar, noProxyValue } // isDockerActive checks if Docker is active func isDockerActive(d drivers.Driver) (bool, error) { client, err := drivers.GetSSHClientFromDriver(d) if err != nil { return false, err } output, err := client.Output("sudo systemctl is-active docker") if err != nil { return false, err } // systemd returns error code on inactive s := strings.TrimSpace(output) return err == nil && s == "active", nil } 
// envCmd represents the docker-env command var dockerEnvCmd = &cobra.Command{ Use: "docker-env", Short: "Sets up docker env variables; similar to '$(docker-machine env)'", Long: `Sets up docker env variables; similar to '$(docker-machine env)'.`, Run: func(cmd *cobra.Command, args []string) { api, err := machine.NewAPIClient() if err != nil { exit.WithError("Error getting client", err) } defer api.Close() profile := viper.GetString(config.MachineProfile) cc, err := config.Load(profile) if err != nil { exit.WithError("Error getting config", err) } host, err := cluster.CheckIfHostExistsAndLoad(api, cc.Name) if err != nil { exit.WithError("Error getting host", err) } if host.Driver.DriverName() == driver.None { exit.UsageT(`'none' driver does not support 'minikube docker-env' command`) } hostSt, err := cluster.GetHostStatus(api, cc.Name) if err != nil { exit.WithError("Error getting host status", err) } if hostSt != state.Running.String() { exit.WithCodeT(exit.Unavailable, `'{{.profile}}' is not running`, out.V{"profile": profile}) } ok, err := isDockerActive(host.Driver) if err != nil { exit.WithError("Error getting service status", err) } if !ok { exit.WithCodeT(exit.Unavailable, `The docker service within '{{.profile}}' is not active`, out.V{"profile": profile}) } hostIP, err := host.Driver.GetIP() if err != nil { exit.WithError("Error getting host IP", err) } ec := EnvConfig{ profile: profile, driver: host.DriverName, shell: forceShell, hostIP: hostIP, certsDir: localpath.MakeMiniPath("certs"), noProxy: noProxy, } if ec.shell == "" { ec.shell, err = shell.Detect() if err != nil { exit.WithError("Error detecting shell", err) } } if unset { if err := unsetScript(ec, os.Stdout); err != nil { exit.WithError("Error generating unset output", err) } return } if err := setScript(ec, os.Stdout); err != nil { exit.WithError("Error generating set output", err) } }, } // EnvConfig encapsulates all external inputs into shell generation type EnvConfig struct { profile string 
shell string driver string hostIP string certsDir string noProxy bool } // setScript writes out a shell-compatible 'docker-env' script func setScript(ec EnvConfig, w io.Writer) error { tmpl := template.Must(template.New("envConfig").Parse(envTmpl)) envVars, err := dockerEnvVars(ec) if err != nil { return err } return tmpl.Execute(w, shellCfgSet(ec, envVars)) } // setScript writes out a shell-compatible 'docker-env unset' script func unsetScript(ec EnvConfig, w io.Writer) error { vars := []string{ constants.DockerTLSVerifyEnv, constants.DockerHostEnv, constants.DockerCertPathEnv, constants.MinikubeActiveDockerdEnv, } if ec.noProxy { k, _ := defaultNoProxyGetter.GetNoProxyVar() if k != "" { vars = append(vars, k) } } var sb strings.Builder switch ec.shell { case "fish": for _, v := range vars { sb.WriteString(fmt.Sprintf("%s%s%s", fishUnsetPfx, v, fishUnsetSfx)) } case "powershell": sb.WriteString(fmt.Sprintf("%s%s%s", psUnsetPfx, strings.Join(vars, " Env:\\\\"), psUnsetSfx)) case "cmd": for _, v := range vars { sb.WriteString(fmt.Sprintf("%s%s%s%s", cmdUnsetPfx, v, cmdUnsetDelim, cmdUnsetSfx)) } case "emacs": for _, v := range vars { sb.WriteString(fmt.Sprintf("%s%s%s%s", emacsUnsetPfx, v, emacsUnsetDelim, emacsUnsetSfx)) } case "none": sb.WriteString(fmt.Sprintf("%s%s%s", nonePfx, strings.Join(vars, " "), noneSfx)) default: sb.WriteString(fmt.Sprintf("%s%s%s", bashUnsetPfx, strings.Join(vars, " "), bashUnsetSfx)) } _, err := w.Write([]byte(sb.String())) return err } // dockerURL returns a the docker endpoint URL for an ip/port pair. 
func dockerURL(ip string, port int) string { return fmt.Sprintf("tcp://%s", net.JoinHostPort(ip, strconv.Itoa(port))) } // dockerEnvVars gets the necessary docker env variables to allow the use of minikube's docker daemon func dockerEnvVars(ec EnvConfig) (map[string]string, error) { env := map[string]string{ constants.DockerTLSVerifyEnv: "1", constants.DockerHostEnv: dockerURL(ec.hostIP, constants.DockerDaemonPort), constants.DockerCertPathEnv: ec.certsDir, constants.MinikubeActiveDockerdEnv: ec.profile, } if driver.IsKIC(ec.driver) { // for kic we need to find out what port docker allocated during creation port, err := oci.HostPortBinding(ec.driver, ec.profile, constants.DockerDaemonPort) if err != nil { return nil, errors.Wrapf(err, "get hostbind port for %d", constants.DockerDaemonPort) } env[constants.DockerCertPathEnv] = dockerURL(kic.DefaultBindIPV4, port) } return env, nil } func init() { defaultNoProxyGetter = &EnvNoProxyGetter{} dockerEnvCmd.Flags().BoolVar(&noProxy, "no-proxy", false, "Add machine IP to NO_PROXY environment variable") dockerEnvCmd.Flags().StringVar(&forceShell, "shell", "", "Force environment to be configured for a specified shell: [fish, cmd, powershell, tcsh, bash, zsh], default is auto-detect") dockerEnvCmd.Flags().BoolVarP(&unset, "unset", "u", false, "Unset variables instead of setting them") }
[ "\"no_proxy\"", "\"NO_PROXY\"" ]
[]
[ "NO_PROXY", "no_proxy" ]
[]
["NO_PROXY", "no_proxy"]
go
2
0
var/spack/repos/builtin/packages/legion/package.py
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other # Spack Project Developers. See the top-level COPYRIGHT file for details. # # SPDX-License-Identifier: (Apache-2.0 OR MIT) import os from spack import * class Legion(CMakePackage): """Legion is a data-centric parallel programming system for writing portable high performance programs targeted at distributed heterogeneous architectures. Legion presents abstractions which allow programmers to describe properties of program data (e.g. independence, locality). By making the Legion programming system aware of the structure of program data, it can automate many of the tedious tasks programmers currently face, including correctly extracting task- and data-level parallelism and moving data around complex memory hierarchies. A novel mapping interface provides explicit programmer controlled placement of data in the memory hierarchy and assignment of tasks to processors in a way that is orthogonal to correctness, thereby enabling easy porting and tuning of Legion applications to new architectures.""" homepage = "https://legion.stanford.edu/" git = "https://github.com/StanfordLegion/legion.git" maintainers = ['pmccormick', 'streichler'] tags = ['e4s'] version('21.03.0', tag='legion-21.03.0') version('stable', branch='stable') version('master', branch='master') version('cr', branch='control_replication') depends_on("[email protected]:", type='build') # TODO: Need to spec version of MPI v3 for use of the low-level MPI transport # layer. At present the MPI layer is still experimental and we discourge its # use for general (not legion development) use cases. depends_on('mpi', when='network=mpi') depends_on('mpi', when='network=gasnet') # MPI is required to build gasnet (needs mpicc). 
depends_on('ucx', when='conduit=ucx') depends_on('mpi', when='conduit=mpi') depends_on('[email protected]:11.9', when='+cuda_unsupported_compiler') depends_on('[email protected]:11.9', when='+cuda') depends_on('hdf5', when='+hdf5') depends_on('hwloc', when='+hwloc') # cuda-centric # reminder for arch numbers to names: 60=pascal, 70=volta, 75=turing, 80=ampere # TODO: we could use a map here to clean up and use naming vs. numbers. cuda_arch_list = ('60', '70', '75', '80') for nvarch in cuda_arch_list: depends_on('[email protected]+cuda+cuda_lambda+wrapper cuda_arch={0}'.format(nvarch), when='%gcc+kokkos+cuda cuda_arch={0}'.format(nvarch)) depends_on("[email protected]+cuda+cuda_lambda~wrapper cuda_arch={0}".format(nvarch), when="%clang+kokkos+cuda cuda_arch={0}".format(nvarch)) depends_on('[email protected]~cuda', when='+kokkos~cuda') depends_on("[email protected]~cuda+openmp", when='+kokkos+openmp') depends_on('python@3', when='+python') depends_on('papi', when='+papi') depends_on('zlib', when='+zlib') # A C++ standard variant to work-around some odd behaviors with apple-clang # but this might be helpful for other use cases down the road. Legion's # current development policy is C++11 or greater so we capture that aspect # here. cpp_stds = ["11", "14", "17", "20"] variant('cxxstd', default="11", values=cpp_stds, multi=False) # TODO: Need a AMD/HIP variant to match support landing in 21.03.0. # Network transport layer: the underlying data transport API should be used for # distributed data movement. For Legion, gasnet is the currently the most # mature. We have many users that default to using no network layer for # day-to-day development thus we default to 'none'. MPI support is new and # should be considered as a beta release. 
variant('network', default='none', values=('gasnet', 'mpi', 'none'), description="The network communications/transport layer to use.", multi=False) # Add Gasnet tarball dependency in spack managed manner # TODO: Provide less mutable tag instead of branch resource(name='stanfordgasnet', git='https://github.com/StanfordLegion/gasnet.git', destination='stanfordgasnet', branch='master', when='network=gasnet') # We default to automatically embedding a gasnet build. To override this # point the package a pre-installed version of GASNet-Ex via the gasnet_root # variant. # # make sure we have a valid directory provided for gasnet_root... def validate_gasnet_root(value): if value == 'none': return True if not os.path.isdir(value): print("gasnet_root:", value, "-- no such directory.") return False else: return True variant('gasnet_root', default='none', values=validate_gasnet_root, description="Path to a pre-installed version of GASNet (prefix directory).", multi=False) conflicts('gasnet_root', when="network=mpi") variant('conduit', default='none', values=('aries', 'ibv', 'udp', 'mpi', 'ucx', 'none'), description="The gasnet conduit(s) to enable.", multi=False) conflicts('conduit=none', when='network=gasnet', msg="a conduit must be selected when 'network=gasnet'") gasnet_conduits = ('aries', 'ibv', 'udp', 'mpi', 'ucx') for c in gasnet_conduits: conflict_str = 'conduit=%s' % c conflicts(conflict_str, when='network=mpi', msg="conduit attribute requires 'network=gasnet'.") conflicts(conflict_str, when='network=none', msg="conduit attribute requires 'network=gasnet'.") variant('gasnet_debug', default=False, description="Build gasnet with debugging enabled.") conflicts('+gasnet_debug', when='network=mpi') conflicts('+gasnet_debug', when='network=none') variant('shared', default=False, description="Build shared libraries.") variant('bounds_checks', default=False, description="Enable bounds checking in Legion accessors.") variant('privilege_checks', default=False, 
description="Enable runtime privildge checks in Legion accessors.") variant('enable_tls', default=False, description="Enable thread-local-storage of the Legion context.") variant('output_level', default='warning', # Note: these values are dependent upon those used in the cmake config. values=("spew", "debug", "info", "print", "warning", "error", "fatal", "none"), description="Set the compile-time logging level.", multi=False) variant('spy', default=False, description="Enable detailed logging for Legion Spy debugging.") # note: we will be dependent upon spack's latest-and-greatest cuda version... variant('cuda', default=False, description="Enable CUDA support.") variant('cuda_hijack', default=False, description="Hijack application calls into the CUDA runtime (+cuda).") variant('cuda_arch', default='70', values=cuda_arch_list, description="GPU/CUDA architecture to build for.", multi=False) variant('cuda_unsupported_compiler', default=False, description="Disable nvcc version check (--allow-unsupported-compiler).") conflicts('+cuda_hijack', when='~cuda') variant('fortran', default=False, description="Enable Fortran bindings.") variant('hdf5', default=False, description="Enable support for HDF5.") variant('hwloc', default=False, description="Use hwloc for topology awareness.") variant('kokkos', default=False, description="Enable support for interoperability with Kokkos.") variant('bindings', default=False, description="Build runtime language bindings (excl. 
Fortran).") variant('libdl', default=True, description="Enable support for dynamic object/library loading.") variant('openmp', default=False, description="Enable support for OpenMP within Legion tasks.") variant('papi', default=False, description="Enable PAPI performance measurements.") variant('python', default=False, description="Enable Python support.") variant('zlib', default=True, description="Enable zlib support.") variant('redop_complex', default=False, description="Use reduction operators for complex types.") variant('max_dims', values=int, default=3, description="Set max number of dimensions for logical regions.") variant('max_fields', values=int, default=512, description="Maximum number of fields allowed in a logical region.") def cmake_args(self): spec = self.spec cmake_cxx_flags = [] from_variant = self.define_from_variant options = [ from_variant("CMAKE_CXX_STANDARD", "cxxstd") ] if 'network=gasnet' in spec: options.append('-DLegion_NETWORKS=gasnetex') if spec.variants['gasnet_root'].value != 'none': gasnet_dir = spec.variants['gasnet_root'].value options.append('-DGASNet_ROOT_DIR=%s' % gasnet_dir) else: gasnet_dir = join_path(self.stage.source_path, "stanfordgasnet", "gasnet") options.append('-DLegion_EMBED_GASNet=ON') options.append('-DLegion_EMBED_GASNet_LOCALSRC=%s' % gasnet_dir) gasnet_conduit = spec.variants['conduit'].value options.append('-DGASNet_CONDUIT=%s' % gasnet_conduit) if '+gasnet_debug' in spec: options.append('-DLegion_EMBED_GASNet_CONFIGURE_ARGS=--enable-debug') elif 'network=mpi' in spec: options.append('-DLegion_NETWORKS=mpi') if spec.variants['gasnet_root'].value != 'none': raise InstallError("'gasnet_root' is only valid when 'network=gasnet'.") else: if spec.variants['gasnet_root'].value != 'none': raise InstallError("'gasnet_root' is only valid when 'network=gasnet'.") options.append('-DLegion_EMBED_GASNet=OFF') if '+shared' in spec: options.append('-DBUILD_SHARED_LIBS=ON') else: options.append('-DBUILD_SHARED_LIBS=OFF') if 
'+bounds_checks' in spec: # default is off. options.append('-DLegion_BOUNDS_CHECKS=ON') if '+privilege_checks' in spec: # default is off. options.append('-DLegion_PRIVILEGE_CHECKS=ON') if '+enable_tls' in spec: # default is off. options.append('-DLegion_ENABLE_TLS=ON') if 'output_level' in spec: level = str.upper(spec.variants['output_level'].value) options.append('-DLegion_OUTPUT_LEVEL=%s' % level) if '+spy' in spec: # default is off. options.append('-DLegion_SPY=ON') if '+cuda' in spec: cuda_arch = spec.variants['cuda_arch'].value options.append('-DLegion_USE_CUDA=ON') options.append('-DLegion_GPU_REDUCTIONS=ON') options.append('-DLegion_CUDA_ARCH=%s' % cuda_arch) if '+cuda_hijack' in spec: options.append('-DLegion_HIJACK_CUDART=ON') else: options.append('-DLegion_HIJACK_CUDART=OFF') if '+cuda_unsupported_compiler' in spec: options.append('-DCUDA_NVCC_FLAGS:STRING=--allow-unsupported-compiler') if '+fortran' in spec: # default is off. options.append('-DLegion_USE_Fortran=ON') if '+hdf5' in spec: # default is off. options.append('-DLegion_USE_HDF5=ON') if '+hwloc' in spec: # default is off. options.append('-DLegion_USE_HWLOC=ON') if '+kokkos' in spec: # default is off. options.append('-DLegion_USE_Kokkos=ON') os.environ['KOKKOS_CXX_COMPILER'] = spec['kokkos'].kokkos_cxx if '+libdl' in spec: # default is on. options.append('-DLegion_USE_LIBDL=ON') else: options.append('-DLegion_USE_LIBDL=OFF') if '+openmp' in spec: # default is off. options.append('-DLegion_USE_OpenMP=ON') if '+papi' in spec: # default is off. options.append('-DLegion_USE_PAPI=ON') if '+python' in spec: # default is off. options.append('-DLegion_USE_Python=ON') if '+zlib' in spec: # default is on. options.append('-DLegion_USE_ZLIB=ON') else: options.append('-DLegion_USE_ZLIB=OFF') if '+redop_complex' in spec: # default is off. options.append('-DLegion_REDOP_COMPLEX=ON') if '+bindings' in spec: # default is off. 
options.append('-DLegion_BUILD_BINDINGS=ON') options.append('-DLegion_REDOP_COMPLEX=ON') # required for bindings options.append('-DLegion_USE_Fortran=ON') if spec.variants['build_type'].value == 'Debug': cmake_cxx_flags.extend([ '-DDEBUG_REALM', '-DDEBUG_LEGION', '-ggdb', ]) maxdims = int(spec.variants['max_dims'].value) # TODO: sanity check if maxdims < 0 || > 9??? options.append('-DLegion_MAX_DIM=%d' % maxdims) maxfields = int(spec.variants['max_fields'].value) if (maxfields <= 0): maxfields = 512 # make sure maxfields is a power of two. if not, # find the next largest power of two and use that... if (maxfields & (maxfields - 1) != 0): while maxfields & maxfields - 1: maxfields = maxfields & maxfields - 1 maxfields = maxfields << 1 options.append('-DLegion_MAX_FIELDS=%d' % maxfields) # This disables Legion's CMake build system's logic for targeting the native # CPU architecture in favor of Spack-provided compiler flags options.append('-DBUILD_MARCH:STRING=') return options @run_after('install') def cache_test_sources(self): """Copy the example source files after the package is installed to an install test subdirectory for use during `spack test run`.""" self.cache_extra_test_sources([join_path('examples', 'local_function_tasks')]) def run_local_function_tasks_test(self): """Run stand alone test: local_function_tasks""" test_dir = join_path(self.test_suite.current_test_cache_dir, 'examples', 'local_function_tasks') if not os.path.exists(test_dir): print('Skipping local_function_tasks test') return exe = 'local_function_tasks' cmake_args = ['-DCMAKE_C_COMPILER={0}'.format(self.compiler.cc), '-DCMAKE_CXX_COMPILER={0}'.format(self.compiler.cxx), '-DLegion_DIR={0}'.format(join_path(self.prefix, 'share', 'Legion', 'cmake'))] self.run_test('cmake', options=cmake_args, purpose='test: generate makefile for {0} example'.format(exe), work_dir=test_dir) self.run_test('make', purpose='test: build {0} example'.format(exe), work_dir=test_dir) self.run_test(exe, purpose='test: 
run {0} example'.format(exe), work_dir=test_dir) def test(self): self.run_local_function_tasks_test()
[]
[]
[ "KOKKOS_CXX_COMPILER" ]
[]
["KOKKOS_CXX_COMPILER"]
python
1
0